Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-22 02:33:06 +01:00)
Infer alignment of unmarked loads in IR/bitcode parsing.
For IR generated by a compiler, this is really simple: you just take the datalayout from the beginning of the file, and apply it to all the IR later in the file. For optimization testcases that don't care about the datalayout, this is also really simple: we just use the default datalayout.

The complexity here comes from the fact that some LLVM tools allow overriding the datalayout: some tools have an explicit flag for this, some tools will infer a datalayout based on the code generation target. Supporting this properly required plumbing through a bunch of new machinery: we want to allow overriding the datalayout after the datalayout is parsed from the file, but before we use any information from it. Therefore, IR/bitcode parsing now has a callback to allow tools to compute the datalayout at the appropriate time.

Not sure if I covered all the LLVM tools that want to use the callback. (clang? lli? Misc IR manipulation tools like llvm-link?). But this is at least enough for all the LLVM regression tests, and IR without a datalayout is not something frontends should generate.

This change had some sort of weird effects for certain CodeGen regression tests: if the datalayout is overridden with a datalayout with a different program or stack address space, we now parse IR based on the overridden datalayout, instead of the one written in the file (or the default one, if none is specified). This broke a few AVR tests, and one AMDGPU test.

Outside the CodeGen tests I mentioned, the test changes are all just fixing CHECK lines and moving around datalayout lines in weird places.

Differential Revision: https://reviews.llvm.org/D78403
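To make the new interface concrete, here is a hedged sketch (not code from this patch) of how a tool with an explicit override flag might drive the callback; the helper name loadWithOverride and the ForcedDL parameter are made up for illustration, while DataLayoutCallbackTy and the parseIRFile signature are exactly the ones added in the diff below.

#include "llvm/ADT/Optional.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Support/SourceMgr.h"
#include <memory>
#include <string>

using namespace llvm;

// Parse an IR or bitcode file, forcing ForcedDL as the module's datalayout
// when it is non-empty. The callback runs after the file's own datalayout
// has been read but before any layout-dependent parsing (such as inferring
// the alignment of unmarked loads) uses it.
static std::unique_ptr<Module> loadWithOverride(StringRef Filename,
                                                StringRef ForcedDL,
                                                LLVMContext &Context,
                                                SMDiagnostic &Err) {
  auto Callback = [&](StringRef TargetTriple) -> Optional<std::string> {
    // A codegen driver could instead use TargetTriple to pick a
    // target-specific layout here.
    if (ForcedDL.empty())
      return None; // keep the layout from the file (or the default one)
    return ForcedDL.str();
  };
  return parseIRFile(Filename, Err, Context, Callback);
}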
parent 2a4ce8280b
commit f5d3346387
@@ -13,6 +1,7 @@
#ifndef LLVM_ASMPARSER_PARSER_H
#define LLVM_ASMPARSER_PARSER_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include <memory>

@@ -27,6 +28,9 @@ struct SlotMapping;
class SMDiagnostic;
class Type;

typedef llvm::function_ref<Optional<std::string>(StringRef)>
DataLayoutCallbackTy;

/// This function is a main interface to the LLVM Assembly Parser. It parses
/// an ASCII file that (presumably) contains LLVM Assembly code. It returns a
/// Module (intermediate representation) with the corresponding features. Note
@@ -38,14 +42,9 @@ class Type;
/// \param Context Context in which to allocate globals info.
/// \param Slots The optional slot mapping that will be initialized during
/// parsing.
/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
/// This option should only be set to false by llvm-as
/// for use inside the LLVM testuite!
/// \param DataLayoutString Override datalayout in the llvm assembly.
std::unique_ptr<Module>
parseAssemblyFile(StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
SlotMapping *Slots = nullptr, bool UpgradeDebugInfo = true,
StringRef DataLayoutString = "");
std::unique_ptr<Module> parseAssemblyFile(StringRef Filename, SMDiagnostic &Err,
LLVMContext &Context,
SlotMapping *Slots = nullptr);

/// The function is a secondary interface to the LLVM Assembly Parser. It parses
/// an ASCII string that (presumably) contains LLVM Assembly code. It returns a
@@ -58,16 +57,10 @@ parseAssemblyFile(StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
/// \param Context Context in which to allocate globals info.
/// \param Slots The optional slot mapping that will be initialized during
/// parsing.
/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
/// This option should only be set to false by llvm-as
/// for use inside the LLVM testuite!
/// \param DataLayoutString Override datalayout in the llvm assembly.
std::unique_ptr<Module> parseAssemblyString(StringRef AsmString,
SMDiagnostic &Err,
LLVMContext &Context,
SlotMapping *Slots = nullptr,
bool UpgradeDebugInfo = true,
StringRef DataLayoutString = "");
SlotMapping *Slots = nullptr);

/// Holds the Module and ModuleSummaryIndex returned by the interfaces
/// that parse both.
@@ -88,15 +81,16 @@ struct ParsedModuleAndIndex {
/// \param Context Context in which to allocate globals info.
/// \param Slots The optional slot mapping that will be initialized during
/// parsing.
/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
/// This option should only be set to false by llvm-as
/// for use inside the LLVM testuite!
/// \param DataLayoutString Override datalayout in the llvm assembly.
ParsedModuleAndIndex
parseAssemblyFileWithIndex(StringRef Filename, SMDiagnostic &Err,
LLVMContext &Context, SlotMapping *Slots = nullptr,
bool UpgradeDebugInfo = true,
StringRef DataLayoutString = "");
/// \param DataLayoutCallback Override datalayout in the llvm assembly.
ParsedModuleAndIndex parseAssemblyFileWithIndex(
StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
SlotMapping *Slots = nullptr,
DataLayoutCallbackTy DataLayoutCallback = [](StringRef) { return None; });

/// Only for use in llvm-as for testing; this does not produce a valid module.
ParsedModuleAndIndex parseAssemblyFileWithIndexNoUpgradeDebugInfo(
StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
SlotMapping *Slots, DataLayoutCallbackTy DataLayoutCallback);

/// This function is a main interface to the LLVM Assembly Parser. It parses
/// an ASCII file that (presumably) contains LLVM Assembly code for a module
@@ -115,15 +109,11 @@ parseSummaryIndexAssemblyFile(StringRef Filename, SMDiagnostic &Err);
/// \param Err Error result info.
/// \param Slots The optional slot mapping that will be initialized during
/// parsing.
/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
/// This option should only be set to false by llvm-as
/// for use inside the LLVM testuite!
/// \param DataLayoutString Override datalayout in the llvm assembly.
std::unique_ptr<Module> parseAssembly(MemoryBufferRef F, SMDiagnostic &Err,
LLVMContext &Context,
SlotMapping *Slots = nullptr,
bool UpgradeDebugInfo = true,
StringRef DataLayoutString = "");
/// \param DataLayoutCallback Override datalayout in the llvm assembly.
std::unique_ptr<Module> parseAssembly(
MemoryBufferRef F, SMDiagnostic &Err, LLVMContext &Context,
SlotMapping *Slots = nullptr,
DataLayoutCallbackTy DataLayoutCallback = [](StringRef) { return None; });

/// Parse LLVM Assembly including the summary index from a MemoryBuffer.
///
@@ -131,18 +121,13 @@ std::unique_ptr<Module> parseAssembly(MemoryBufferRef F, SMDiagnostic &Err,
/// \param Err Error result info.
/// \param Slots The optional slot mapping that will be initialized during
/// parsing.
/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
/// This option should only be set to false by llvm-as
/// for use inside the LLVM testuite!
/// \param DataLayoutString Override datalayout in the llvm assembly.
/// \param DataLayoutCallback Override datalayout in the llvm assembly.
///
/// parseAssemblyFileWithIndex is a wrapper around this function.
ParsedModuleAndIndex parseAssemblyWithIndex(MemoryBufferRef F,
SMDiagnostic &Err,
LLVMContext &Context,
SlotMapping *Slots = nullptr,
bool UpgradeDebugInfo = true,
StringRef DataLayoutString = "");
SlotMapping *Slots = nullptr);

/// Parse LLVM Assembly for summary index from a MemoryBuffer.
///
@@ -165,14 +150,11 @@ parseSummaryIndexAssembly(MemoryBufferRef F, SMDiagnostic &Err);
/// \param Slots The optional slot mapping that will be initialized during
/// parsing.
/// \return true on error.
/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
/// This option should only be set to false by llvm-as
/// for use inside the LLVM testuite!
/// \param DataLayoutString Override datalayout in the llvm assembly.
bool parseAssemblyInto(MemoryBufferRef F, Module *M, ModuleSummaryIndex *Index,
SMDiagnostic &Err, SlotMapping *Slots = nullptr,
bool UpgradeDebugInfo = true,
StringRef DataLayoutString = "");
/// \param DataLayoutCallback Override datalayout in the llvm assembly.
bool parseAssemblyInto(
MemoryBufferRef F, Module *M, ModuleSummaryIndex *Index, SMDiagnostic &Err,
SlotMapping *Slots = nullptr,
DataLayoutCallbackTy DataLayoutCallback = [](StringRef) { return None; });

/// Parse a type and a constant value in the given string.
///

@@ -31,6 +31,9 @@ namespace llvm {
class LLVMContext;
class Module;

typedef llvm::function_ref<Optional<std::string>(StringRef)>
DataLayoutCallbackTy;

// These functions are for converting Expected/Error values to
// ErrorOr/std::error_code for compatibility with legacy clients. FIXME:
// Remove these functions once no longer needed by the C and libLTO APIs.
@@ -77,10 +80,10 @@ class Module;
friend Expected<BitcodeFileContents>
getBitcodeFileContents(MemoryBufferRef Buffer);

Expected<std::unique_ptr<Module>> getModuleImpl(LLVMContext &Context,
bool MaterializeAll,
bool ShouldLazyLoadMetadata,
bool IsImporting);
Expected<std::unique_ptr<Module>>
getModuleImpl(LLVMContext &Context, bool MaterializeAll,
bool ShouldLazyLoadMetadata, bool IsImporting,
DataLayoutCallbackTy DataLayoutCallback);

public:
StringRef getBuffer() const {
@@ -100,7 +103,9 @@ class Module;
bool IsImporting);

/// Read the entire bitcode module and return it.
Expected<std::unique_ptr<Module>> parseModule(LLVMContext &Context);
Expected<std::unique_ptr<Module>> parseModule(
LLVMContext &Context, DataLayoutCallbackTy DataLayoutCallback =
[](StringRef) { return None; });

/// Returns information about the module to be used for LTO: whether to
/// compile with ThinLTO, and whether it has a summary.
@@ -163,8 +168,11 @@ class Module;
Expected<std::string> getBitcodeProducerString(MemoryBufferRef Buffer);

/// Read the specified bitcode file, returning the module.
Expected<std::unique_ptr<Module>> parseBitcodeFile(MemoryBufferRef Buffer,
LLVMContext &Context);
Expected<std::unique_ptr<Module>> parseBitcodeFile(
MemoryBufferRef Buffer, LLVMContext &Context,
DataLayoutCallbackTy DataLayoutCallback = [](StringRef) {
return None;
});

/// Returns LTO information for the specified bitcode file.
Expected<BitcodeLTOInfo> getBitcodeLTOInfo(MemoryBufferRef Buffer);

@@ -29,6 +29,9 @@ class MachineModuleInfo;
class SMDiagnostic;
class StringRef;

typedef llvm::function_ref<Optional<std::string>(StringRef)>
DataLayoutCallbackTy;

/// This class initializes machine functions by applying the state loaded from
/// a MIR file.
class MIRParser {
@@ -43,7 +46,8 @@ public:
///
/// A new, empty module is created if the LLVM IR isn't present.
/// \returns nullptr if a parsing error occurred.
std::unique_ptr<Module> parseIRModule();
std::unique_ptr<Module> parseIRModule(
DataLayoutCallbackTy DataLayoutCallback = [](StringRef) { return None; });

/// Parses MachineFunctions in the MIR file and add them to the given
/// MachineModuleInfo \p MMI.

@@ -14,6 +14,7 @@
#ifndef LLVM_IRREADER_IRREADER_H
#define LLVM_IRREADER_IRREADER_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include <memory>

@@ -25,6 +26,9 @@ class Module;
class SMDiagnostic;
class LLVMContext;

typedef llvm::function_ref<Optional<std::string>(StringRef)>
DataLayoutCallbackTy;

/// If the given MemoryBuffer holds a bitcode image, return a Module
/// for it which does lazy deserialization of function bodies. Otherwise,
/// attempt to parse it as LLVM Assembly and return a fully populated
@@ -47,26 +51,18 @@ getLazyIRFileModule(StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
/// If the given MemoryBuffer holds a bitcode image, return a Module
/// for it. Otherwise, attempt to parse it as LLVM Assembly and return
/// a Module for it.
/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
/// This option should only be set to false by llvm-as
/// for use inside the LLVM testuite!
/// \param DataLayoutString Override datalayout in the llvm assembly.
std::unique_ptr<Module> parseIR(MemoryBufferRef Buffer, SMDiagnostic &Err,
LLVMContext &Context,
bool UpgradeDebugInfo = true,
StringRef DataLayoutString = "");
/// \param DataLayoutCallback Override datalayout in the llvm assembly.
std::unique_ptr<Module> parseIR(
MemoryBufferRef Buffer, SMDiagnostic &Err, LLVMContext &Context,
DataLayoutCallbackTy DataLayoutCallback = [](StringRef) { return None; });

/// If the given file holds a bitcode image, return a Module for it.
/// Otherwise, attempt to parse it as LLVM Assembly and return a Module
/// for it.
/// \param UpgradeDebugInfo Run UpgradeDebugInfo, which runs the Verifier.
/// This option should only be set to false by llvm-as
/// for use inside the LLVM testuite!
/// \param DataLayoutString Override datalayout in the llvm assembly.
std::unique_ptr<Module> parseIRFile(StringRef Filename, SMDiagnostic &Err,
LLVMContext &Context,
bool UpgradeDebugInfo = true,
StringRef DataLayoutString = "");
/// \param DataLayoutCallback Override datalayout in the llvm assembly.
std::unique_ptr<Module> parseIRFile(
StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
DataLayoutCallbackTy DataLayoutCallback = [](StringRef) { return None; });
}

#endif

@@ -61,7 +61,8 @@ static std::string getTypeString(Type *T) {
}

/// Run: module ::= toplevelentity*
bool LLParser::Run() {
bool LLParser::Run(bool UpgradeDebugInfo,
DataLayoutCallbackTy DataLayoutCallback) {
// Prime the lexer.
Lex.Lex();

@@ -73,9 +74,12 @@ bool LLParser::Run() {
if (M) {
if (ParseTargetDefinitions())
return true;

if (auto LayoutOverride = DataLayoutCallback(M->getTargetTriple()))
M->setDataLayout(*LayoutOverride);
}

return ParseTopLevelEntities() || ValidateEndOfModule() ||
return ParseTopLevelEntities() || ValidateEndOfModule(UpgradeDebugInfo) ||
ValidateEndOfIndex();
}

@@ -123,7 +127,7 @@ void LLParser::restoreParsingState(const SlotMapping *Slots) {

/// ValidateEndOfModule - Do final validity and sanity checks at the end of the
/// module.
bool LLParser::ValidateEndOfModule() {
bool LLParser::ValidateEndOfModule(bool UpgradeDebugInfo) {
if (!M)
return false;
// Handle any function attribute group forward references.
@@ -400,8 +404,7 @@ bool LLParser::ParseTargetDefinition() {
if (ParseToken(lltok::equal, "expected '=' after target datalayout") ||
ParseStringConstant(Str))
return true;
if (DataLayoutStr.empty())
M->setDataLayout(Str);
M->setDataLayout(Str);
return false;
}
}
@@ -7053,8 +7056,11 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
if (Ty != cast<PointerType>(Val->getType())->getElementType())
return Error(ExplicitTypeLoc,
"explicit pointee type doesn't match operand's pointee type");

Inst = new LoadInst(Ty, Val, "", isVolatile, Alignment, Ordering, SSID);
if (!Alignment && !Ty->isSized())
return Error(ExplicitTypeLoc, "loading unsized types is not allowed");
if (!Alignment)
Alignment = M->getDataLayout().getABITypeAlign(Ty);
Inst = new LoadInst(Ty, Val, "", isVolatile, *Alignment, Ordering, SSID);
return AteExtraComma ? InstExtraComma : InstNormal;
}

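Restated outside the parser for clarity, the rule the new ParseLoad code above (and the bitcode reader further below) applies is small; this is an illustrative sketch with a made-up helper name, not a function from the patch:

#include <cassert>

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// An explicit `align` on the load is kept as written; an unannotated load of
// a sized type falls back to the ABI alignment from the module's datalayout.
// This is why any datalayout override has to be installed before function
// bodies are parsed.
static Align inferLoadAlignment(MaybeAlign Explicit, Type *Ty,
                                const Module &M) {
  if (Explicit)
    return *Explicit;
  assert(Ty->isSized() && "parser rejects unannotated loads of unsized types");
  return M.getDataLayout().getABITypeAlign(Ty);
}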
@@ -16,6 +16,7 @@
#include "LLLexer.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ModuleSummaryIndex.h"
@@ -156,23 +157,17 @@ namespace llvm {
/// UpgradeDebuginfo so it can generate broken bitcode.
bool UpgradeDebugInfo;

/// DataLayout string to override that in LLVM assembly.
StringRef DataLayoutStr;

std::string SourceFileName;

public:
LLParser(StringRef F, SourceMgr &SM, SMDiagnostic &Err, Module *M,
ModuleSummaryIndex *Index, LLVMContext &Context,
SlotMapping *Slots = nullptr, bool UpgradeDebugInfo = true,
StringRef DataLayoutString = "")
SlotMapping *Slots = nullptr)
: Context(Context), Lex(F, SM, Err, Context), M(M), Index(Index),
Slots(Slots), BlockAddressPFS(nullptr),
UpgradeDebugInfo(UpgradeDebugInfo), DataLayoutStr(DataLayoutString) {
if (!DataLayoutStr.empty())
M->setDataLayout(DataLayoutStr);
}
bool Run();
Slots(Slots), BlockAddressPFS(nullptr) {}
bool Run(
bool UpgradeDebugInfo,
DataLayoutCallbackTy DataLayoutCallback = [](Module *) {});

bool parseStandaloneConstantValue(Constant *&C, const SlotMapping *Slots);

@@ -302,7 +297,7 @@ namespace llvm {

// Top-Level Entities
bool ParseTopLevelEntities();
bool ValidateEndOfModule();
bool ValidateEndOfModule(bool UpgradeDebugInfo);
bool ValidateEndOfIndex();
bool ParseTargetDefinitions();
bool ParseTargetDefinition();

@@ -22,39 +22,45 @@

using namespace llvm;

bool llvm::parseAssemblyInto(MemoryBufferRef F, Module *M,
ModuleSummaryIndex *Index, SMDiagnostic &Err,
SlotMapping *Slots, bool UpgradeDebugInfo,
StringRef DataLayoutString) {
static bool parseAssemblyInto(MemoryBufferRef F, Module *M,
ModuleSummaryIndex *Index, SMDiagnostic &Err,
SlotMapping *Slots, bool UpgradeDebugInfo,
DataLayoutCallbackTy DataLayoutCallback) {
SourceMgr SM;
std::unique_ptr<MemoryBuffer> Buf = MemoryBuffer::getMemBuffer(F);
SM.AddNewSourceBuffer(std::move(Buf), SMLoc());

LLVMContext Context;
return LLParser(F.getBuffer(), SM, Err, M, Index,
M ? M->getContext() : Context, Slots, UpgradeDebugInfo,
DataLayoutString)
.Run();
M ? M->getContext() : Context, Slots)
.Run(UpgradeDebugInfo, DataLayoutCallback);
}

bool llvm::parseAssemblyInto(MemoryBufferRef F, Module *M,
ModuleSummaryIndex *Index, SMDiagnostic &Err,
SlotMapping *Slots,
DataLayoutCallbackTy DataLayoutCallback) {
return ::parseAssemblyInto(F, M, Index, Err, Slots,
/*UpgradeDebugInfo*/ true, DataLayoutCallback);
}

std::unique_ptr<Module>
llvm::parseAssembly(MemoryBufferRef F, SMDiagnostic &Err, LLVMContext &Context,
SlotMapping *Slots, bool UpgradeDebugInfo,
StringRef DataLayoutString) {
SlotMapping *Slots,
DataLayoutCallbackTy DataLayoutCallback) {
std::unique_ptr<Module> M =
std::make_unique<Module>(F.getBufferIdentifier(), Context);

if (parseAssemblyInto(F, M.get(), nullptr, Err, Slots, UpgradeDebugInfo,
DataLayoutString))
if (parseAssemblyInto(F, M.get(), nullptr, Err, Slots, DataLayoutCallback))
return nullptr;

return M;
}

std::unique_ptr<Module>
llvm::parseAssemblyFile(StringRef Filename, SMDiagnostic &Err,
LLVMContext &Context, SlotMapping *Slots,
bool UpgradeDebugInfo, StringRef DataLayoutString) {
std::unique_ptr<Module> llvm::parseAssemblyFile(StringRef Filename,
SMDiagnostic &Err,
LLVMContext &Context,
SlotMapping *Slots) {
ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
MemoryBuffer::getFileOrSTDIN(Filename);
if (std::error_code EC = FileOrErr.getError()) {
@@ -63,28 +69,40 @@ llvm::parseAssemblyFile(StringRef Filename, SMDiagnostic &Err,
return nullptr;
}

return parseAssembly(FileOrErr.get()->getMemBufferRef(), Err, Context, Slots,
UpgradeDebugInfo, DataLayoutString);
return parseAssembly(FileOrErr.get()->getMemBufferRef(), Err, Context, Slots);
}

ParsedModuleAndIndex llvm::parseAssemblyWithIndex(
MemoryBufferRef F, SMDiagnostic &Err, LLVMContext &Context,
SlotMapping *Slots, bool UpgradeDebugInfo, StringRef DataLayoutString) {
static ParsedModuleAndIndex
parseAssemblyWithIndex(MemoryBufferRef F, SMDiagnostic &Err,
LLVMContext &Context, SlotMapping *Slots,
bool UpgradeDebugInfo,
DataLayoutCallbackTy DataLayoutCallback) {
std::unique_ptr<Module> M =
std::make_unique<Module>(F.getBufferIdentifier(), Context);
std::unique_ptr<ModuleSummaryIndex> Index =
std::make_unique<ModuleSummaryIndex>(/*HaveGVs=*/true);

if (parseAssemblyInto(F, M.get(), Index.get(), Err, Slots, UpgradeDebugInfo,
DataLayoutString))
DataLayoutCallback))
return {nullptr, nullptr};

return {std::move(M), std::move(Index)};
}

ParsedModuleAndIndex llvm::parseAssemblyFileWithIndex(
StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
SlotMapping *Slots, bool UpgradeDebugInfo, StringRef DataLayoutString) {
ParsedModuleAndIndex llvm::parseAssemblyWithIndex(MemoryBufferRef F,
SMDiagnostic &Err,
LLVMContext &Context,
SlotMapping *Slots) {
return ::parseAssemblyWithIndex(F, Err, Context, Slots,
/*UpgradeDebugInfo*/ true,
[](StringRef) { return None; });
}

static ParsedModuleAndIndex
parseAssemblyFileWithIndex(StringRef Filename, SMDiagnostic &Err,
LLVMContext &Context, SlotMapping *Slots,
bool UpgradeDebugInfo,
DataLayoutCallbackTy DataLayoutCallback) {
ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
MemoryBuffer::getFileOrSTDIN(Filename);
if (std::error_code EC = FileOrErr.getError()) {
@@ -95,16 +113,32 @@ ParsedModuleAndIndex llvm::parseAssemblyFileWithIndex(

return parseAssemblyWithIndex(FileOrErr.get()->getMemBufferRef(), Err,
Context, Slots, UpgradeDebugInfo,
DataLayoutString);
DataLayoutCallback);
}

std::unique_ptr<Module>
llvm::parseAssemblyString(StringRef AsmString, SMDiagnostic &Err,
LLVMContext &Context, SlotMapping *Slots,
bool UpgradeDebugInfo, StringRef DataLayoutString) {
ParsedModuleAndIndex
llvm::parseAssemblyFileWithIndex(StringRef Filename, SMDiagnostic &Err,
LLVMContext &Context, SlotMapping *Slots,
DataLayoutCallbackTy DataLayoutCallback) {
return ::parseAssemblyFileWithIndex(Filename, Err, Context, Slots,
/*UpgradeDebugInfo*/ true,
DataLayoutCallback);
}

ParsedModuleAndIndex llvm::parseAssemblyFileWithIndexNoUpgradeDebugInfo(
StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
SlotMapping *Slots, DataLayoutCallbackTy DataLayoutCallback) {
return ::parseAssemblyFileWithIndex(Filename, Err, Context, Slots,
/*UpgradeDebugInfo*/ false,
DataLayoutCallback);
}

std::unique_ptr<Module> llvm::parseAssemblyString(StringRef AsmString,
SMDiagnostic &Err,
LLVMContext &Context,
SlotMapping *Slots) {
MemoryBufferRef F(AsmString, "<string>");
return parseAssembly(F, Err, Context, Slots, UpgradeDebugInfo,
DataLayoutString);
return parseAssembly(F, Err, Context, Slots);
}

static bool parseSummaryIndexAssemblyInto(MemoryBufferRef F,
@@ -117,7 +151,8 @@ static bool parseSummaryIndexAssemblyInto(MemoryBufferRef F,
// The parser holds a reference to a context that is unused when parsing the
// index, but we need to initialize it.
LLVMContext unusedContext;
return LLParser(F.getBuffer(), SM, Err, nullptr, &Index, unusedContext).Run();
return LLParser(F.getBuffer(), SM, Err, nullptr, &Index, unusedContext)
.Run(true, [](StringRef) { return None; });
}

std::unique_ptr<ModuleSummaryIndex>

@@ -577,8 +577,11 @@ public:

/// Main interface to parsing a bitcode buffer.
/// \returns true if an error occurred.
Error parseBitcodeInto(Module *M, bool ShouldLazyLoadMetadata = false,
bool IsImporting = false);
Error parseBitcodeInto(
Module *M, bool ShouldLazyLoadMetadata = false, bool IsImporting = false,
DataLayoutCallbackTy DataLayoutCallback = [](std::string) {
return None;
});

static uint64_t decodeSignRotatedValue(uint64_t V);

@@ -723,7 +726,9 @@ private:
/// a corresponding error code.
Error parseAlignmentValue(uint64_t Exponent, MaybeAlign &Alignment);
Error parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind);
Error parseModule(uint64_t ResumeBit, bool ShouldLazyLoadMetadata = false);
Error parseModule(
uint64_t ResumeBit, bool ShouldLazyLoadMetadata = false,
DataLayoutCallbackTy DataLayoutCallback = [](StringRef) { return None; });

Error parseComdatRecord(ArrayRef<uint64_t> Record);
Error parseGlobalVarRecord(ArrayRef<uint64_t> Record);
@@ -3419,7 +3424,8 @@ Error BitcodeReader::parseGlobalIndirectSymbolRecord(
}

Error BitcodeReader::parseModule(uint64_t ResumeBit,
bool ShouldLazyLoadMetadata) {
bool ShouldLazyLoadMetadata,
DataLayoutCallbackTy DataLayoutCallback) {
if (ResumeBit) {
if (Error JumpFailed = Stream.JumpToBit(ResumeBit))
return JumpFailed;
@@ -3442,6 +3448,10 @@ Error BitcodeReader::parseModule(uint64_t ResumeBit,
std::string DL = llvm::UpgradeDataLayoutString(
TheModule->getDataLayoutStr(), TheModule->getTargetTriple());
TheModule->setDataLayout(DL);

if (auto LayoutOverride =
DataLayoutCallback(TheModule->getTargetTriple()))
TheModule->setDataLayout(*LayoutOverride);
};

// Read all the records for this module.
@@ -3695,11 +3705,12 @@ Error BitcodeReader::parseModule(uint64_t ResumeBit,
}

Error BitcodeReader::parseBitcodeInto(Module *M, bool ShouldLazyLoadMetadata,
bool IsImporting) {
bool IsImporting,
DataLayoutCallbackTy DataLayoutCallback) {
TheModule = M;
MDLoader = MetadataLoader(Stream, *M, ValueList, IsImporting,
[&](unsigned ID) { return getTypeByID(ID); });
return parseModule(0, ShouldLazyLoadMetadata);
return parseModule(0, ShouldLazyLoadMetadata, DataLayoutCallback);
}

Error BitcodeReader::typeCheckLoadStoreInst(Type *ValType, Type *PtrType) {
@@ -4832,7 +4843,11 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
MaybeAlign Align;
if (Error Err = parseAlignmentValue(Record[OpNum], Align))
return Err;
I = new LoadInst(Ty, Op, "", Record[OpNum + 1], Align);
if (!Align && !Ty->isSized())
return error("load of unsized type");
if (!Align)
Align = TheModule->getDataLayout().getABITypeAlign(Ty);
I = new LoadInst(Ty, Op, "", Record[OpNum + 1], *Align);
InstructionList.push_back(I);
break;
}
@@ -4869,7 +4884,9 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
MaybeAlign Align;
if (Error Err = parseAlignmentValue(Record[OpNum], Align))
return Err;
I = new LoadInst(Ty, Op, "", Record[OpNum + 1], Align, Ordering, SSID);
if (!Align)
return error("Alignment missing from atomic load");
I = new LoadInst(Ty, Op, "", Record[OpNum + 1], *Align, Ordering, SSID);
InstructionList.push_back(I);
break;
}
@@ -6466,7 +6483,8 @@ llvm::getBitcodeFileContents(MemoryBufferRef Buffer) {
/// everything.
Expected<std::unique_ptr<Module>>
BitcodeModule::getModuleImpl(LLVMContext &Context, bool MaterializeAll,
bool ShouldLazyLoadMetadata, bool IsImporting) {
bool ShouldLazyLoadMetadata, bool IsImporting,
DataLayoutCallbackTy DataLayoutCallback) {
BitstreamCursor Stream(Buffer);

std::string ProducerIdentification;
@@ -6491,8 +6509,8 @@ BitcodeModule::getModuleImpl(LLVMContext &Context, bool MaterializeAll,
M->setMaterializer(R);

// Delay parsing Metadata if ShouldLazyLoadMetadata is true.
if (Error Err =
R->parseBitcodeInto(M.get(), ShouldLazyLoadMetadata, IsImporting))
if (Error Err = R->parseBitcodeInto(M.get(), ShouldLazyLoadMetadata,
IsImporting, DataLayoutCallback))
return std::move(Err);

if (MaterializeAll) {
@@ -6510,7 +6528,8 @@ BitcodeModule::getModuleImpl(LLVMContext &Context, bool MaterializeAll,
Expected<std::unique_ptr<Module>>
BitcodeModule::getLazyModule(LLVMContext &Context, bool ShouldLazyLoadMetadata,
bool IsImporting) {
return getModuleImpl(Context, false, ShouldLazyLoadMetadata, IsImporting);
return getModuleImpl(Context, false, ShouldLazyLoadMetadata, IsImporting,
[](StringRef) { return None; });
}

// Parse the specified bitcode buffer and merge the index into CombinedIndex.
@@ -6676,19 +6695,21 @@ Expected<std::unique_ptr<Module>> llvm::getOwningLazyBitcodeModule(
}

Expected<std::unique_ptr<Module>>
BitcodeModule::parseModule(LLVMContext &Context) {
return getModuleImpl(Context, true, false, false);
BitcodeModule::parseModule(LLVMContext &Context,
DataLayoutCallbackTy DataLayoutCallback) {
return getModuleImpl(Context, true, false, false, DataLayoutCallback);
// TODO: Restore the use-lists to the in-memory state when the bitcode was
// written. We must defer until the Module has been fully materialized.
}

Expected<std::unique_ptr<Module>> llvm::parseBitcodeFile(MemoryBufferRef Buffer,
LLVMContext &Context) {
Expected<std::unique_ptr<Module>>
llvm::parseBitcodeFile(MemoryBufferRef Buffer, LLVMContext &Context,
DataLayoutCallbackTy DataLayoutCallback) {
Expected<BitcodeModule> BM = getSingleModule(Buffer);
if (!BM)
return BM.takeError();

return BM->parseModule(Context);
return BM->parseModule(Context, DataLayoutCallback);
}

Expected<std::string> llvm::getBitcodeTargetTriple(MemoryBufferRef Buffer) {

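For the bitcode path, a driver that infers the layout from its code generation target (the llc-style case described in the commit message) could call parseBitcodeFile roughly as follows. This is a hedged sketch rather than code from the patch: it assumes an already-configured TargetMachine, and loadBitcodeForTarget is a made-up name; only the parseBitcodeFile signature comes from the diff above.

#include "llvm/ADT/Optional.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

// Prefer the layout implied by the configured target over whatever string the
// bitcode recorded, so alignment inference for unmarked loads follows the
// target's rules.
static Expected<std::unique_ptr<Module>>
loadBitcodeForTarget(MemoryBufferRef Buffer, LLVMContext &Ctx,
                     const TargetMachine &TM) {
  auto FromTarget = [&](StringRef /*TargetTriple*/) -> Optional<std::string> {
    return TM.createDataLayout().getStringRepresentation();
  };
  return parseBitcodeFile(Buffer, Ctx, FromTarget);
}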
@@ -93,7 +93,8 @@ public:
/// file.
///
/// Return null if an error occurred.
std::unique_ptr<Module> parseIRModule();
std::unique_ptr<Module>
parseIRModule(DataLayoutCallbackTy DataLayoutCallback);

/// Create an empty function with the given name.
Function *createDummyFunction(StringRef Name, Module &M);
@@ -216,13 +217,17 @@ void MIRParserImpl::reportDiagnostic(const SMDiagnostic &Diag) {
Context.diagnose(DiagnosticInfoMIRParser(Kind, Diag));
}

std::unique_ptr<Module> MIRParserImpl::parseIRModule() {
std::unique_ptr<Module>
MIRParserImpl::parseIRModule(DataLayoutCallbackTy DataLayoutCallback) {
if (!In.setCurrentDocument()) {
if (In.error())
return nullptr;
// Create an empty module when the MIR file is empty.
NoMIRDocuments = true;
return std::make_unique<Module>(Filename, Context);
auto M = std::make_unique<Module>(Filename, Context);
if (auto LayoutOverride = DataLayoutCallback(M->getTargetTriple()))
M->setDataLayout(*LayoutOverride);
return M;
}

std::unique_ptr<Module> M;
@@ -232,7 +237,7 @@ std::unique_ptr<Module> MIRParserImpl::parseIRModule() {
dyn_cast_or_null<yaml::BlockScalarNode>(In.getCurrentNode())) {
SMDiagnostic Error;
M = parseAssembly(MemoryBufferRef(BSN->getValue(), Filename), Error,
Context, &IRSlots, /*UpgradeDebugInfo=*/false);
Context, &IRSlots, DataLayoutCallback);
if (!M) {
reportDiagnostic(diagFromBlockStringDiag(Error, BSN->getSourceRange()));
return nullptr;
@@ -243,6 +248,8 @@ std::unique_ptr<Module> MIRParserImpl::parseIRModule() {
} else {
// Create an new, empty module.
M = std::make_unique<Module>(Filename, Context);
if (auto LayoutOverride = DataLayoutCallback(M->getTargetTriple()))
M->setDataLayout(*LayoutOverride);
NoLLVMIR = true;
}
return M;
@@ -933,8 +940,9 @@ MIRParser::MIRParser(std::unique_ptr<MIRParserImpl> Impl)

MIRParser::~MIRParser() {}

std::unique_ptr<Module> MIRParser::parseIRModule() {
return Impl->parseIRModule();
std::unique_ptr<Module>
MIRParser::parseIRModule(DataLayoutCallbackTy DataLayoutCallback) {
return Impl->parseIRModule(DataLayoutCallback);
}

bool MIRParser::parseMachineFunctions(Module &M, MachineModuleInfo &MMI) {

@@ -67,15 +67,14 @@ std::unique_ptr<Module> llvm::getLazyIRFileModule(StringRef Filename,

std::unique_ptr<Module> llvm::parseIR(MemoryBufferRef Buffer, SMDiagnostic &Err,
LLVMContext &Context,
bool UpgradeDebugInfo,
StringRef DataLayoutString) {
DataLayoutCallbackTy DataLayoutCallback) {
NamedRegionTimer T(TimeIRParsingName, TimeIRParsingDescription,
TimeIRParsingGroupName, TimeIRParsingGroupDescription,
TimePassesIsEnabled);
if (isBitcode((const unsigned char *)Buffer.getBufferStart(),
(const unsigned char *)Buffer.getBufferEnd())) {
Expected<std::unique_ptr<Module>> ModuleOrErr =
parseBitcodeFile(Buffer, Context);
parseBitcodeFile(Buffer, Context, DataLayoutCallback);
if (Error E = ModuleOrErr.takeError()) {
handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
Err = SMDiagnostic(Buffer.getBufferIdentifier(), SourceMgr::DK_Error,
@@ -83,19 +82,15 @@ std::unique_ptr<Module> llvm::parseIR(MemoryBufferRef Buffer, SMDiagnostic &Err,
});
return nullptr;
}
if (!DataLayoutString.empty())
ModuleOrErr.get()->setDataLayout(DataLayoutString);
return std::move(ModuleOrErr.get());
}

return parseAssembly(Buffer, Err, Context, nullptr, UpgradeDebugInfo,
DataLayoutString);
return parseAssembly(Buffer, Err, Context, nullptr, DataLayoutCallback);
}

std::unique_ptr<Module> llvm::parseIRFile(StringRef Filename, SMDiagnostic &Err,
LLVMContext &Context,
bool UpgradeDebugInfo,
StringRef DataLayoutString) {
std::unique_ptr<Module>
llvm::parseIRFile(StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
DataLayoutCallbackTy DataLayoutCallback) {
ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
MemoryBuffer::getFileOrSTDIN(Filename);
if (std::error_code EC = FileOrErr.getError()) {
@@ -105,7 +100,7 @@ std::unique_ptr<Module> llvm::parseIRFile(StringRef Filename, SMDiagnostic &Err,
}

return parseIR(FileOrErr.get()->getMemBufferRef(), Err, Context,
UpgradeDebugInfo, DataLayoutString);
DataLayoutCallback);
}

//===----------------------------------------------------------------------===//

@@ -38,7 +38,7 @@ define i32 @different_array_test(i64 %A, i64 %B) {
; USE_ASSUME-NEXT: [[POINTER:%.*]] = getelementptr [100 x i32], [100 x i32]* [[ARRAY11]], i64 0, i64 [[A:%.*]]
; USE_ASSUME-NEXT: [[POINTER2:%.*]] = getelementptr [200 x i32], [200 x i32]* [[ARRAY22]], i64 0, i64 [[B:%.*]]
; USE_ASSUME-NEXT: store i32 7, i32* [[POINTER2]], align 4
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[POINTER]], i64 4), "nonnull"(i32* [[POINTER]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[POINTER]], i64 4), "nonnull"(i32* [[POINTER]]), "align"(i32* [[POINTER]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
%Array1 = alloca i32, i32 100
@@ -78,7 +78,7 @@ define i32 @constant_array_index_test() {
; USE_ASSUME-NEXT: [[P1:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY1]], i64 0, i64 7
; USE_ASSUME-NEXT: [[P2:%.*]] = getelementptr inbounds [100 x i32], [100 x i32]* [[ARRAY1]], i64 0, i64 6
; USE_ASSUME-NEXT: store i32 1, i32* [[P2]], align 4
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 4), "nonnull"(i32* [[P1]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P1]], i64 4), "nonnull"(i32* [[P1]]), "align"(i32* [[P1]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
%Array = alloca i32, i32 100
@@ -105,7 +105,7 @@ define i32 @gep_distance_test(i32* %A) {
; USE_ASSUME-LABEL: @gep_distance_test(
; USE_ASSUME-NEXT: [[B:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 2
; USE_ASSUME-NEXT: store i32 7, i32* [[B]], align 4
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[A]], i64 4), "nonnull"(i32* [[A]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[A]], i64 4), "nonnull"(i32* [[A]]), "align"(i32* [[A]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
%REMOVEu = load i32, i32* %A
@@ -128,7 +128,7 @@ define i32 @gep_distance_test2({i32,i32}* %A, i64 %distance) {
; USE_ASSUME-NEXT: [[A1:%.*]] = getelementptr { i32, i32 }, { i32, i32 }* [[A:%.*]], i64 0, i32 0
; USE_ASSUME-NEXT: [[B:%.*]] = getelementptr { i32, i32 }, { i32, i32 }* [[A]], i64 [[DISTANCE:%.*]], i32 1
; USE_ASSUME-NEXT: store i32 7, i32* [[B]], align 4
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[A1]], i64 4), "nonnull"(i32* [[A1]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[A1]], i64 4), "nonnull"(i32* [[A1]]), "align"(i32* [[A1]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
%A1 = getelementptr {i32,i32}, {i32,i32}* %A, i64 0, i32 0
@@ -153,7 +153,7 @@ define i32 @gep_distance_test3(i32 * %A) {
; USE_ASSUME-NEXT: [[C1:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 1
; USE_ASSUME-NEXT: [[C:%.*]] = bitcast i32* [[C1]] to i8*
; USE_ASSUME-NEXT: store i8 42, i8* [[C]], align 1
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[A]], i64 4), "nonnull"(i32* [[A]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[A]], i64 4), "nonnull"(i32* [[A]]), "align"(i32* [[A]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
%X = load i32, i32* %A
@@ -177,7 +177,7 @@ define i32 @constexpr_test() {
; USE_ASSUME-NEXT: [[X:%.*]] = alloca i32, align 4
; USE_ASSUME-NEXT: call void @external(i32* nonnull [[X]])
; USE_ASSUME-NEXT: store i32 5, i32* getelementptr inbounds ({ i32 }, { i32 }* @Global, i64 0, i32 0), align 4
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[X]], i64 4), "nonnull"(i32* [[X]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[X]], i64 4), "nonnull"(i32* [[X]]), "align"(i32* [[X]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
%X = alloca i32

@@ -64,7 +64,7 @@ exit:
define i8 @test2(i1 %cmp, i8 *%p) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES1:%.*]] = load i8, i8* [[P:%.*]]
; CHECK-NEXT: [[RES1:%.*]] = load i8, i8* [[P:%.*]], align 1
; CHECK-NEXT: call void @foo(i8* [[P]])
; CHECK-NEXT: br i1 [[CMP:%.*]], label [[B2:%.*]], label [[B1:%.*]]
; CHECK: b1:
@@ -102,7 +102,7 @@ alive:
define i8 @test3(i1 %cmp, i8 *%p) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES1:%.*]] = load i8, i8* [[P:%.*]]
; CHECK-NEXT: [[RES1:%.*]] = load i8, i8* [[P:%.*]], align 1
; CHECK-NEXT: call void @foo(i8* [[P]])
; CHECK-NEXT: br i1 [[CMP:%.*]], label [[B1:%.*]], label [[B2:%.*]]
; CHECK: b1:

@@ -348,7 +348,7 @@ define i8 @unoptimizable2() {
; CHECK-NEXT: call void @use(i8* %ptr3)
call void @use(i8* %ptr3)
; CHECK: MemoryUse(7)
; CHECK-NEXT: %v = load i8, i8* %ptr3, !invariant.group !0
; CHECK-NEXT: %v = load i8, i8* %ptr3, align 1, !invariant.group !0
%v = load i8, i8* %ptr3, !invariant.group !0
ret i8 %v
}

@@ -5,7 +5,7 @@ define i1 @header_with_icf(i32* noalias %p, i32 %high) {
; CHECK-LABEL: @header_with_icf(
; CHECK-LABEL: loop:
; CHECK: %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] ; (mustexec in: loop)
; CHECK: %v = load i32, i32* %p ; (mustexec in: loop)
; CHECK: %v = load i32, i32* %p, align 4 ; (mustexec in: loop)
; CHECK: call void @maythrow_and_use(i32 %v) ; (mustexec in: loop)
; CHECK-NOT: mustexec

@@ -28,7 +28,7 @@ define i1 @split_header(i32* noalias %p, i32 %high) {
; CHECK-LABEL: @split_header(
; CHECK-LABEL: loop:
; CHECK: %iv = phi i32 [ 0, %entry ], [ %iv.next, %next ] ; (mustexec in: loop)
; CHECK: %v = load i32, i32* %p ; (mustexec in: loop)
; CHECK: %v = load i32, i32* %p, align 4 ; (mustexec in: loop)
; CHECK: br label %next ; (mustexec in: loop)
; CHECK-NOT: mustexec
entry:
@@ -56,7 +56,7 @@ define i1 @nested(i32* noalias %p, i32 %high) {
; CHECK: %iv = phi i32 [ 0, %entry ], [ %iv.next, %next ] ; (mustexec in: loop)
; CHECK: br label %inner_loop ; (mustexec in: loop)
; CHECK-LABEL: inner_loop:
; CHECK: %v = load i32, i32* %p ; (mustexec in: inner_loop)
; CHECK: %v = load i32, i32* %p, align 4 ; (mustexec in: inner_loop)
; CHECK: %inner.test = icmp eq i32 %v, 0 ; (mustexec in: inner_loop)
; CHECK: br i1 %inner.test, label %inner_loop, label %next ; (mustexec in: inner_loop)
; CHECK-NOT: mustexec
@@ -89,7 +89,7 @@ define i1 @nested_no_throw(i32* noalias %p, i32 %high) {
; CHECK: %iv = phi i32 [ 0, %entry ], [ %iv.next, %next ] ; (mustexec in: loop)
; CHECK: br label %inner_loop ; (mustexec in: loop)
; CHECK-LABEL: inner_loop:
; CHECK: %v = load i32, i32* %p ; (mustexec in 2 loops: inner_loop, loop)
; CHECK: %v = load i32, i32* %p, align 4 ; (mustexec in 2 loops: inner_loop, loop)
; CHECK: %inner.test = icmp eq i32 %v, 0 ; (mustexec in 2 loops: inner_loop, loop)
; CHECK: br i1 %inner.test, label %inner_loop, label %next ; (mustexec in 2 loops: inner_loop, loop)
; CHECK-LABEL: next:
@@ -127,7 +127,7 @@ define i1 @nothrow_loop(i32* noalias %p, i32 %high) {
; CHECK: %iv = phi i32 [ 0, %entry ], [ %iv.next, %next ] ; (mustexec in: loop)
; CHECK: br label %next ; (mustexec in: loop)
; CHECK-LABEL: next:
; CHECK: %v = load i32, i32* %p ; (mustexec in: loop)
; CHECK: %v = load i32, i32* %p, align 4 ; (mustexec in: loop)
; CHECK: %iv.next = add nuw nsw i32 %iv, 1 ; (mustexec in: loop)
; CHECK: %exit.test = icmp slt i32 %iv, %high ; (mustexec in: loop)
; CHECK: br i1 %exit.test, label %exit, label %loop ; (mustexec in: loop)

@@ -7,7 +7,7 @@ define void @f0(i8* %len_addr) {
entry:
%len = load i8, i8* %len_addr, !range !0
%len_norange = load i8, i8* %len_addr
; CHECK: %len = load i8, i8* %len_addr, !range !0
; CHECK: %len = load i8, i8* %len_addr, align 1, !range !0
; CHECK-NEXT: --> %len U: [0,127) S: [0,127)
; CHECK: %len_norange = load i8, i8* %len_addr
; CHECK-NEXT: --> %len_norange U: full-set S: full-set
@@ -48,7 +48,7 @@ define void @f1(i8* %len_addr) {
entry:
%len = load i8, i8* %len_addr, !range !0
%len_norange = load i8, i8* %len_addr
; CHECK: %len = load i8, i8* %len_addr, !range !0
; CHECK: %len = load i8, i8* %len_addr, align 1, !range !0
; CHECK-NEXT: --> %len U: [0,127) S: [0,127)
; CHECK: %len_norange = load i8, i8* %len_addr
; CHECK-NEXT: --> %len_norange U: full-set S: full-set
@@ -89,7 +89,7 @@ define void @f2(i8* %len_addr) {
entry:
%len = load i8, i8* %len_addr, !range !0
%len_norange = load i8, i8* %len_addr
; CHECK: %len = load i8, i8* %len_addr, !range !0
; CHECK: %len = load i8, i8* %len_addr, align 1, !range !0
; CHECK-NEXT: --> %len U: [0,127) S: [0,127)
; CHECK: %len_norange = load i8, i8* %len_addr
; CHECK-NEXT: --> %len_norange U: full-set S: full-set

@@ -1,7 +1,7 @@
; RUN: opt -tbaa -sink -S < %s | FileCheck %s

; CHECK: a:
; CHECK: %f = load float, float* %p, !tbaa [[TAGA:!.*]]
; CHECK: %f = load float, float* %p, align 4, !tbaa [[TAGA:!.*]]
; CHECK: store float %f, float* %q

define void @foo(float* %p, i1 %c, float* %q, float* %r) {

@@ -59,7 +59,7 @@ define void @f_2(i8* align 4 dereferenceable_or_null(1024) %ptr) {
; CHECK-LABEL: @f_2(
; CHECK-NOT: load
; CHECK: call void @use(i32 0)
; CHECK-NEXT: %val = load i32, i32* %ptr.i32, !invariant.load !0
; CHECK-NEXT: %val = load i32, i32* %ptr.i32, align 4, !invariant.load !0
; CHECK-NEXT: call void @use(i32 %val)

entry:

@@ -39,10 +39,10 @@ entry:
; CHECK-NEXT: %res4 = load volatile i8, i8* %ptr1, align 1
%res4 = load volatile i8, i8* %ptr1, align 1

; CHECK-NEXT: %res5 = load i8, i8* %ptr1, !nontemporal !0
; CHECK-NEXT: %res5 = load i8, i8* %ptr1, align 1, !nontemporal !0
%res5 = load i8, i8* %ptr1, !nontemporal !0

; CHECK-NEXT: %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
; CHECK-NEXT: %res6 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
%res6 = load volatile i8, i8* %ptr1, !nontemporal !0

; CHECK-NEXT: %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
@@ -51,10 +51,10 @@ entry:
; CHECK-NEXT: %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
%res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0

; CHECK-NEXT: %res9 = load i8, i8* %ptr1, !invariant.load !1
; CHECK-NEXT: %res9 = load i8, i8* %ptr1, align 1, !invariant.load !1
%res9 = load i8, i8* %ptr1, !invariant.load !1

; CHECK-NEXT: %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
; CHECK-NEXT: %res10 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
%res10 = load volatile i8, i8* %ptr1, !invariant.load !1

; CHECK-NEXT: %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1

@@ -4,12 +4,12 @@
; Check that strided access metadata is added to loads in inner loops when compiling for Falkor.

; CHECK-LABEL: @hwpf1(
; CHECK: load i32, i32* %gep, !falkor.strided.access !0
; CHECK: load i32, i32* %gep2, !falkor.strided.access !0
; CHECK: load i32, i32* %gep, align 4, !falkor.strided.access !0
; CHECK: load i32, i32* %gep2, align 4, !falkor.strided.access !0

; NOHWPF-LABEL: @hwpf1(
; NOHWPF: load i32, i32* %gep{{$}}
; NOHWPF: load i32, i32* %gep2{{$}}
; NOHWPF: load i32, i32* %gep, align 4{{$}}
; NOHWPF: load i32, i32* %gep2, align 4{{$}}
define void @hwpf1(i32* %p, i32* %p2) {
entry:
br label %loop
@@ -33,12 +33,12 @@ exit:

; Check that outer loop strided load isn't marked.
; CHECK-LABEL: @hwpf2(
; CHECK: load i32, i32* %gep, !falkor.strided.access !0
; CHECK: load i32, i32* %gep2{{$}}
; CHECK: load i32, i32* %gep, align 4, !falkor.strided.access !0
; CHECK: load i32, i32* %gep2, align 4{{$}}

; NOHWPF-LABEL: @hwpf2(
; NOHWPF: load i32, i32* %gep{{$}}
; NOHWPF: load i32, i32* %gep2{{$}}
; NOHWPF: load i32, i32* %gep, align 4{{$}}
; NOHWPF: load i32, i32* %gep2, align 4{{$}}
define void @hwpf2(i32* %p) {
entry:
br label %loop1
@@ -78,12 +78,12 @@ exit:

; Check that non-strided load isn't marked.
; CHECK-LABEL: @hwpf3(
; CHECK: load i32, i32* %gep, !falkor.strided.access !0
; CHECK: load i32, i32* %gep2{{$}}
; CHECK: load i32, i32* %gep, align 4, !falkor.strided.access !0
; CHECK: load i32, i32* %gep2, align 4{{$}}

; NOHWPF-LABEL: @hwpf3(
; NOHWPF: load i32, i32* %gep{{$}}
; NOHWPF: load i32, i32* %gep2{{$}}
; NOHWPF: load i32, i32* %gep, align 4{{$}}
; NOHWPF: load i32, i32* %gep2, align 4{{$}}
define void @hwpf3(i32* %p, i32* %p2) {
entry:
br label %loop

@@ -1,16 +1,19 @@
; RUN: not llvm-as -data-layout=A5 < %s 2>&1 | FileCheck -check-prefixes=COMMON,AS %s
; RUN: not llc -mtriple amdgcn-amd-amdhsa < %s 2>&1 | FileCheck -check-prefixes=COMMON,LLC %s
; RUN: llvm-as < %s | not llc -mtriple amdgcn-amd-amdhsa 2>&1 | FileCheck -check-prefixes=COMMON,LLC %s
; RUN: llvm-as < %s | not llc -mtriple amdgcn-amd-amdhsa 2>&1 | FileCheck -check-prefixes=MISMATCH %s
; RUN: not opt -data-layout=A5 -S < %s 2>&1 | FileCheck -check-prefixes=COMMON,LLC %s
; RUN: llvm-as < %s | not opt -data-layout=A5 2>&1 | FileCheck -check-prefixes=COMMON,LLC %s
; RUN: llvm-as < %s | not opt -data-layout=A5 2>&1 | FileCheck -check-prefixes=MISMATCH %s

; AS: assembly parsed, but does not verify as correct!
; COMMON: Allocation instruction pointer not in the stack address space!
; COMMON: %tmp = alloca i32
; MISMATCH: Explicit load/store type does not match pointee type of pointer operand
; LLC: error: input module is broken!

define amdgpu_kernel void @test() {
%tmp = alloca i32
%tmp2 = alloca i32*
store i32* %tmp, i32** %tmp2
ret void
}

@@ -70,11 +70,11 @@ define i32 @accumulate_square_a0(i16* %a, i16* %b, i32 %acc) {
; CHECK-NEXT: [[LD_A_0:%.*]] = load i16, i16* [[A]]
; CHECK-NEXT: [[SEXT_A_0:%.*]] = sext i16 [[LD_A_0]] to i32
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[ADDR_A_1]] to i32*
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 2
; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
; CHECK-NEXT: [[TMP3:%.*]] = sext i16 [[TMP2]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16* [[ADDR_B_1]] to i32*
; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 2
; CHECK-NEXT: [[TMP6:%.*]] = trunc i32 [[TMP5]] to i16
; CHECK-NEXT: [[TMP7:%.*]] = sext i16 [[TMP6]] to i32
; CHECK-NEXT: [[MUL_0:%.*]] = mul i32 [[SEXT_A_0]], [[SEXT_A_0]]
@@ -115,12 +115,12 @@ define i32 @accumulate_square_a2(i16* %a, i16* %b, i32 %acc) {
; CHECK-LABEL: @accumulate_square_a2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[A:%.*]] to i32*
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 2
; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i16
; CHECK-NEXT: [[TMP4:%.*]] = sext i16 [[TMP3]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16* [[B:%.*]] to i32*
; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 2
; CHECK-NEXT: [[TMP7:%.*]] = lshr i32 [[TMP6]], 16
; CHECK-NEXT: [[TMP8:%.*]] = trunc i32 [[TMP7]] to i16
; CHECK-NEXT: [[TMP9:%.*]] = sext i16 [[TMP8]] to i32

@@ -169,7 +169,7 @@ define void @testcallprologue() {
ret void
}

define i32 @icall(i32 (i32)* %foo) {
define i32 @icall(i32 (i32) addrspace(1)* %foo) {
; CHECK-LABEL: icall:
; CHECK: movw r30, r24
; CHECK: ldi r22, 147

@@ -1,6 +1,6 @@
; RUN: llc -mattr=lpm,lpmw < %s -march=avr | FileCheck %s

@callbackPtr = common global void (i16)* null, align 8
@callbackPtr = common global void (i16) addrspace(1)* null, align 8
@myValuePtr = common global i16* null, align 8

@externalConstant = external global i16, align 2
@@ -13,7 +13,7 @@ define void @loadCallbackPtr() {
entry:
; CHECK: ldi r{{[0-9]+}}, pm_lo8(externalFunction)
; CHECK-NEXT: ldi r{{[0-9]+}}, pm_hi8(externalFunction)
store void (i16)* @externalFunction, void (i16)** @callbackPtr, align 8
store void (i16) addrspace(1)* @externalFunction, void (i16) addrspace(1)** @callbackPtr, align 8
ret void
}

@@ -4,7 +4,7 @@

@str.1b = external constant [0 x i8]

define void @"TryFromIntError::Debug"(%"fmt::Formatter"* dereferenceable(32)) unnamed_addr #0 personality i32 (...)* @rust_eh_personality {
define void @"TryFromIntError::Debug"(%"fmt::Formatter"* dereferenceable(32)) unnamed_addr #0 personality i32 (...) addrspace(1)* @rust_eh_personality {
; CHECK-LABEL: "TryFromIntError::Debug"
start:
%builder = alloca i8, align 8
@@ -14,8 +14,8 @@ start:
%4 = getelementptr inbounds %"fmt::Formatter", %"fmt::Formatter"* %0, i16 0, i32 1, i32 1
%5 = load void (i8*)**, void (i8*)*** %4, align 2
%6 = getelementptr inbounds void (i8*)*, void (i8*)** %5, i16 3
%7 = bitcast void (i8*)** %6 to i8 ({}*, i8*, i16)**
%8 = load i8 ({}*, i8*, i16)*, i8 ({}*, i8*, i16)** %7, align 2
%7 = bitcast void (i8*)** %6 to i8 ({}*, i8*, i16) addrspace(1)**
%8 = load i8 ({}*, i8*, i16) addrspace(1)*, i8 ({}*, i8*, i16) addrspace(1)** %7, align 2
%9 = tail call i8 %8({}* nonnull %3, i8* noalias nonnull readonly getelementptr inbounds ([0 x i8], [0 x i8]* @str.1b, i16 0, i16 0), i16 15)
unreachable
}

@@ -6,16 +6,16 @@
@str.4S = external constant [5 x i8]

; Function Attrs: uwtable
define void @"_ZN65_$LT$lib..str..Chars$LT$$u27$a$GT$$u20$as$u20$lib..fmt..Debug$GT$3fmt17h76a537e22649f739E"(%"fmt::Formatter.1.77.153.229.305.381.1673"* dereferenceable(27) %__arg_0) unnamed_addr #0 personality i32 (...)* @rust_eh_personality {
define void @"_ZN65_$LT$lib..str..Chars$LT$$u27$a$GT$$u20$as$u20$lib..fmt..Debug$GT$3fmt17h76a537e22649f739E"(%"fmt::Formatter.1.77.153.229.305.381.1673"* dereferenceable(27) %__arg_0) unnamed_addr #0 personality i32 (...) addrspace(1)* @rust_eh_personality {
; CHECK-LABEL: "_ZN65_$LT$lib..str..Chars$LT$$u27$a$GT$$u20$as$u20$lib..fmt..Debug$GT$3fmt17h76a537e22649f739E"
start:
%0 = getelementptr inbounds %"fmt::Formatter.1.77.153.229.305.381.1673", %"fmt::Formatter.1.77.153.229.305.381.1673"* %__arg_0, i16 0, i32 11, i32 0
%1 = load {}*, {}** %0, align 1, !noalias !0, !nonnull !9
%2 = getelementptr inbounds %"fmt::Formatter.1.77.153.229.305.381.1673", %"fmt::Formatter.1.77.153.229.305.381.1673"* %__arg_0, i16 0, i32 11, i32 1
%3 = bitcast {}** %2 to i1 ({}*, [0 x i8]*, i16)***
%4 = load i1 ({}*, [0 x i8]*, i16)**, i1 ({}*, [0 x i8]*, i16)*** %3, align 1, !noalias !0, !nonnull !9
%5 = getelementptr inbounds i1 ({}*, [0 x i8]*, i16)*, i1 ({}*, [0 x i8]*, i16)** %4, i16 3
%6 = load i1 ({}*, [0 x i8]*, i16)*, i1 ({}*, [0 x i8]*, i16)** %5, align 1, !invariant.load !9, !noalias !0, !nonnull !9
%3 = bitcast {}** %2 to i1 ({}*, [0 x i8]*, i16) addrspace(1)***
%4 = load i1 ({}*, [0 x i8]*, i16) addrspace(1)**, i1 ({}*, [0 x i8]*, i16) addrspace(1)*** %3, align 1, !noalias !0, !nonnull !9
%5 = getelementptr inbounds i1 ({}*, [0 x i8]*, i16) addrspace(1)*, i1 ({}*, [0 x i8]*, i16) addrspace(1)** %4, i16 3
%6 = load i1 ({}*, [0 x i8]*, i16) addrspace(1)*, i1 ({}*, [0 x i8]*, i16) addrspace(1)** %5, align 1, !invariant.load !9, !noalias !0, !nonnull !9
%7 = tail call zeroext i1 %6({}* nonnull %1, [0 x i8]* noalias nonnull readonly bitcast ([5 x i8]* @str.4S to [0 x i8]*), i16 5), !noalias !10
unreachable
}

@ -10,7 +10,7 @@ entry:
br label %0, !md !1
; <label:0>
; CHECK: %1 = load i32, i32* %retval, !md !2
; CHECK: %1 = load i32, i32* %retval, align 4, !md !2
; CHECK: ret i32 %1, !md !3
%1 = load i32, i32* %retval, !md !2
ret i32 %1, !md !3
@ -10,14 +10,14 @@
; RUN: %t.bc -disable-verify 2>&1 | \
; RUN: FileCheck %s -allow-empty -check-prefix=CHECK-WARN
; ---- Thin LTO (optimize, strip main file) -----------------
; RUN: opt -disable-verify -module-summary %s -o %t.bc
; RUN: opt -disable-verify -module-summary %S/Inputs/strip-debug-info-bar.ll \
; RUN: opt -disable-verify -disable-upgrade-debug-info -module-summary %s -o %t.bc
; RUN: opt -disable-verify -disable-upgrade-debug-info -module-summary %S/Inputs/strip-debug-info-bar.ll \
; RUN: -o %t2.bc
; RUN: llvm-lto -thinlto -thinlto-action=run \
; RUN: %t.bc -disable-verify 2>&1 | \
; RUN: FileCheck %s -allow-empty -check-prefix=CHECK-WARN
; ---- Thin LTO (optimize, strip imported file) -------------
; RUN: opt -disable-verify -strip-debug -module-summary %t.bc -o %t-stripped.bc
; RUN: opt -module-summary %t.bc -o %t-stripped.bc
; RUN: llvm-lto -thinlto-action=thinlink -o %t.index.bc %t-stripped.bc %t2.bc
; RUN: llvm-lto -thinlto -thinlto-action=import \
; RUN: -thinlto-index=%t.index.bc \
@ -13,7 +13,7 @@ define internal i32 @callee(i1 %C, i32* %P) {
; CHECK: T:
; CHECK-NEXT: ret i32 17
; CHECK: F:
; CHECK-NEXT: [[X:%.*]] = load i32, i32* [[P]], align 1
; CHECK-NEXT: [[X:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: ret i32 [[X]]
;
entry:
@ -7,7 +7,7 @@ define void @foo() {
enter:
; CHECK-NOT: !invariant.group
; CHECK-NOT: @llvm.launder.invariant.group.p0i8(
; CHECK: %val = load i8, i8* @tmp{{$}}
; CHECK: %val = load i8, i8* @tmp, align 1{{$}}
%val = load i8, i8* @tmp, !invariant.group !0
%ptr = call i8* @llvm.launder.invariant.group.p0i8(i8* @tmp)
@ -23,7 +23,7 @@ define void @foo2() {
enter:
; CHECK-NOT: !invariant.group
; CHECK-NOT: @llvm.strip.invariant.group.p0i8(
; CHECK: %val = load i8, i8* @tmp{{$}}
; CHECK: %val = load i8, i8* @tmp, align 1{{$}}
%val = load i8, i8* @tmp, !invariant.group !0
%ptr = call i8* @llvm.strip.invariant.group.p0i8(i8* @tmp)
@ -3,7 +3,7 @@
define void @test1(i8* %ptr) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[A:%.*]] = load i8, i8* [[PTR:%.*]]
; CHECK-NEXT: [[A:%.*]] = load i8, i8* [[PTR:%.*]], align 1
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: ret void
@ -17,7 +17,7 @@ bb:
define void @test1_no_null_opt(i8* %ptr) #0 {
; CHECK-LABEL: @test1_no_null_opt(
; CHECK-NEXT: [[A:%.*]] = load i8, i8* [[PTR:%.*]]
; CHECK-NEXT: [[A:%.*]] = load i8, i8* [[PTR:%.*]], align 1
; CHECK-NEXT: br label [[BB:%.*]]
; CHECK: bb:
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i8* [[PTR]], null
@ -309,7 +309,7 @@ define void @test12(i8* %arg1, i8** %arg2) {
; CHECK: non_null:
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: null:
; CHECK-NEXT: [[ANOTHER_ARG:%.*]] = load i8*, i8** [[ARG2:%.*]], !nonnull !0
; CHECK-NEXT: [[ANOTHER_ARG:%.*]] = load i8*, i8** [[ARG2:%.*]], align 8, !nonnull !0
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: [[MERGED_ARG:%.*]] = phi i8* [ [[ANOTHER_ARG]], [[NULL]] ], [ [[ARG1]], [[NON_NULL]] ]
@ -8,7 +8,7 @@ define i8* @simplify_phi_common_value_op0(i8* %ptr, i32* %b) {
; CHECK-NEXT: [[ISNULL:%.*]] = icmp eq i8* [[PTR:%.*]], null
; CHECK-NEXT: br i1 [[ISNULL]], label [[RETURN:%.*]], label [[ELSE:%.*]]
; CHECK: else:
; CHECK-NEXT: [[LB:%.*]] = load i32, i32* [[B:%.*]]
; CHECK-NEXT: [[LB:%.*]] = load i32, i32* [[B:%.*]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LB]], 1
; CHECK-NEXT: store i32 [[ADD]], i32* [[B]]
; CHECK-NEXT: br label [[RETURN]]
@ -36,7 +36,7 @@ define i8* @simplify_phi_common_value_op1(i8* %ptr, i32* %b) {
; CHECK-NEXT: [[ISNULL:%.*]] = icmp eq i8* [[PTR:%.*]], null
; CHECK-NEXT: br i1 [[ISNULL]], label [[RETURN:%.*]], label [[ELSE:%.*]]
; CHECK: else:
; CHECK-NEXT: [[LB:%.*]] = load i32, i32* [[B:%.*]]
; CHECK-NEXT: [[LB:%.*]] = load i32, i32* [[B:%.*]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LB]], 1
; CHECK-NEXT: store i32 [[ADD]], i32* [[B]]
; CHECK-NEXT: br label [[RETURN]]
@ -67,7 +67,7 @@ define i8 @simplify_phi_multiple_constants(i8 %x, i32* %b) {
; CHECK-NEXT: [[IS42:%.*]] = icmp eq i8 [[X]], 42
; CHECK-NEXT: br i1 [[IS42]], label [[RETURN]], label [[ELSE2:%.*]]
; CHECK: else2:
; CHECK-NEXT: [[LB:%.*]] = load i32, i32* [[B:%.*]]
; CHECK-NEXT: [[LB:%.*]] = load i32, i32* [[B:%.*]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LB]], 1
; CHECK-NEXT: store i32 [[ADD]], i32* [[B]]
; CHECK-NEXT: br label [[RETURN]]
@ -100,7 +100,7 @@ define i8* @simplify_phi_common_value_from_instruction(i8* %ptr_op, i32* %b, i32
; CHECK-NEXT: [[ISNULL:%.*]] = icmp eq i8* [[PTR]], null
; CHECK-NEXT: br i1 [[ISNULL]], label [[RETURN:%.*]], label [[ELSE:%.*]]
; CHECK: else:
; CHECK-NEXT: [[LB:%.*]] = load i32, i32* [[B:%.*]]
; CHECK-NEXT: [[LB:%.*]] = load i32, i32* [[B:%.*]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LB]], 1
; CHECK-NEXT: store i32 [[ADD]], i32* [[B]]
; CHECK-NEXT: br label [[RETURN]]
@ -240,7 +240,7 @@ sw.default:
define i1 @test8(i64* %p) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: [[A:%.*]] = load i64, i64* [[P:%.*]], !range !0
; CHECK-NEXT: [[A:%.*]] = load i64, i64* [[P:%.*]], align 4, !range !0
; CHECK-NEXT: [[RES:%.*]] = icmp eq i64 [[A]], 0
; CHECK-NEXT: ret i1 false
;
@ -251,7 +251,7 @@ define i1 @test8(i64* %p) {
define i1 @test9(i64* %p) {
; CHECK-LABEL: @test9(
; CHECK-NEXT: [[A:%.*]] = load i64, i64* [[P:%.*]], !range !1
; CHECK-NEXT: [[A:%.*]] = load i64, i64* [[P:%.*]], align 4, !range !1
; CHECK-NEXT: [[RES:%.*]] = icmp eq i64 [[A]], 0
; CHECK-NEXT: ret i1 true
;
@ -262,7 +262,7 @@ define i1 @test9(i64* %p) {
define i1 @test10(i64* %p) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[A:%.*]] = load i64, i64* [[P:%.*]], !range !2
; CHECK-NEXT: [[A:%.*]] = load i64, i64* [[P:%.*]], align 4, !range !2
; CHECK-NEXT: [[RES:%.*]] = icmp eq i64 [[A]], 0
; CHECK-NEXT: ret i1 false
;
@ -275,7 +275,7 @@ define i1 @test10(i64* %p) {
define i1 @test11() {
; CHECK-LABEL: @test11(
; CHECK-NEXT: [[POSITIVE:%.*]] = load i32, i32* @g, !range !3
; CHECK-NEXT: [[POSITIVE:%.*]] = load i32, i32* @g, align 4, !range !3
; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[POSITIVE]], 1
; CHECK-NEXT: [[TEST:%.*]] = icmp sgt i32 [[ADD]], 0
; CHECK-NEXT: br label [[NEXT:%.*]]
@ -657,7 +657,7 @@ else:
@limit = external global i32
define i1 @test15(i32 %a) {
; CHECK-LABEL: @test15(
; CHECK-NEXT: [[LIMIT:%.*]] = load i32, i32* @limit, !range !4
; CHECK-NEXT: [[LIMIT:%.*]] = load i32, i32* @limit, align 4, !range !4
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[A:%.*]], [[LIMIT]]
; CHECK-NEXT: br i1 [[CMP]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK: then:
@ -3,7 +3,7 @@
define void @test(i32* %P) {
; CHECK-LABEL: @test(
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P:%.*]], i64 4), "nonnull"(i32* [[P]]) ]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P:%.*]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; CHECK-NEXT: ret void
;
%a = load i32, i32* %P
@ -54,7 +54,7 @@ define void @test7(i32* noalias %P, i32* noalias %Q) {
; CHECK-LABEL: @test7(
; CHECK-NEXT: br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[P:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[P:%.*]], align 4
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb2:
; CHECK-NEXT: br label [[BB3]]
@ -80,7 +80,7 @@ define i32 @test22(i32* %P, i32* noalias %Q, i32* %R) {
; CHECK-LABEL: @test22(
; CHECK-NEXT: store i32 2, i32* [[P:%.*]]
; CHECK-NEXT: store i32 3, i32* [[Q:%.*]]
; CHECK-NEXT: [[L:%.*]] = load i32, i32* [[R:%.*]]
; CHECK-NEXT: [[L:%.*]] = load i32, i32* [[R:%.*]], align 4
; CHECK-NEXT: ret i32 [[L]]
;
store i32 1, i32* %Q
@ -122,7 +122,7 @@ define void @overlapping_read(i32* %P) {
; CHECK-NEXT: [[P_1:%.*]] = getelementptr i32, i32* [[P]], i32 1
; CHECK-NEXT: store i32 1, i32* [[P_1]]
; CHECK-NEXT: [[P_64:%.*]] = bitcast i32* [[P]] to i64*
; CHECK-NEXT: [[LV:%.*]] = load i64, i64* [[P_64]]
; CHECK-NEXT: [[LV:%.*]] = load i64, i64* [[P_64]], align 8
; CHECK-NEXT: br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br label [[BB3:%.*]]
@ -5,7 +5,7 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
define void @test1(i32* %Q, i32* %P) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[Q:%.*]], i64 4), "nonnull"(i32* [[Q]]) ]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[Q:%.*]], i64 4), "nonnull"(i32* [[Q]]), "align"(i32* [[Q]], i64 4) ]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P:%.*]], i64 4), "nonnull"(i32* [[P]]) ]
; CHECK-NEXT: store i32 0, i32* [[P]]
; CHECK-NEXT: ret void
@ -41,7 +41,7 @@ define i32 @test3(i32* %g_addr) nounwind {
define void @test4(i32* %Q) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[Q:%.*]]
; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[Q:%.*]], align 4
; CHECK-NEXT: store volatile i32 [[A]], i32* [[Q]]
; CHECK-NEXT: ret void
;
@ -55,7 +55,7 @@ define i32 @test3(i32* %g_addr) nounwind {
define void @test4(i32* %Q) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[Q:%.*]]
; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[Q:%.*]], align 4
; CHECK-NEXT: store volatile i32 [[A]], i32* [[Q]]
; CHECK-NEXT: ret void
;
@ -66,7 +66,7 @@ define void @test4(i32* %Q) {
define void @test5(i32* %Q) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: [[A:%.*]] = load volatile i32, i32* [[Q:%.*]]
; CHECK-NEXT: [[A:%.*]] = load volatile i32, i32* [[Q:%.*]], align 4
; CHECK-NEXT: ret void
;
%a = load volatile i32, i32* %Q
@ -18,7 +18,7 @@ define i32 @test0(i32* %ptr, i1 %cond) {
; USE_ASSUME-LABEL: @test0(
; USE_ASSUME-NEXT: store i32 40, i32* [[PTR:%.*]]
; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 40
;
@ -31,14 +31,14 @@ define i32 @test0(i32* %ptr, i1 %cond) {
define i32 @test1(i32* %val, i1 %cond) {
; We can CSE loads over a guard, since it does not clobber memory
; NO_ASSUME-LABEL: @test1(
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]]
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]], align 4
; NO_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
; NO_ASSUME-NEXT: ret i32 0
;
; USE_ASSUME-LABEL: @test1(
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]]
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]], align 4
; USE_ASSUME-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[VAL]], i64 4), "nonnull"(i32* [[VAL]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[VAL]], i64 4), "nonnull"(i32* [[VAL]]), "align"(i32* [[VAL]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
@ -80,13 +80,13 @@ define i32 @test3(i32 %val) {
define i32 @test3.unhandled(i32 %val) {
; After a guard has executed the condition it was guarding is known to
; be true.
; CHECK-LABEL: @test3.unhandled(
; CHECK-NEXT: %cond0 = icmp slt i32 %val, 40
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
; CHECK-NEXT: %cond1 = icmp sge i32 %val, 40
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %cond1) [ "deopt"() ]
; CHECK-NEXT: ret i32 0
; CHECK-NEXT: [[COND0:%.*]] = icmp slt i32 [[VAL:%.*]], 40
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND0]]) [ "deopt"() ]
; CHECK-NEXT: [[COND1:%.*]] = icmp sge i32 [[VAL]], 40
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND1]]) [ "deopt"() ]
; CHECK-NEXT: ret i32 0
;
; Demonstrates a case we do not yet handle (it is legal to fold %cond2
; to false)
@ -7,16 +7,16 @@ declare void @clobber_and_use(i32)
define void @f_0(i32* %ptr) {
; NO_ASSUME-LABEL: @f_0(
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: @f_0(
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; USE_ASSUME-NEXT: ret void
@ -34,15 +34,15 @@ define void @f_0(i32* %ptr) {
define void @f_1(i32* %ptr) {
; We can forward invariant loads to non-invariant loads.
; NO_ASSUME-LABEL: @f_1(
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: @f_1(
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; USE_ASSUME-NEXT: ret void
;
@ -57,15 +57,15 @@ define void @f_1(i32* %ptr) {
define void @f_2(i32* %ptr) {
; We can forward a non-invariant load into an invariant load.
; NO_ASSUME-LABEL: @f_2(
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]]
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: @f_2(
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]]
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; USE_ASSUME-NEXT: ret void
;
@ -79,7 +79,7 @@ define void @f_2(i32* %ptr) {
define void @f_3(i1 %cond, i32* %ptr) {
; NO_ASSUME-LABEL: @f_3(
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
; NO_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; NO_ASSUME-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
; NO_ASSUME: left:
@ -89,11 +89,11 @@ define void @f_3(i1 %cond, i32* %ptr) {
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: @f_3(
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
; USE_ASSUME-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; USE_ASSUME-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
; USE_ASSUME: left:
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; USE_ASSUME-NEXT: ret void
; USE_ASSUME: right:
@ -119,11 +119,11 @@ define void @f_4(i1 %cond, i32* %ptr) {
; CHECK-LABEL: @f_4(
; CHECK-NEXT: br i1 [[COND:%.*]], label [[LEFT:%.*]], label [[MERGE:%.*]]
; CHECK: left:
; CHECK-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], !invariant.load !0
; CHECK-NEXT: [[VAL0:%.*]] = load i32, i32* [[PTR:%.*]], align 4, !invariant.load !0
; CHECK-NEXT: call void @clobber_and_use(i32 [[VAL0]])
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: [[VAL1:%.*]] = load i32, i32* [[PTR]]
; CHECK-NEXT: [[VAL1:%.*]] = load i32, i32* [[PTR]], align 4
; CHECK-NEXT: call void @clobber_and_use(i32 [[VAL1]])
; CHECK-NEXT: ret void
;
@ -148,12 +148,12 @@ merge:
; to restore the same unchanging value.
define void @test_dse1(i32* %p) {
; NO_ASSUME-LABEL: @test_dse1(
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], !invariant.load !0
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: @test_dse1(
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], !invariant.load !0
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: ret void
@ -167,7 +167,7 @@ define void @test_dse1(i32* %p) {
; By assumption, v1 must equal v2 (TODO)
define void @test_false_negative_dse2(i32* %p, i32 %v2) {
; CHECK-LABEL: @test_false_negative_dse2(
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], !invariant.load !0
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
; CHECK-NEXT: call void @clobber_and_use(i32 [[V1]])
; CHECK-NEXT: store i32 [[V2:%.*]], i32* [[P]]
; CHECK-NEXT: ret void
@ -182,15 +182,15 @@ define void @test_false_negative_dse2(i32* %p, i32 %v2) {
; it lets us remove later loads not explicitly marked invariant
define void @test_scope_start_without_load(i32* %p) {
; NO_ASSUME-LABEL: @test_scope_start_without_load(
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]]
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
; NO_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: @test_scope_start_without_load(
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
@ -210,7 +210,7 @@ define void @test_scope_start_without_load(i32* %p) {
; load
define void @test_scope_restart(i32* %p) {
; NO_ASSUME-LABEL: @test_scope_restart(
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], !invariant.load !0
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
; NO_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
; NO_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
@ -218,9 +218,9 @@ define void @test_scope_restart(i32* %p) {
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: @test_scope_restart(
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], !invariant.load !0
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P:%.*]], align 4, !invariant.load !0
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V1]]
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[ADD]])
; USE_ASSUME-NEXT: call void @clobber_and_use(i32 [[V1]])
@ -11,13 +11,13 @@ declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind
define i8 @test_bypass1(i8 *%P) {
; NO_ASSUME-LABEL: define {{[^@]+}}@test_bypass1
; NO_ASSUME-SAME: (i8* [[P:%.*]])
; NO_ASSUME-NEXT: [[V1:%.*]] = load i8, i8* [[P]]
; NO_ASSUME-NEXT: [[V1:%.*]] = load i8, i8* [[P]], align 1
; NO_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
; NO_ASSUME-NEXT: ret i8 0
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_bypass1
; USE_ASSUME-SAME: (i8* [[P:%.*]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i8, i8* [[P]]
; USE_ASSUME-NEXT: [[V1:%.*]] = load i8, i8* [[P]], align 1
; USE_ASSUME-NEXT: [[I:%.*]] = call {}* @llvm.invariant.start.p0i8(i64 1, i8* [[P]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[P]], i64 1), "nonnull"(i8* [[P]]) ]
; USE_ASSUME-NEXT: ret i8 0
@ -107,16 +107,16 @@ define i32 @test_before_load(i32* %p) {
; NO_ASSUME-LABEL: define {{[^@]+}}@test_before_load
; NO_ASSUME-SAME: (i32* [[P:%.*]])
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: ret i32 0
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_before_load
; USE_ASSUME-SAME: (i32* [[P:%.*]])
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
@ -130,17 +130,17 @@ define i32 @test_before_load(i32* %p) {
define i32 @test_before_clobber(i32* %p) {
; NO_ASSUME-LABEL: define {{[^@]+}}@test_before_clobber
; NO_ASSUME-SAME: (i32* [[P:%.*]])
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: ret i32 0
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_before_clobber
; USE_ASSUME-SAME: (i32* [[P:%.*]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
%v1 = load i32, i32* %p
@ -154,7 +154,7 @@ define i32 @test_before_clobber(i32* %p) {
define i32 @test_duplicate_scope(i32* %p) {
; NO_ASSUME-LABEL: define {{[^@]+}}@test_duplicate_scope
; NO_ASSUME-SAME: (i32* [[P:%.*]])
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: [[TMP2:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
@ -162,11 +162,11 @@ define i32 @test_duplicate_scope(i32* %p) {
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_duplicate_scope
; USE_ASSUME-SAME: (i32* [[P:%.*]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: [[TMP2:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
%v1 = load i32, i32* %p
@ -183,7 +183,7 @@ define i32 @test_unanalzyable_load(i32* %p) {
; NO_ASSUME-SAME: (i32* [[P:%.*]])
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: ret i32 0
;
@ -191,9 +191,9 @@ define i32 @test_unanalzyable_load(i32* %p) {
; USE_ASSUME-SAME: (i32* [[P:%.*]])
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
@ -208,10 +208,10 @@ define i32 @test_unanalzyable_load(i32* %p) {
define i32 @test_negative_after_clobber(i32* %p) {
; CHECK-LABEL: define {{[^@]+}}@test_negative_after_clobber
; CHECK-SAME: (i32* [[P:%.*]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
@ -226,7 +226,7 @@ define i32 @test_negative_after_clobber(i32* %p) {
define i32 @test_merge(i32* %p, i1 %cnd) {
; NO_ASSUME-LABEL: define {{[^@]+}}@test_merge
; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; NO_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
; NO_ASSUME: taken:
@ -237,14 +237,14 @@ define i32 @test_merge(i32* %p, i1 %cnd) {
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_merge
; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
; USE_ASSUME: taken:
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: br label [[MERGE]]
; USE_ASSUME: merge:
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
%v1 = load i32, i32* %p
@ -263,14 +263,14 @@ merge:
define i32 @test_negative_after_mergeclobber(i32* %p, i1 %cnd) {
; CHECK-LABEL: define {{[^@]+}}@test_negative_after_mergeclobber
; CHECK-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
; CHECK: taken:
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
@ -292,14 +292,14 @@ merge:
define i32 @test_false_negative_merge(i32* %p, i1 %cnd) {
; CHECK-LABEL: define {{[^@]+}}@test_false_negative_merge
; CHECK-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
; CHECK: taken:
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
@ -321,7 +321,7 @@ define i32 @test_merge_unanalyzable_load(i32* %p, i1 %cnd) {
; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; NO_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
; NO_ASSUME: taken:
; NO_ASSUME-NEXT: call void @clobber()
@ -333,13 +333,13 @@ define i32 @test_merge_unanalyzable_load(i32* %p, i1 %cnd) {
; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; USE_ASSUME-NEXT: br i1 [[CND]], label [[MERGE:%.*]], label [[TAKEN:%.*]]
; USE_ASSUME: taken:
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: br label [[MERGE]]
; USE_ASSUME: merge:
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
call {}* @llvm.invariant.start.p0i32(i64 4, i32* %p)
@ -360,14 +360,14 @@ define void @test_dse_before_load(i32* %p, i1 %cnd) {
; NO_ASSUME-LABEL: define {{[^@]+}}@test_dse_before_load
; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_dse_before_load
; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: ret void
@ -382,14 +382,14 @@ define void @test_dse_before_load(i32* %p, i1 %cnd) {
define void @test_dse_after_load(i32* %p, i1 %cnd) {
; NO_ASSUME-LABEL: define {{[^@]+}}@test_dse_after_load
; NO_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; NO_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: ret void
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_dse_after_load
; USE_ASSUME-SAME: (i32* [[P:%.*]], i1 [[CND:%.*]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; USE_ASSUME-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
@ -410,10 +410,10 @@ define i32 @test_false_negative_types(i32* %p) {
; CHECK-LABEL: define {{[^@]+}}@test_false_negative_types
; CHECK-SAME: (i32* [[P:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[PF:%.*]] = bitcast i32* [[P]] to float*
; CHECK-NEXT: [[V2F:%.*]] = load float, float* [[PF]]
; CHECK-NEXT: [[V2F:%.*]] = load float, float* [[PF]], align 4
; CHECK-NEXT: [[V2:%.*]] = bitcast float [[V2F]] to i32
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
@ -432,9 +432,9 @@ define i32 @test_negative_size1(i32* %p) {
; CHECK-LABEL: define {{[^@]+}}@test_negative_size1
; CHECK-SAME: (i32* [[P:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 3, i32* [[P]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
@ -450,9 +450,9 @@ define i32 @test_negative_size2(i32* %p) {
; CHECK-LABEL: define {{[^@]+}}@test_negative_size2
; CHECK-SAME: (i32* [[P:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 0, i32* [[P]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
@ -469,9 +469,9 @@ define i32 @test_negative_scope(i32* %p) {
; CHECK-SAME: (i32* [[P:%.*]])
; CHECK-NEXT: [[SCOPE:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; CHECK-NEXT: call void @llvm.invariant.end.p0i32({}* [[SCOPE]], i64 4, i32* [[P]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
;
@ -488,9 +488,9 @@ define i32 @test_false_negative_scope(i32* %p) {
; CHECK-LABEL: define {{[^@]+}}@test_false_negative_scope
; CHECK-SAME: (i32* [[P:%.*]])
; CHECK-NEXT: [[SCOPE:%.*]] = call {}* @llvm.invariant.start.p0i32(i64 4, i32* [[P]])
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]]
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[P]], align 4
; CHECK-NEXT: call void @llvm.invariant.end.p0i32({}* [[SCOPE]], i64 4, i32* [[P]])
; CHECK-NEXT: [[SUB:%.*]] = sub i32 [[V1]], [[V2]]
; CHECK-NEXT: ret i32 [[SUB]]
@ -508,15 +508,15 @@ define i32 @test_false_negative_scope(i32* %p) {
define i32 @test_invariant_load_scope(i32* %p) {
; NO_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope
; NO_ASSUME-SAME: (i32* [[P:%.*]])
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], !invariant.load !0
; NO_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4, !invariant.load !0
; NO_ASSUME-NEXT: call void @clobber()
; NO_ASSUME-NEXT: ret i32 0
;
; USE_ASSUME-LABEL: define {{[^@]+}}@test_invariant_load_scope
; USE_ASSUME-SAME: (i32* [[P:%.*]])
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], !invariant.load !0
; USE_ASSUME-NEXT: [[V1:%.*]] = load i32, i32* [[P]], align 4, !invariant.load !0
; USE_ASSUME-NEXT: call void @clobber()
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]) ]
; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[P]], i64 4), "nonnull"(i32* [[P]]), "align"(i32* [[P]], i64 4) ]
; USE_ASSUME-NEXT: ret i32 0
;
%v1 = load i32, i32* %p, !invariant.load !{}
@ -117,7 +117,7 @@ entry:
define i32 @test8(i1 %cnd, i32* %p) {
; CHECK-LABEL: test8
; CHECK: @bar
; CHECK: load i32, i32* %p2, !invariant.load
; CHECK: load i32, i32* %p2, align 4, !invariant.load
; CHECK: br label %merge
entry:
%v1 = load i32, i32* %p, !invariant.load !0
@ -8,7 +8,7 @@ block1:
block2:
br label %block4
; CHECK: block2:
; CHECK-NEXT: load i32, i32* %p, !range !0, !invariant.group !1
; CHECK-NEXT: load i32, i32* %p, align 4, !range !0, !invariant.group !1
block3:
store i32 0, i32* %p
@ -6,7 +6,7 @@ target datalayout = "e-p:64:64:64"
; CHECK: entry.end_crit_edge:
; CHECK: %[[INDEX:[a-z0-9.]+]] = sext i32 %x to i64{{.*}} !dbg [[ZERO_LOC:![0-9]+]]
; CHECK: %[[ADDRESS:[a-z0-9.]+]] = getelementptr [100 x i32], [100 x i32]* @G, i64 0, i64 %[[INDEX]]{{.*}} !dbg [[ZERO_LOC]]
; CHECK: %n.pre = load i32, i32* %[[ADDRESS]], !dbg [[N_LOC:![0-9]+]]
; CHECK: %n.pre = load i32, i32* %[[ADDRESS]], align 4, !dbg [[N_LOC:![0-9]+]]
; CHECK: br label %end
; CHECK: then:
; CHECK: store i32 %z
@ -61,7 +61,7 @@ entry:
define i32 @test4(i32* noalias nocapture %p, i32* noalias nocapture %q) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[X:%.*]] = load i32, i32* [[P:%.*]]
; CHECK-NEXT: [[X:%.*]] = load i32, i32* [[P:%.*]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = load atomic volatile i32, i32* [[Q:%.*]] seq_cst, align 4
; CHECK-NEXT: [[Y:%.*]] = load atomic i32, i32* [[P]] seq_cst, align 4
; CHECK-NEXT: [[ADD:%.*]] = sub i32 [[Y]], [[X]]
@ -160,12 +160,12 @@ exit:
define i32 @test8(i1 %b, i1 %c, i32* noalias %p, i32* noalias %q) {
; CHECK-LABEL: @test8(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[Y1:%.*]] = load i32, i32* [[P:%.*]]
; CHECK-NEXT: [[Y1:%.*]] = load i32, i32* [[P:%.*]], align 4
; CHECK-NEXT: call void @use(i32 [[Y1]])
; CHECK-NEXT: br label [[HEADER:%.*]]
; CHECK: header:
; CHECK-NEXT: [[Y:%.*]] = phi i32 [ [[Y_PRE:%.*]], [[SKIP_HEADER_CRIT_EDGE:%.*]] ], [ [[Y]], [[HEADER]] ], [ [[Y1]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[X:%.*]] = load volatile i32, i32* [[Q:%.*]]
; CHECK-NEXT: [[X:%.*]] = load volatile i32, i32* [[Q:%.*]], align 4
; CHECK-NEXT: call void @use(i32 [[Y]])
; CHECK-NEXT: br i1 [[B:%.*]], label [[SKIP:%.*]], label [[HEADER]]
; CHECK: skip:
@ -161,7 +161,7 @@ enter:
%ptr = alloca i8
store i8 42, i8* %ptr
call void @foo(i8* %ptr)
; CHECK: %[[A:.*]] = load i8, i8* %ptr, !invariant.group
; CHECK: %[[A:.*]] = load i8, i8* %ptr, align 1, !invariant.group
%a = load i8, i8* %ptr, !invariant.group !0
; CHECK-NOT: load
%b = load i8, i8* %ptr, !invariant.group !0
@ -178,7 +178,7 @@ enter:
%ptr = alloca i8
store i8 42, i8* %ptr
call void @foo(i8* %ptr)
; CHECK: %[[D:.*]] = load i8, i8* %ptr, !invariant.group
; CHECK: %[[D:.*]] = load i8, i8* %ptr, align 1, !invariant.group
%c = load i8, i8* %ptr
; CHECK-NOT: load
%d = load i8, i8* %ptr, !invariant.group !0
@ -195,7 +195,7 @@ enter:
%ptr = alloca i8
store i8 42, i8* %ptr
call void @foo(i8* %ptr)
; CHECK: %[[E:.*]] = load i8, i8* %ptr, !invariant.group
; CHECK: %[[E:.*]] = load i8, i8* %ptr, align 1, !invariant.group
%e = load i8, i8* %ptr, !invariant.group !0
; CHECK-NOT: load
%f = load i8, i8* %ptr
@ -212,7 +212,7 @@ enter:
%ptr = alloca i8
store i8 42, i8* %ptr
call void @foo(i8* %ptr)
; CHECK: %[[E:.*]] = load i8, i8* %ptr, !invariant.group
; CHECK: %[[E:.*]] = load i8, i8* %ptr, align 1, !invariant.group
%e = load i8, i8* %ptr, !invariant.group !0
; CHECK-NOT: load
%f = load i8, i8* %ptr, !invariant.group !0
@ -358,10 +358,10 @@ _Z1gR1A.exit: ; preds = %0, %5
; from the same function.
; CHECK-LABEL: define void @testGlobal() {
define void @testGlobal() {
; CHECK: %a = load i8, i8* @unknownPtr, !invariant.group !0
; CHECK: %a = load i8, i8* @unknownPtr, align 1, !invariant.group !0
%a = load i8, i8* @unknownPtr, !invariant.group !0
call void @foo2(i8* @unknownPtr, i8 %a)
; CHECK: %1 = load i8, i8* @unknownPtr, !invariant.group !0
; CHECK: %1 = load i8, i8* @unknownPtr, align 1, !invariant.group !0
%1 = load i8, i8* @unknownPtr, !invariant.group !0
call void @bar(i8 %1)
@ -381,7 +381,7 @@ define void @testGlobal() {
define void @testNotGlobal() {
%a = alloca i8
call void @foo(i8* %a)
; CHECK: %b = load i8, i8* %a, !invariant.group !0
; CHECK: %b = load i8, i8* %a, align 1, !invariant.group !0
%b = load i8, i8* %a, !invariant.group !0
call void @foo2(i8* %a, i8 %b)
@ -13,7 +13,7 @@ define i32 @test1(i32* %p, i32* %q) {
define i32 @test2(i32* %p, i32* %q) {
; CHECK-LABEL: @test2(i32* %p, i32* %q)
; CHECK: load i32, i32* %p, !alias.scope !0
; CHECK: load i32, i32* %p, align 4, !alias.scope !0
; CHECK: %c = add i32 %a, %a
%a = load i32, i32* %p, !alias.scope !0
%b = load i32, i32* %p, !alias.scope !0
@ -27,7 +27,7 @@ define i32 @test2(i32* %p, i32* %q) {
; throw in between.
define i32 @test3(i32* %p, i32* %q) {
; CHECK-LABEL: @test3(i32* %p, i32* %q)
; CHECK: load i32, i32* %p, !alias.scope !1
; CHECK: load i32, i32* %p, align 4, !alias.scope !1
; CHECK: %c = add i32 %a, %a
%a = load i32, i32* %p, !alias.scope !1
%b = load i32, i32* %p, !alias.scope !2
@ -2,7 +2,7 @@
define i32 @test1(i32* %p) {
; CHECK-LABEL: @test1(i32* %p)
; CHECK: %a = load i32, i32* %p, !range ![[RANGE0:[0-9]+]]
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE0:[0-9]+]]
; CHECK: %c = add i32 %a, %a
%a = load i32, i32* %p, !range !0
%b = load i32, i32* %p, !range !0
@ -12,7 +12,7 @@ define i32 @test1(i32* %p) {
define i32 @test2(i32* %p) {
; CHECK-LABEL: @test2(i32* %p)
; CHECK: %a = load i32, i32* %p, !range ![[RANGE0]]
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE0]]
; CHECK: %c = add i32 %a, %a
%a = load i32, i32* %p, !range !0
%b = load i32, i32* %p
@ -22,7 +22,7 @@ define i32 @test2(i32* %p) {
define i32 @test3(i32* %p) {
; CHECK-LABEL: @test3(i32* %p)
; CHECK: %a = load i32, i32* %p, !range ![[RANGE0]]
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE0]]
; CHECK: %c = add i32 %a, %a
%a = load i32, i32* %p, !range !0
%b = load i32, i32* %p, !range !1
@ -32,7 +32,7 @@ define i32 @test3(i32* %p) {
define i32 @test4(i32* %p) {
; CHECK-LABEL: @test4(i32* %p)
; CHECK: %a = load i32, i32* %p, !range ![[RANGE0]]
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE0]]
; CHECK: %c = add i32 %a, %a
%a = load i32, i32* %p, !range !0
%b = load i32, i32* %p, !range !2
@ -42,7 +42,7 @@ define i32 @test4(i32* %p) {
define i32 @test5(i32* %p) {
; CHECK-LABEL: @test5(i32* %p)
; CHECK: %a = load i32, i32* %p, !range ![[RANGE3:[0-9]+]]
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE3:[0-9]+]]
; CHECK: %c = add i32 %a, %a
%a = load i32, i32* %p, !range !3
%b = load i32, i32* %p, !range !4
@ -52,7 +52,7 @@ define i32 @test5(i32* %p) {
define i32 @test6(i32* %p) {
; CHECK-LABEL: @test6(i32* %p)
; CHECK: %a = load i32, i32* %p, !range ![[RANGE5:[0-9]+]]
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE5:[0-9]+]]
; CHECK: %c = add i32 %a, %a
%a = load i32, i32* %p, !range !5
%b = load i32, i32* %p, !range !6
@ -62,7 +62,7 @@ define i32 @test6(i32* %p) {
define i32 @test7(i32* %p) {
; CHECK-LABEL: @test7(i32* %p)
; CHECK: %a = load i32, i32* %p, !range ![[RANGE7:[0-9]+]]
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE7:[0-9]+]]
; CHECK: %c = add i32 %a, %a
%a = load i32, i32* %p, !range !7
%b = load i32, i32* %p, !range !8
@ -72,7 +72,7 @@ define i32 @test7(i32* %p) {
define i32 @test8(i32* %p) {
; CHECK-LABEL: @test8(i32* %p)
; CHECK: %a = load i32, i32* %p, !range ![[RANGE9:[0-9]+]]
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE9:[0-9]+]]
; CHECK-NOT: range
; CHECK: %c = add i32 %a, %a
%a = load i32, i32* %p, !range !9
@ -66,7 +66,7 @@ define void @test_02(i32* %arr, i32* %a_len_ptr) {
; CHECK: test_02(
; CHECK: entry:
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
; CHECH-NEXT: br i1 true, label %loop.preloop.preheader
; CHECK: mainloop:
; CHECK-NEXT: br label %loop
@ -226,7 +226,7 @@ define void @test_05(i32* %p) {
; CHECK-LABEL: test_05
; CHECK-NOT: preloop
; CHECK: entry:
; CHECK-NEXT: %n = load i32, i32* %p, !range !
; CHECK-NEXT: %n = load i32, i32* %p, align 4, !range !
; CHECK-NEXT: [[CMP_1:%[^ ]+]] = icmp ugt i32 %n, 2
; CHECK-NEXT: %exit.mainloop.at = select i1 [[CMP_1]], i32 %n, i32 2
; CHECK-NEXT: [[CMP_2:%[^ ]+]] = icmp ult i32 2, %exit.mainloop.at
@ -22,7 +22,7 @@ define void @test_01(i32* %arr, i32* %a_len_ptr) #0 {
; CHECK-LABEL: test_01(
; CHECK-NOT: preloop
; CHECK: entry:
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
; CHECK-NEXT: [[SUB1:%[^ ]+]] = add nsw i32 %len, -13
; CHECK-NEXT: [[CMP1:%[^ ]+]] = icmp slt i32 [[SUB1]], 101
; CHECK-NEXT: [[SMAX:%[^ ]+]] = select i1 [[CMP1]], i32 [[SUB1]], i32 101
@ -78,7 +78,7 @@ define void @test_02(i32* %arr, i32* %a_len_ptr) #0 {
; CHECK-LABEL: test_02(
; CHECK: entry:
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
; CHECK-NEXT: [[LEN_MINUS_SMAX:%[^ ]+]] = add nuw nsw i32 %len, -2147483647
; CHECK-NEXT: [[CMP1:%[^ ]+]] = icmp sgt i32 [[LEN_MINUS_SMAX]], -13
; CHECK-NEXT: [[SMAX1:%[^ ]+]] = select i1 [[CMP1]], i32 [[LEN_MINUS_SMAX]], i32 -13
@ -145,7 +145,7 @@ define void @test_03(i32* %arr, i32* %a_len_ptr) #0 {
; CHECK-LABEL: test_03(
; CHECK-NOT: preloop
; CHECK: entry:
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
; CHECK-NEXT: [[CMP1:%[^ ]+]] = icmp slt i32 %len, 13
; CHECK-NEXT: [[SMAX1:%[^ ]+]] = select i1 [[CMP1]], i32 %len, i32 13
; CHECK-NEXT: [[SUB3:%[^ ]+]] = sub i32 %len, [[SMAX1]]
@ -199,7 +199,7 @@ define void @test_04(i32* %arr, i32* %a_len_ptr) #0 {
; CHECK-LABEL: test_04(
; CHECK: entry:
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
; CHECK-NEXT: [[SUB1:%[^ ]+]] = add nuw i32 %len, 13
; CHECK-NEXT: [[CMP1:%[^ ]+]] = icmp ult i32 [[SUB1]], 101
; CHECK-NEXT: %exit.mainloop.at = select i1 [[CMP1]], i32 [[SUB1]], i32 101
@ -242,7 +242,7 @@ define void @test_05(i32* %arr, i32* %a_len_ptr) #0 {
; CHECK-LABEL: test_05(
; CHECK-NOT: preloop
; CHECK: entry:
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
; CHECK-NEXT: [[SUB1:%[^ ]+]] = add nsw i32 %len, -13
; CHECK-NEXT: [[CMP1:%[^ ]+]] = icmp slt i32 [[SUB1]], 101
; CHECK-NEXT: [[SMAX:%[^ ]+]] = select i1 [[CMP1]], i32 [[SUB1]], i32 101
@ -283,7 +283,7 @@ define void @test_06(i32* %arr, i32* %a_len_ptr) #0 {
; CHECK-LABEL: test_06(
; CHECK: entry:
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
; CHECK-NEXT: [[LEN_MINUS_SMAX:%[^ ]+]] = add nuw nsw i32 %len, -2147483647
; CHECK-NEXT: [[CMP1:%[^ ]+]] = icmp sgt i32 [[LEN_MINUS_SMAX]], -13
; CHECK-NEXT: [[SMAX1:%[^ ]+]] = select i1 [[CMP1]], i32 [[LEN_MINUS_SMAX]], i32 -13
@ -329,7 +329,7 @@ define void @test_07(i32* %arr, i32* %a_len_ptr) #0 {
; CHECK-LABEL: test_07(
; CHECK-NOT: preloop
; CHECK: entry:
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
; CHECK-NEXT: [[CMP1:%[^ ]+]] = icmp slt i32 %len, 13
; CHECK-NEXT: [[SMAX1:%[^ ]+]] = select i1 [[CMP1]], i32 %len, i32 13
; CHECK-NEXT: [[SUB3:%[^ ]+]] = sub i32 %len, [[SMAX1]]
@ -370,7 +370,7 @@ define void @test_08(i32* %arr, i32* %a_len_ptr) #0 {
; CHECK-LABEL: test_08(
; CHECK: entry:
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
; CHECK-NEXT: [[SUB1:%[^ ]+]] = add nuw i32 %len, 13
; CHECK-NEXT: [[CMP1:%[^ ]+]] = icmp ult i32 [[SUB1]], 101
; CHECK-NEXT: %exit.mainloop.at = select i1 [[CMP1]], i32 [[SUB1]], i32 101
@ -13,7 +13,7 @@ define void @test_01(i32* %arr, i32* %a_len_ptr) #0 {
|
||||
|
||||
; CHECK: test_01(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, !range !0
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, align 4, !range !0
|
||||
; CHECK-NEXT: [[COND:%[^ ]+]] = icmp ult i32 0, %exit.mainloop.at
|
||||
; CHECK-NEXT: br i1 [[COND]], label %loop.preheader, label %main.pseudo.exit
|
||||
; CHECK: loop:
|
||||
@ -56,7 +56,7 @@ define void @test_02(i32* %arr, i32* %a_len_ptr) #0 {
|
||||
|
||||
; CHECK: test_02(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
|
||||
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
|
||||
; CHECK-NEXT: [[COND1:%[^ ]+]] = icmp ugt i32 %len, 1
|
||||
; CHECK-NEXT: [[UMIN:%[^ ]+]] = select i1 [[COND1]], i32 %len, i32 1
|
||||
; CHECK-NEXT: %exit.preloop.at = add nsw i32 [[UMIN]], -1
|
||||
@ -104,7 +104,7 @@ define void @test_03(i32* %arr, i32* %a_len_ptr) #0 {
|
||||
|
||||
; CHECK: test_03(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, !range !0
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, align 4, !range !0
|
||||
; CHECK-NEXT: [[COND:%[^ ]+]] = icmp ult i32 0, %exit.mainloop.at
|
||||
; CHECK-NEXT: br i1 [[COND]], label %loop.preheader, label %main.pseudo.exit
|
||||
; CHECK: loop:
|
||||
@ -147,7 +147,7 @@ define void @test_04(i32* %arr, i32* %a_len_ptr) #0 {
|
||||
|
||||
; CHECK: test_04(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
|
||||
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
|
||||
; CHECK-NEXT: [[COND1:%[^ ]+]] = icmp ugt i32 %len, 1
|
||||
; CHECK-NEXT: [[UMIN:%[^ ]+]] = select i1 [[COND1]], i32 %len, i32 1
|
||||
; CHECK-NEXT: %exit.preloop.at = add nsw i32 [[UMIN]], -1
|
||||
|
@ -16,7 +16,7 @@ define void @test_01(i32* %arr, i32* %a_len_ptr) #0 {
|
||||
|
||||
; CHECK: test_01
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, !range !0
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, align 4, !range !0
|
||||
; CHECK-NEXT: [[COND:%[^ ]+]] = icmp ult i32 0, %exit.mainloop.at
|
||||
; CHECK-NEXT: br i1 [[COND]], label %loop.preheader, label %main.pseudo.exit
|
||||
; CHECK: loop:
|
||||
@ -59,7 +59,7 @@ define void @test_02(i32* %arr, i32* %a_len_ptr) #0 {
|
||||
|
||||
; CHECK: test_02(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
|
||||
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
|
||||
; CHECK-NEXT: [[COND1:%[^ ]+]] = icmp ugt i32 %len, 1
|
||||
; CHECK-NEXT: [[UMIN:%[^ ]+]] = select i1 [[COND1]], i32 %len, i32 1
|
||||
; CHECK-NEXT: %exit.preloop.at = add nsw i32 [[UMIN]], -1
|
||||
@ -107,7 +107,7 @@ define void @test_03(i32* %arr, i32* %a_len_ptr) #0 {
|
||||
|
||||
; CHECK: test_03
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, !range !0
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, align 4, !range !0
|
||||
; CHECK-NEXT: [[COND:%[^ ]+]] = icmp ult i32 0, %exit.mainloop.at
|
||||
; CHECK-NEXT: br i1 [[COND]], label %loop.preheader, label %main.pseudo.exit
|
||||
; CHECK: loop:
|
||||
@ -150,7 +150,7 @@ define void @test_04(i32* %arr, i32* %a_len_ptr) #0 {
|
||||
|
||||
; CHECK: test_04
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, !range !0
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, align 4, !range !0
|
||||
; CHECK-NEXT: [[COND:%[^ ]+]] = icmp ult i32 0, %exit.mainloop.at
|
||||
; CHECK-NEXT: br i1 [[COND]], label %loop.preheader, label %main.pseudo.exit
|
||||
; CHECK: loop:
|
||||
@ -192,7 +192,7 @@ exit:
|
||||
define void @test_05(i32* %arr, i32* %a_len_ptr) #0 {
|
||||
; CHECK: test_05(
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, !range !0
|
||||
; CHECK-NEXT: %len = load i32, i32* %a_len_ptr, align 4, !range !0
|
||||
; CHECK-NEXT: [[COND1:%[^ ]+]] = icmp ugt i32 %len, 1
|
||||
; CHECK-NEXT: [[UMIN:%[^ ]+]] = select i1 [[COND1]], i32 %len, i32 1
|
||||
; CHECK-NEXT: %exit.preloop.at = add nsw i32 [[UMIN]], -1
|
||||
@ -240,7 +240,7 @@ define void @test_06(i32* %arr, i32* %a_len_ptr) #0 {
|
||||
|
||||
; CHECK: test_06
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, !range !0
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, align 4, !range !0
|
||||
; CHECK-NEXT: [[COND:%[^ ]+]] = icmp ult i32 0, %exit.mainloop.at
|
||||
; CHECK-NEXT: br i1 [[COND]], label %loop.preheader, label %main.pseudo.exit
|
||||
; CHECK: loop:
|
||||
@ -316,7 +316,7 @@ define void @test_08(i32* %arr, i32* %a_len_ptr) #0 {
|
||||
|
||||
; CHECK: test_08
|
||||
; CHECK: entry:
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, !range !0
|
||||
; CHECK-NEXT: %exit.mainloop.at = load i32, i32* %a_len_ptr, align 4, !range !0
|
||||
; CHECK-NEXT: [[COND:%[^ ]+]] = icmp ult i32 0, %exit.mainloop.at
|
||||
; CHECK-NEXT: br i1 [[COND]], label %loop.preheader, label %main.pseudo.exit
|
||||
; CHECK: loop:
|
||||
|
@ -557,21 +557,21 @@ define void @store_undef_mask_factor4(<16 x i32>* %ptr, <4 x i32> %v0, <4 x i32>
|
||||
define void @load_address_space(<8 x i32> addrspace(1)* %ptr) {
|
||||
; CHECK-NEON-LABEL: @load_address_space(
|
||||
; CHECK-NEON-NEXT: [[TMP1:%.*]] = bitcast <8 x i32> addrspace(1)* [[PTR:%.*]] to i8 addrspace(1)*
|
||||
; CHECK-NEON-NEXT: [[VLDN:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3.v2i32.p1i8(i8 addrspace(1)* [[TMP1]], i32 0)
|
||||
; CHECK-NEON-NEXT: [[VLDN:%.*]] = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3.v2i32.p1i8(i8 addrspace(1)* [[TMP1]], i32 32)
|
||||
; CHECK-NEON-NEXT: [[TMP2:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[VLDN]], 2
|
||||
; CHECK-NEON-NEXT: [[TMP3:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[VLDN]], 1
|
||||
; CHECK-NEON-NEXT: [[TMP4:%.*]] = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } [[VLDN]], 0
|
||||
; CHECK-NEON-NEXT: ret void
|
||||
;
|
||||
; CHECK-MVE-LABEL: @load_address_space(
|
||||
; CHECK-MVE-NEXT: [[INTERLEAVED_VEC:%.*]] = load <8 x i32>, <8 x i32> addrspace(1)* [[PTR:%.*]]
|
||||
; CHECK-MVE-NEXT: [[INTERLEAVED_VEC:%.*]] = load <8 x i32>, <8 x i32> addrspace(1)* [[PTR:%.*]], align 32
|
||||
; CHECK-MVE-NEXT: [[V0:%.*]] = shufflevector <8 x i32> [[INTERLEAVED_VEC]], <8 x i32> undef, <2 x i32> <i32 0, i32 3>
|
||||
; CHECK-MVE-NEXT: [[V1:%.*]] = shufflevector <8 x i32> [[INTERLEAVED_VEC]], <8 x i32> undef, <2 x i32> <i32 1, i32 4>
|
||||
; CHECK-MVE-NEXT: [[V2:%.*]] = shufflevector <8 x i32> [[INTERLEAVED_VEC]], <8 x i32> undef, <2 x i32> <i32 2, i32 5>
|
||||
; CHECK-MVE-NEXT: ret void
|
||||
;
|
||||
; CHECK-NONE-LABEL: @load_address_space(
|
||||
; CHECK-NONE-NEXT: [[INTERLEAVED_VEC:%.*]] = load <8 x i32>, <8 x i32> addrspace(1)* [[PTR:%.*]]
|
||||
; CHECK-NONE-NEXT: [[INTERLEAVED_VEC:%.*]] = load <8 x i32>, <8 x i32> addrspace(1)* [[PTR:%.*]], align 32
|
||||
; CHECK-NONE-NEXT: [[V0:%.*]] = shufflevector <8 x i32> [[INTERLEAVED_VEC]], <8 x i32> undef, <2 x i32> <i32 0, i32 3>
|
||||
; CHECK-NONE-NEXT: [[V1:%.*]] = shufflevector <8 x i32> [[INTERLEAVED_VEC]], <8 x i32> undef, <2 x i32> <i32 1, i32 4>
|
||||
; CHECK-NONE-NEXT: [[V2:%.*]] = shufflevector <8 x i32> [[INTERLEAVED_VEC]], <8 x i32> undef, <2 x i32> <i32 2, i32 5>
|
||||
|
@ -16,7 +16,7 @@ declare void @use(i32 *)
|
||||
; CHECK-NEXT: ret void
|
||||
|
||||
; CHECK-LABEL: ret2:
|
||||
; CHECK-NEXT: %[[p2:.*]] = load i32*, i32** %ptr, !nonnull !0
|
||||
; CHECK-NEXT: %[[p2:.*]] = load i32*, i32** %ptr, align 4, !nonnull !0
|
||||
; CHECK: tail call void @use(i32* %[[p2]])
|
||||
; CHECK-NEXT: ret void
|
||||
define void @test1(i32** %ptr, i1 %c) {
|
||||
@ -49,13 +49,13 @@ ret2:
|
||||
; loaded value.
|
||||
; CHECK-LABEL: @test2(
|
||||
; CHECK-LABEL: d3.thread:
|
||||
; CHECK-NEXT: %[[p1:.*]] = load i32*, i32** %ptr, !nonnull !0
|
||||
; CHECK-NEXT: %[[p1:.*]] = load i32*, i32** %ptr, align 4, !nonnull !0
|
||||
; CHECK-NEXT: store i32 1, i32* %[[p1]]
|
||||
; CHECK-NEXT: br label %ret1
|
||||
|
||||
; CHECK-LABEL: d3:
|
||||
; CHECK-NEXT: %[[p_cmp:.*]] = load i32*, i32** %ptr
|
||||
; CHECK-NEXT: %[[p2:.*]] = load i32*, i32** %ptr, !nonnull !0
|
||||
; CHECK-NEXT: %[[p2:.*]] = load i32*, i32** %ptr, align 4, !nonnull !0
|
||||
; CHECK-NEXT: store i32 1, i32* %[[p2]]
|
||||
; CHECK-NEXT: icmp eq i32* %[[p_cmp]], null
|
||||
define void @test2(i32** %ptr, i1 %c) {
|
||||
|
@ -251,7 +251,7 @@ bb3:
|
||||
; branch.
|
||||
define void @test8(i32*, i32*, i32*) {
|
||||
; CHECK-LABEL: @test8(
|
||||
; CHECK: %a = load i32, i32* %0, !range ![[RANGE4:[0-9]+]]
|
||||
; CHECK: %a = load i32, i32* %0, align 4, !range ![[RANGE4:[0-9]+]]
|
||||
; CHECK-NEXT: store i32 %a
|
||||
; CHECK-NEXT: %xxx = tail call i32 (...) @f1()
|
||||
; CHECK-NEXT: ret void
|
||||
@ -277,14 +277,14 @@ define void @test9(i32*, i32*, i32*, i1 %c) {
|
||||
br i1 %c, label %d1, label %d2
|
||||
|
||||
; CHECK: d1:
|
||||
; CHECK-NEXT: %a = load i32, i32* %0{{$}}
|
||||
; CHECK-NEXT: %a = load i32, i32* %0, align 4{{$}}
|
||||
d1:
|
||||
%a = load i32, i32* %0, !range !4, !alias.scope !9, !noalias !10
|
||||
br label %d3
|
||||
|
||||
; CHECK: d2:
|
||||
; CHECK-NEXT: %xxxx = tail call i32 (...) @f1()
|
||||
; CHECK-NEXT: %b.pr = load i32, i32* %0, !tbaa !0{{$}}
|
||||
; CHECK-NEXT: %b.pr = load i32, i32* %0, align 4, !tbaa !0{{$}}
|
||||
d2:
|
||||
%xxxx = tail call i32 (...) @f1() nounwind
|
||||
br label %d3
|
||||
|
@ -118,7 +118,7 @@ define void @test3(i1 %cond, i32* %ptr) {
|
||||
; CHECK-NEXT: br label [[LOOP:%.*]]
|
||||
; CHECK: loop:
|
||||
; CHECK-NEXT: [[X:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[X_INC:%.*]], [[LOOP]] ]
|
||||
; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[PTR:%.*]]
|
||||
; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[PTR:%.*]], align 4
|
||||
; CHECK-NEXT: store i32 0, i32* [[PTR]]
|
||||
; CHECK-NEXT: [[X_INC]] = add i32 [[X]], [[VAL]]
|
||||
; CHECK-NEXT: br label [[LOOP]]
|
||||
|
@ -443,7 +443,7 @@ entry:
|
||||
br i1 %cmp11, label %for.body, label %for.end
|
||||
|
||||
; CHECK: for.body.preheader:
|
||||
; CHECK: %c = load i32*, i32** %cptr, !dereferenceable !0
|
||||
; CHECK: %c = load i32*, i32** %cptr, align 8, !dereferenceable !0
|
||||
; CHECK: %d = load i32, i32* %c, align 4
|
||||
|
||||
|
||||
|
@ -52,11 +52,11 @@ for.end: ; preds = %for.body, %entry
|
||||
ret void
|
||||
}
|
||||
|
||||
;; memcpy.atomic formation rejection (atomic store, normal load w/ no align)
;; memcpy.atomic formation (atomic store, normal load w/ no align)
define void @test2b(i64 %Size) nounwind ssp {
; CHECK-LABEL: @test2b(
; CHECK-NOT: call void @llvm.memcpy.element.unordered.atomic
; CHECK: store
; CHECK: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %Dest, i8* align 1 %Base, i64 %Size, i32 1)
; CHECK-NOT: store
; CHECK: ret void
bb.nph:
%Base = alloca i8, i32 10000
@ -10,7 +10,7 @@ define i16 @full_unroll(i16* %A) {
|
||||
; CHECK-NEXT: entry:
|
||||
; CHECK-NEXT: br label [[FOR_COND:%.*]]
|
||||
; CHECK: for.cond:
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = load i16, i16* [[A:%.*]]
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = load i16, i16* [[A:%.*]], align 2
|
||||
; CHECK-NEXT: br label [[FOR_COND_CLEANUP3:%.*]]
|
||||
; CHECK: for.cond.cleanup:
|
||||
; CHECK-NEXT: [[DOTLCSSA10_LCSSA:%.*]] = phi i16 [ [[TMP2_2:%.*]], [[FOR_COND_CLEANUP3_2:%.*]] ]
|
||||
@ -18,15 +18,15 @@ define i16 @full_unroll(i16* %A) {
|
||||
; CHECK-NEXT: ret i16 0
|
||||
; CHECK: for.cond.cleanup3:
|
||||
; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i16, i16* [[A]], i64 1
|
||||
; CHECK-NEXT: [[TMP2_1:%.*]] = load i16, i16* [[PTR_1]]
|
||||
; CHECK-NEXT: [[TMP2_1:%.*]] = load i16, i16* [[PTR_1]], align 2
|
||||
; CHECK-NEXT: br label [[FOR_COND_CLEANUP3_1:%.*]]
|
||||
; CHECK: for.cond.cleanup3.1:
|
||||
; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds i16, i16* [[A]], i64 2
|
||||
; CHECK-NEXT: [[TMP2_2]] = load i16, i16* [[PTR_2]]
|
||||
; CHECK-NEXT: [[TMP2_2]] = load i16, i16* [[PTR_2]], align 2
|
||||
; CHECK-NEXT: br label [[FOR_COND_CLEANUP3_2]]
|
||||
; CHECK: for.cond.cleanup3.2:
|
||||
; CHECK-NEXT: [[PTR_3:%.*]] = getelementptr inbounds i16, i16* [[A]], i64 3
|
||||
; CHECK-NEXT: [[TMP2_3:%.*]] = load i16, i16* [[PTR_3]]
|
||||
; CHECK-NEXT: [[TMP2_3:%.*]] = load i16, i16* [[PTR_3]], align 2
|
||||
; CHECK-NEXT: br i1 false, label [[FOR_COND_CLEANUP3_3:%.*]], label [[FOR_COND_CLEANUP:%.*]]
|
||||
; CHECK: for.cond.cleanup3.3:
|
||||
; CHECK-NEXT: unreachable
|
||||
@ -60,7 +60,7 @@ define i16 @partial_unroll(i16* %A) {
|
||||
; CHECK: for.cond:
|
||||
; CHECK-NEXT: [[I_0:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC9_2:%.*]], [[FOR_COND_CLEANUP3_2:%.*]] ]
|
||||
; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i16, i16* [[A:%.*]], i64 [[I_0]]
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = load i16, i16* [[PTR]]
|
||||
; CHECK-NEXT: [[TMP2:%.*]] = load i16, i16* [[PTR]], align 2
|
||||
; CHECK-NEXT: br label [[FOR_COND_CLEANUP3:%.*]]
|
||||
; CHECK: for.cond.cleanup:
|
||||
; CHECK-NEXT: [[DOTLCSSA10_LCSSA:%.*]] = phi i16 [ [[TMP2_1:%.*]], [[FOR_COND_CLEANUP3_1:%.*]] ]
|
||||
@ -69,12 +69,12 @@ define i16 @partial_unroll(i16* %A) {
|
||||
; CHECK: for.cond.cleanup3:
|
||||
; CHECK-NEXT: [[INC9:%.*]] = add nuw nsw i64 [[I_0]], 1
|
||||
; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr inbounds i16, i16* [[A]], i64 [[INC9]]
|
||||
; CHECK-NEXT: [[TMP2_1]] = load i16, i16* [[PTR_1]]
|
||||
; CHECK-NEXT: [[TMP2_1]] = load i16, i16* [[PTR_1]], align 2
|
||||
; CHECK-NEXT: br label [[FOR_COND_CLEANUP3_1]]
|
||||
; CHECK: for.cond.cleanup3.1:
|
||||
; CHECK-NEXT: [[INC9_1:%.*]] = add nuw nsw i64 [[INC9]], 1
|
||||
; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr inbounds i16, i16* [[A]], i64 [[INC9_1]]
|
||||
; CHECK-NEXT: [[TMP2_2:%.*]] = load i16, i16* [[PTR_2]]
|
||||
; CHECK-NEXT: [[TMP2_2:%.*]] = load i16, i16* [[PTR_2]], align 2
|
||||
; CHECK-NEXT: [[CMP_2:%.*]] = icmp ult i64 [[INC9_1]], 200
|
||||
; CHECK-NEXT: br i1 [[CMP_2]], label [[FOR_COND_CLEANUP3_2]], label [[FOR_COND_CLEANUP:%.*]]
|
||||
; CHECK: for.cond.cleanup3.2:
|
||||
|
@ -7,7 +7,7 @@
|
||||
define float @minloop(float* nocapture readonly %arg) {
|
||||
; CHECK-LABEL: @minloop(
|
||||
; CHECK-NEXT: top:
|
||||
; CHECK-NEXT: [[T:%.*]] = load float, float* [[ARG:%.*]]
|
||||
; CHECK-NEXT: [[T:%.*]] = load float, float* [[ARG:%.*]], align 4
|
||||
; CHECK-NEXT: br label [[LOOP:%.*]]
|
||||
; CHECK: loop:
|
||||
; CHECK-NEXT: [[T1:%.*]] = phi i64 [ [[T7:%.*]], [[LOOP]] ], [ 1, [[TOP:%.*]] ]
|
||||
@ -47,7 +47,7 @@ out: ; preds = %loop
|
||||
define float @minloopattr(float* nocapture readonly %arg) #0 {
|
||||
; CHECK-LABEL: @minloopattr(
|
||||
; CHECK-NEXT: top:
|
||||
; CHECK-NEXT: [[T:%.*]] = load float, float* [[ARG:%.*]]
|
||||
; CHECK-NEXT: [[T:%.*]] = load float, float* [[ARG:%.*]], align 4
|
||||
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
|
||||
; CHECK: vector.ph:
|
||||
; CHECK-NEXT: [[MINMAX_IDENT_SPLATINSERT:%.*]] = insertelement <4 x float> undef, float [[T]], i32 0
|
||||
@ -120,7 +120,7 @@ out: ; preds = %loop
|
||||
define float @minloopnovec(float* nocapture readonly %arg) {
|
||||
; CHECK-LABEL: @minloopnovec(
|
||||
; CHECK-NEXT: top:
|
||||
; CHECK-NEXT: [[T:%.*]] = load float, float* [[ARG:%.*]]
|
||||
; CHECK-NEXT: [[T:%.*]] = load float, float* [[ARG:%.*]], align 4
|
||||
; CHECK-NEXT: br label [[LOOP:%.*]]
|
||||
; CHECK: loop:
|
||||
; CHECK-NEXT: [[T1:%.*]] = phi i64 [ [[T7:%.*]], [[LOOP]] ], [ 1, [[TOP:%.*]] ]
|
||||
|
@ -67,10 +67,10 @@ define void @Test(%struct.s* nocapture %obj, i64 %z) #0 {
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i32 0
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32* [[TMP4]] to <4 x i32>*
|
||||
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP5]], align 4, !alias.scope !0
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], !alias.scope !3
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP1]], !alias.scope !3
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP1]], !alias.scope !3
|
||||
; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP1]], !alias.scope !3
|
||||
; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP1]], align 4, !alias.scope !3
|
||||
; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP1]], align 4, !alias.scope !3
|
||||
; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP1]], align 4, !alias.scope !3
|
||||
; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP1]], align 4, !alias.scope !3
|
||||
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> undef, i32 [[TMP6]], i32 0
|
||||
; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 [[TMP7]], i32 1
|
||||
; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i32> [[TMP11]], i32 [[TMP8]], i32 2
|
||||
@ -101,11 +101,11 @@ define void @Test(%struct.s* nocapture %obj, i64 %z) #0 {
|
||||
; CHECK: .inner:
|
||||
; CHECK-NEXT: [[J:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[J_NEXT:%.*]], [[DOTINNER]] ]
|
||||
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 0, i64 [[J]]
|
||||
; CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]]
|
||||
; CHECK-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP1]]
|
||||
; CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4
|
||||
; CHECK-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP1]], align 4
|
||||
; CHECK-NEXT: [[TMP24:%.*]] = add nsw i32 [[TMP23]], [[TMP22]]
|
||||
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[OBJ]], i64 0, i32 2, i64 [[I]], i64 [[J]]
|
||||
; CHECK-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]]
|
||||
; CHECK-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4
|
||||
; CHECK-NEXT: [[TMP27:%.*]] = add nsw i32 [[TMP24]], [[TMP26]]
|
||||
; CHECK-NEXT: store i32 [[TMP27]], i32* [[TMP25]]
|
||||
; CHECK-NEXT: [[J_NEXT]] = add nuw nsw i64 [[J]], 1
|
||||
|
@ -34,10 +34,10 @@ define void @test_memcpy(%T* noalias align 8 %a, %T* noalias align 16 %b) {
|
||||
; memcpy(%d, %a) should not be generated since store2 may-aliases load %a.
|
||||
define void @f(%T* %a, %T* %b, %T* %c, %T* %d) {
|
||||
; CHECK-LABEL: @f(
|
||||
; CHECK-NEXT: [[VAL:%.*]] = load %T, %T* %a, !alias.scope !0
|
||||
; CHECK-NEXT: store %T { i8 23, i32 23 }, %T* %b, !alias.scope !3
|
||||
; CHECK-NEXT: store %T { i8 44, i32 44 }, %T* %c, !alias.scope !6, !noalias !3
|
||||
; CHECK-NEXT: store %T [[VAL]], %T* %d, !alias.scope !9, !noalias !12
|
||||
; CHECK-NEXT: [[VAL:%.*]] = load [[T:%.*]], %T* [[A:%.*]], align 4, !alias.scope !0
|
||||
; CHECK-NEXT: store [[T]] { i8 23, i32 23 }, %T* [[B:%.*]], !alias.scope !3
|
||||
; CHECK-NEXT: store [[T]] { i8 44, i32 44 }, %T* [[C:%.*]], !alias.scope !6, !noalias !3
|
||||
; CHECK-NEXT: store [[T]] %val, %T* [[D:%.*]], !alias.scope !9, !noalias !12
|
||||
; CHECK-NEXT: ret void
|
||||
;
|
||||
%val = load %T, %T* %a, !alias.scope !{!10}
|
||||
|
@ -20,8 +20,8 @@ define i1 @cmp_no_range(i8*, i8*) {
|
||||
|
||||
define i1 @cmp_different_range(i8*, i8*) {
|
||||
; CHECK-LABEL: @cmp_different_range
|
||||
; CHECK-NEXT: %v1 = load i8, i8* %0, !range !1
|
||||
; CHECK-NEXT: %v2 = load i8, i8* %1, !range !1
|
||||
; CHECK-NEXT: %v1 = load i8, i8* %0, align 1, !range !1
|
||||
; CHECK-NEXT: %v2 = load i8, i8* %1, align 1, !range !1
|
||||
; CHECK-NEXT: %out = icmp eq i8 %v1, %v2
|
||||
; CHECK-NEXT: ret i1 %out
|
||||
%v1 = load i8, i8* %0, !range !1
|
||||
|
@ -20,8 +20,8 @@ define i1 @cmp_no_range(i8*, i8*) {
|
||||
|
||||
define i1 @cmp_different_range(i8*, i8*) {
|
||||
; CHECK-LABEL: @cmp_different_range
|
||||
; CHECK-NEXT: %v1 = load i8, i8* %0, !range !1
|
||||
; CHECK-NEXT: %v2 = load i8, i8* %1, !range !1
|
||||
; CHECK-NEXT: %v1 = load i8, i8* %0, align 1, !range !1
|
||||
; CHECK-NEXT: %v2 = load i8, i8* %1, align 1, !range !1
|
||||
; CHECK-NEXT: %out = icmp eq i8 %v1, %v2
|
||||
; CHECK-NEXT: ret i1 %out
|
||||
%v1 = load i8, i8* %0, !range !1
|
||||
|
@ -1,5 +1,3 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py

; RUN: opt %s -newgvn -S | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
@ -7,7 +5,7 @@ target triple = "x86_64-unknown-linux-gnu"
|
||||
define i8* @test1(i8** %v0, i8** %v1) {
|
||||
; CHECK-LABEL: @test1(
|
||||
; CHECK-NEXT: top:
|
||||
; CHECK-NEXT: [[V2:%.*]] = load i8*, i8** [[V0:%[a-z0-9]+]], !nonnull !0
|
||||
; CHECK-NEXT: [[V2:%.*]] = load i8*, i8** [[V0:%[a-z0-9]+]], align 8, !nonnull !0
|
||||
; CHECK-NEXT: store i8* [[V2]], i8** [[V1:%.*]]
|
||||
; CHECK-NEXT: ret i8* [[V2]]
|
||||
;
|
||||
@ -91,7 +89,7 @@ bb2:
|
||||
define i8* @test5(i8** %v0) {
|
||||
; CHECK-LABEL: @test5(
|
||||
; CHECK-NEXT: top:
|
||||
; CHECK-NEXT: [[V1:%.*]] = load i8*, i8** [[V0:%[a-z0-9]+]], !nonnull !0
|
||||
; CHECK-NEXT: [[V1:%.*]] = load i8*, i8** [[V0:%[a-z0-9]+]], align 8, !nonnull !0
|
||||
; CHECK-NEXT: call void @use1(i8* [[V1]])
|
||||
; CHECK-NEXT: br i1 undef, label [[BB1:%.*]], label [[BB2:%.*]]
|
||||
; CHECK: bb1:
|
||||
@ -118,7 +116,7 @@ define i8* @test6(i8** %v0, i8** %v1) {
|
||||
; CHECK-NEXT: top:
|
||||
; CHECK-NEXT: br i1 undef, label [[BB1:%.*]], label [[BB2:%.*]]
|
||||
; CHECK: bb1:
|
||||
; CHECK-NEXT: [[V2:%.*]] = load i8*, i8** [[V0:%[a-z0-9]+]], !nonnull !0
|
||||
; CHECK-NEXT: [[V2:%.*]] = load i8*, i8** [[V0:%[a-z0-9]+]], align 8, !nonnull !0
|
||||
; CHECK-NEXT: store i8* [[V2]], i8** [[V1:%.*]]
|
||||
; CHECK-NEXT: ret i8* [[V2]]
|
||||
; CHECK: bb2:
|
||||
@ -149,7 +147,7 @@ declare void @use2(i8* %a)
|
||||
define i8* @test7(i8** %v0) {
|
||||
; CHECK-LABEL: @test7(
|
||||
; CHECK-NEXT: top:
|
||||
; CHECK-NEXT: [[V1:%.*]] = load i8*, i8** [[V0:%[a-z0-9]+]], !nonnull !0
|
||||
; CHECK-NEXT: [[V1:%.*]] = load i8*, i8** [[V0:%[a-z0-9]+]], align 8, !nonnull !0
|
||||
; CHECK-NEXT: call void @use2(i8* [[V1]])
|
||||
; CHECK-NEXT: br i1 undef, label [[BB1:%.*]], label [[BB2:%.*]]
|
||||
; CHECK: bb1:
|
||||
|
@ -13,7 +13,7 @@ define i32 @test1(i32* %p, i32* %q) {
|
||||
|
||||
define i32 @test2(i32* %p, i32* %q) {
|
||||
; CHECK-LABEL: @test2(i32* %p, i32* %q)
|
||||
; CHECK: load i32, i32* %p, !alias.scope !0
|
||||
; CHECK: load i32, i32* %p, align 4, !alias.scope !0
|
||||
; CHECK: %c = add i32 %a, %a
|
||||
%a = load i32, i32* %p, !alias.scope !0
|
||||
%b = load i32, i32* %p, !alias.scope !0
|
||||
@ -27,7 +27,7 @@ define i32 @test2(i32* %p, i32* %q) {
|
||||
; throw in between.
|
||||
define i32 @test3(i32* %p, i32* %q) {
|
||||
; CHECK-LABEL: @test3(i32* %p, i32* %q)
|
||||
; CHECK: load i32, i32* %p, !alias.scope !1
|
||||
; CHECK: load i32, i32* %p, align 4, !alias.scope !1
|
||||
; CHECK: %c = add i32 %a, %a
|
||||
%a = load i32, i32* %p, !alias.scope !1
|
||||
%b = load i32, i32* %p, !alias.scope !2
|
||||
|
@ -20,7 +20,7 @@ define void @hoge(i32 %arg) {
|
||||
; CHECK-NEXT: [[TMP:%.*]] = phi i32 [ 0, [[BB1:%.*]] ], [ [[ARG:%.*]], [[BB:%.*]] ]
|
||||
; CHECK-NEXT: br label [[BB6:%.*]]
|
||||
; CHECK: bb3:
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* @global, !h !0
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* @global, align 4, !h !0
|
||||
; CHECK-NEXT: unreachable
|
||||
; CHECK: bb6:
|
||||
; CHECK-NEXT: store i32 [[TMP]], i32* @global.1, !h !0
|
||||
|
@ -13,14 +13,14 @@ define %MNR_struct @f000316011717_2(%DS_struct* %pDS, [64 x i64]* %pCG) #2 {
|
||||
; CHECK-NEXT: [[PCARRY:%.*]] = getelementptr [[DS_STRUCT:%.*]], %DS_struct* [[PDS:%.*]], i32 0, i32 1
|
||||
; CHECK-NEXT: [[PBRBASE:%.*]] = getelementptr [[DS_STRUCT]], %DS_struct* [[PDS]], i32 0, i32 0
|
||||
; CHECK-NEXT: [[PBASE:%.*]] = getelementptr [32 x i64*], [32 x i64*]* [[PBRBASE]], i64 0, i64 0
|
||||
; CHECK-NEXT: [[BASE:%.*]] = load i64*, i64** [[PBASE]], !tbaa !14
|
||||
; CHECK-NEXT: [[BASE:%.*]] = load i64*, i64** [[PBASE]], align 8, !tbaa !14
|
||||
; CHECK-NEXT: [[ABSADDR:%.*]] = getelementptr i64, i64* [[BASE]], i64 9
|
||||
; CHECK-NEXT: [[EXTARGET:%.*]] = load i64, i64* [[ABSADDR]], align 8, !tbaa !4
|
||||
; CHECK-NEXT: [[TEMPLATE:%.*]] = icmp eq i64 [[EXTARGET]], 8593987412
|
||||
; CHECK-NEXT: br i1 [[TEMPLATE]], label %"BB3.000316011731#1", label [[BB2_000316011731_5:%.*]]
|
||||
; CHECK: "BB3.000316011731#1":
|
||||
; CHECK-NEXT: [[PBASE8:%.*]] = getelementptr [32 x i64*], [32 x i64*]* [[PBRBASE]], i64 0, i64 29
|
||||
; CHECK-NEXT: [[BASE9:%.*]] = load i64*, i64** [[PBASE8]], !tbaa !14
|
||||
; CHECK-NEXT: [[BASE9:%.*]] = load i64*, i64** [[PBASE8]], align 8, !tbaa !14
|
||||
; CHECK-NEXT: [[ABSADDR1:%.*]] = getelementptr i64, i64* [[BASE9]], i64 7
|
||||
; CHECK-NEXT: [[RMEM:%.*]] = load i64, i64* [[ABSADDR1]], align 8, !tbaa !4
|
||||
; CHECK-NEXT: [[PWT:%.*]] = getelementptr [[DS_STRUCT]], %DS_struct* [[PDS]], i32 0, i32 2
|
||||
@ -39,7 +39,7 @@ define %MNR_struct @f000316011717_2(%DS_struct* %pDS, [64 x i64]* %pCG) #2 {
|
||||
; CHECK-NEXT: store i64 [[VAL]], i64* [[PREG]], align 32, !tbaa !10
|
||||
; CHECK-NEXT: [[PREG2:%.*]] = getelementptr [64 x i64], [64 x i64]* [[PCG]], i64 0, i64 14
|
||||
; CHECK-NEXT: [[REG:%.*]] = load i64, i64* [[PREG2]], align 16, !tbaa !12
|
||||
; CHECK-NEXT: [[BASE2:%.*]] = load i64*, i64** [[PBASE8]], !tbaa !14
|
||||
; CHECK-NEXT: [[BASE2:%.*]] = load i64*, i64** [[PBASE8]], align 8, !tbaa !14
|
||||
; CHECK-NEXT: [[ABSADDR2:%.*]] = getelementptr i64, i64* [[BASE2]], i64 [[REG]]
|
||||
; CHECK-NEXT: [[RMEM2:%.*]] = load i64, i64* [[ABSADDR2]], align 8, !tbaa !1
|
||||
; CHECK-NEXT: [[PREG7:%.*]] = getelementptr [64 x i64], [64 x i64]* [[PCG]], i64 0, i64 9
|
||||
@ -50,7 +50,7 @@ define %MNR_struct @f000316011717_2(%DS_struct* %pDS, [64 x i64]* %pCG) #2 {
|
||||
; CHECK-NEXT: store i8 [[CFL]], i8* [[PCARRY]], align 1, !tbaa !16
|
||||
; CHECK-NEXT: br label [[EXIT]]
|
||||
; CHECK: Exit:
|
||||
; CHECK-NEXT: [[RESTART378:%.*]] = load [[MNR_STRUCT]], %MNR_struct* [[RESTART]]
|
||||
; CHECK-NEXT: [[RESTART378:%.*]] = load [[MNR_STRUCT]], %MNR_struct* [[RESTART]], align 8
|
||||
; CHECK-NEXT: ret [[MNR_STRUCT]] %restart378
|
||||
;
|
||||
Entry:
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
define i32 @test1(i32* %p) {
|
||||
; CHECK-LABEL: @test1(i32* %p)
|
||||
; CHECK: %a = load i32, i32* %p, !range ![[RANGE0:[0-9]+]]
|
||||
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE0:[0-9]+]]
|
||||
; CHECK: %c = add i32 %a, %a
|
||||
%a = load i32, i32* %p, !range !0
|
||||
%b = load i32, i32* %p, !range !0
|
||||
@ -12,7 +12,7 @@ define i32 @test1(i32* %p) {
|
||||
|
||||
define i32 @test2(i32* %p) {
|
||||
; CHECK-LABEL: @test2(i32* %p)
|
||||
; CHECK: %a = load i32, i32* %p, !range ![[RANGE0]]
|
||||
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE0]]
|
||||
; CHECK: %c = add i32 %a, %a
|
||||
%a = load i32, i32* %p, !range !0
|
||||
%b = load i32, i32* %p
|
||||
@ -22,7 +22,7 @@ define i32 @test2(i32* %p) {
|
||||
|
||||
define i32 @test3(i32* %p) {
|
||||
; CHECK-LABEL: @test3(i32* %p)
|
||||
; CHECK: %a = load i32, i32* %p, !range ![[RANGE0]]
|
||||
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE0]]
|
||||
; CHECK: %c = add i32 %a, %a
|
||||
%a = load i32, i32* %p, !range !0
|
||||
%b = load i32, i32* %p, !range !1
|
||||
@ -32,7 +32,7 @@ define i32 @test3(i32* %p) {
|
||||
|
||||
define i32 @test4(i32* %p) {
|
||||
; CHECK-LABEL: @test4(i32* %p)
|
||||
; CHECK: %a = load i32, i32* %p, !range ![[RANGE0]]
|
||||
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE0]]
|
||||
; CHECK: %c = add i32 %a, %a
|
||||
%a = load i32, i32* %p, !range !0
|
||||
%b = load i32, i32* %p, !range !2
|
||||
@ -42,7 +42,7 @@ define i32 @test4(i32* %p) {
|
||||
|
||||
define i32 @test5(i32* %p) {
|
||||
; CHECK-LABEL: @test5(i32* %p)
|
||||
; CHECK: %a = load i32, i32* %p, !range ![[RANGE3:[0-9]+]]
|
||||
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE3:[0-9]+]]
|
||||
; CHECK: %c = add i32 %a, %a
|
||||
%a = load i32, i32* %p, !range !3
|
||||
%b = load i32, i32* %p, !range !4
|
||||
@ -52,7 +52,7 @@ define i32 @test5(i32* %p) {
|
||||
|
||||
define i32 @test6(i32* %p) {
|
||||
; CHECK-LABEL: @test6(i32* %p)
|
||||
; CHECK: %a = load i32, i32* %p, !range ![[RANGE5:[0-9]+]]
|
||||
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE5:[0-9]+]]
|
||||
; CHECK: %c = add i32 %a, %a
|
||||
%a = load i32, i32* %p, !range !5
|
||||
%b = load i32, i32* %p, !range !6
|
||||
@ -62,7 +62,7 @@ define i32 @test6(i32* %p) {
|
||||
|
||||
define i32 @test7(i32* %p) {
|
||||
; CHECK-LABEL: @test7(i32* %p)
|
||||
; CHECK: %a = load i32, i32* %p, !range ![[RANGE7:[0-9]+]]
|
||||
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE7:[0-9]+]]
|
||||
; CHECK: %c = add i32 %a, %a
|
||||
%a = load i32, i32* %p, !range !7
|
||||
%b = load i32, i32* %p, !range !8
|
||||
@ -72,7 +72,7 @@ define i32 @test7(i32* %p) {
|
||||
|
||||
define i32 @test8(i32* %p) {
|
||||
; CHECK-LABEL: @test8(i32* %p)
|
||||
; CHECK: %a = load i32, i32* %p, !range ![[RANGE9:[0-9]+]]
|
||||
; CHECK: %a = load i32, i32* %p, align 4, !range ![[RANGE9:[0-9]+]]
|
||||
; CHECK-NOT: range
|
||||
; CHECK: %c = add i32 %a, %a
|
||||
%a = load i32, i32* %p, !range !9
|
||||
|
@ -22,7 +22,7 @@ define void @fn2() !dbg !6 {
|
||||
|
||||
define void @fn3() !dbg !8 {
|
||||
; CHECK-LABEL: @fn3
|
||||
; CHECK: load i16, i16* undef, !dbg ![[LOC2:[0-9]+]]
|
||||
; CHECK: load i16, i16* undef, align 2, !dbg ![[LOC2:[0-9]+]]
|
||||
; CHECK-NOT: or i16
|
||||
%instruction = load i16, i16* undef, !dbg !9
|
||||
%dbgless_instruction = or i16 %instruction, 0
|
||||
|
@ -62,7 +62,7 @@ entry:
|
||||
|
||||
define i8 @test_md(i8 addrspace(1)* %ptr) gc "statepoint-example" {
|
||||
; CHECK-LABEL: @test_md(
|
||||
; CHECK: %tmp = load i8, i8 addrspace(1)* %ptr, !tbaa [[TAG_old:!.*]]
|
||||
; CHECK: %tmp = load i8, i8 addrspace(1)* %ptr, align 1, !tbaa [[TAG_old:!.*]]
|
||||
entry:
|
||||
%tmp = load i8, i8 addrspace(1)* %ptr, !tbaa !0
|
||||
call void @foo() [ "deopt"(i32 0, i32 -1, i32 0, i32 0, i32 0) ]
|
||||
@ -72,7 +72,7 @@ entry:
|
||||
; Same as test_md() above, but with new-format TBAA metadata.
|
||||
define i8 @test_md_new(i8 addrspace(1)* %ptr) gc "statepoint-example" {
|
||||
; CHECK-LABEL: @test_md_new(
|
||||
; CHECK: %tmp = load i8, i8 addrspace(1)* %ptr, !tbaa [[TAG_new:!.*]]
|
||||
; CHECK: %tmp = load i8, i8 addrspace(1)* %ptr, align 1, !tbaa [[TAG_new:!.*]]
|
||||
entry:
|
||||
%tmp = load i8, i8 addrspace(1)* %ptr, !tbaa !4
|
||||
call void @foo() [ "deopt"(i32 0, i32 -1, i32 0, i32 0, i32 0) ]
|
||||
|
@ -227,7 +227,7 @@ false:
|
||||
define void @f7_nested_conds(i32* %a, i32 %b) {
|
||||
; CHECK-LABEL: @f7_nested_conds(
|
||||
; CHECK-NEXT: entry:
|
||||
; CHECK-NEXT: [[A_V:%.*]] = load i32, i32* [[A:%.*]]
|
||||
; CHECK-NEXT: [[A_V:%.*]] = load i32, i32* [[A:%.*]], align 4
|
||||
; CHECK-NEXT: [[C_1:%.*]] = icmp ne i32 [[A_V]], 0
|
||||
; CHECK-NEXT: br i1 [[C_1]], label [[TRUE:%.*]], label [[FALSE:%.*]]
|
||||
; CHECK: false:
|
||||
|
@ -12,13 +12,13 @@ declare void @unknown()
|
||||
define void @test(float * %a, float * %b, float * %c, float * %d) {
|
||||
; CHECK-LABEL: @test(
|
||||
; CHECK-NEXT: entry:
|
||||
; CHECK-NEXT: [[L0:%.*]] = load float, float* [[A:%.*]]
|
||||
; CHECK-NEXT: [[L0:%.*]] = load float, float* [[A:%.*]], align 4
|
||||
; CHECK-NEXT: [[A1:%.*]] = getelementptr inbounds float, float* [[A]], i64 1
|
||||
; CHECK-NEXT: [[L1:%.*]] = load float, float* [[A1]]
|
||||
; CHECK-NEXT: [[L1:%.*]] = load float, float* [[A1]], align 4
|
||||
; CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds float, float* [[A]], i64 2
|
||||
; CHECK-NEXT: [[L2:%.*]] = load float, float* [[A2]]
|
||||
; CHECK-NEXT: [[L2:%.*]] = load float, float* [[A2]], align 4
|
||||
; CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds float, float* [[A]], i64 3
|
||||
; CHECK-NEXT: [[L3:%.*]] = load float, float* [[A3]]
|
||||
; CHECK-NEXT: [[L3:%.*]] = load float, float* [[A3]], align 4
|
||||
; CHECK-NEXT: call void @unknown()
|
||||
; CHECK-NEXT: call void @unknown()
|
||||
; CHECK-NEXT: call void @unknown()
|
||||
|
@ -554,7 +554,7 @@ entry:
|
||||
%s2.next.ptr = getelementptr %S2, %S2* %s2, i64 0, i32 1
|
||||
%s2.next = load %S2*, %S2** %s2.next.ptr, !tbaa !0
|
||||
; CHECK: %[[gep:.*]] = getelementptr %S2, %S2* %s2, i64 0, i32 1
|
||||
; CHECK-NEXT: %[[next:.*]] = load %S2*, %S2** %[[gep]], !tbaa [[TAG_0]]
|
||||
; CHECK-NEXT: %[[next:.*]] = load %S2*, %S2** %[[gep]], align 8, !tbaa [[TAG_0]]
|
||||
|
||||
%s2.next.s1.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 0
|
||||
%s2.next.s1 = load %S1*, %S1** %s2.next.s1.ptr, !tbaa !3
|
||||
@ -565,9 +565,9 @@ entry:
|
||||
%new.next.ptr = getelementptr %S2, %S2* %new, i64 0, i32 1
|
||||
store %S2* %s2.next.next, %S2** %new.next.ptr, !tbaa !9
|
||||
; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 0
|
||||
; CHECK-NEXT: %[[next_s1:.*]] = load %S1*, %S1** %[[gep]], !tbaa [[TAG_3]]
|
||||
; CHECK-NEXT: %[[next_s1:.*]] = load %S1*, %S1** %[[gep]], align 8, !tbaa [[TAG_3]]
|
||||
; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 1
|
||||
; CHECK-NEXT: %[[next_next:.*]] = load %S2*, %S2** %[[gep]], !tbaa [[TAG_7]]
|
||||
; CHECK-NEXT: %[[next_next:.*]] = load %S2*, %S2** %[[gep]], align 8, !tbaa [[TAG_7]]
|
||||
|
||||
%new.s1 = load %S1*, %S1** %new.s1.ptr
|
||||
%result1 = insertvalue %S2 undef, %S1* %new.s1, 0
|
||||
|
@ -4,8 +4,8 @@
|
||||
define void @matchingExtensions(i32* %ap, i32* %bp, i64* %result) {
|
||||
; CHECK-LABEL: @matchingExtensions(
|
||||
; CHECK-NEXT: entry:
|
||||
; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[AP:%.*]]
|
||||
; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[BP:%.*]]
|
||||
; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[AP:%.*]], align 4
|
||||
; CHECK-NEXT: [[B:%.*]] = load i32, i32* [[BP:%.*]], align 4
|
||||
; CHECK-NEXT: [[EB:%.*]] = sext i32 [[B]] to i64
|
||||
; CHECK-NEXT: [[SUBAB:%.*]] = sub nsw i32 [[A]], [[B]]
|
||||
; CHECK-NEXT: [[EA:%.*]] = sext i32 [[A]] to i64
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
define void @foo(i1 %c, i8* %p) {
|
||||
; CHECK: if:
|
||||
; CHECK-NEXT: load i8, i8* %p, !range !0
|
||||
; CHECK-NEXT: load i8, i8* %p, align 1, !range !0
|
||||
; CHECK: !0 = !{i8 0, i8 1, i8 3, i8 5}
|
||||
if:
|
||||
br i1 %c, label %then, label %else
|
||||
|
@ -160,8 +160,8 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
|
||||
; BASIC-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
|
||||
; BASIC-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
|
||||
; BASIC-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]]
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
|
||||
; BASIC-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
; BASIC-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
|
||||
; BASIC-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
|
||||
@ -208,8 +208,8 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
|
||||
; ALL-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
|
||||
; ALL-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
|
||||
; ALL-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]]
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
|
||||
; ALL-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
; ALL-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
|
||||
; ALL-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]), "align"(%struct.S** [[TMP4]], i64 8) ]
|
||||
@ -255,7 +255,7 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
|
||||
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
|
||||
; WITH-AC-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
|
||||
; WITH-AC-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
; WITH-AC-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]]
|
||||
; WITH-AC-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
; WITH-AC-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
|
||||
; WITH-AC-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
; WITH-AC-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
@ -298,7 +298,7 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
|
||||
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
|
||||
; CROSS-BLOCK-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
|
||||
; CROSS-BLOCK-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
; CROSS-BLOCK-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]]
|
||||
; CROSS-BLOCK-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
; CROSS-BLOCK-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
|
||||
; CROSS-BLOCK-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
; CROSS-BLOCK-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
@ -335,7 +335,7 @@ define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
|
||||
; FULL-SIMPLIFY-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]]
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
@ -404,8 +404,8 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
|
||||
; BASIC-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
|
||||
; BASIC-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8) ]
|
||||
; BASIC-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]]
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
|
||||
; BASIC-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
|
||||
; BASIC-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
|
||||
; BASIC-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
@ -429,8 +429,8 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
|
||||
; BASIC-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
; BASIC-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8) ]
|
||||
; BASIC-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]]
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "align"(i32** [[TMP25]], i64 8) ]
|
||||
; BASIC-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
|
||||
; BASIC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
|
||||
; BASIC-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
|
||||
; BASIC-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
|
||||
@ -453,8 +453,8 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
|
||||
; ALL-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
|
||||
; ALL-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8) ]
|
||||
; ALL-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]]
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8) ]
|
||||
; ALL-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
|
||||
; ALL-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
|
||||
; ALL-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
@ -478,8 +478,8 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 8) ]
|
||||
; ALL-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
; ALL-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8) ]
|
||||
; ALL-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]]
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "align"(i32** [[TMP25]], i64 8) ]
|
||||
; ALL-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
|
||||
; ALL-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
|
||||
; ALL-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
|
||||
; ALL-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
|
||||
@ -501,7 +501,7 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
|
||||
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
|
||||
; WITH-AC-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
|
||||
; WITH-AC-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
|
||||
; WITH-AC-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]]
|
||||
; WITH-AC-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
|
||||
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
|
||||
; WITH-AC-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
|
||||
; WITH-AC-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
@ -521,8 +521,8 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
|
||||
; WITH-AC-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
|
||||
; WITH-AC-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
; WITH-AC-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
|
||||
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8) ]
|
||||
; WITH-AC-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]]
|
||||
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "align"(i32** [[TMP25]], i64 8) ]
|
||||
; WITH-AC-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
|
||||
; WITH-AC-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
|
||||
; WITH-AC-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
|
||||
; WITH-AC-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
|
||||
@ -544,7 +544,7 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
|
||||
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4) ]
|
||||
; CROSS-BLOCK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
|
||||
; CROSS-BLOCK-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
|
||||
; CROSS-BLOCK-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]]
|
||||
; CROSS-BLOCK-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
|
||||
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
|
||||
; CROSS-BLOCK-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
|
||||
; CROSS-BLOCK-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
@ -564,8 +564,8 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
|
||||
; CROSS-BLOCK-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
|
||||
; CROSS-BLOCK-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
; CROSS-BLOCK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
|
||||
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8) ]
|
||||
; CROSS-BLOCK-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]]
|
||||
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8), "align"(i32** [[TMP25]], i64 8) ]
|
||||
; CROSS-BLOCK-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
|
||||
; CROSS-BLOCK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
|
||||
; CROSS-BLOCK-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
|
||||
; CROSS-BLOCK-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
|
||||
@ -583,7 +583,7 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]]
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
|
||||
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "align"(%struct.S** [[TMP4]], i64 32), "dereferenceable"(i32** [[TMP5]], i64 8), "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8), "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i32* [[TMP8]], i64 4), "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i8* [[TMP11]], i64 1) ]
|
||||
; FULL-SIMPLIFY-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
|
||||
@ -601,8 +601,8 @@ define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true"
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]]
|
||||
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i8* [[TMP20]], i64 1), "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i32** [[TMP25]], i64 8), "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
|
||||
; FULL-SIMPLIFY-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[TMP17]], i64 4), "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i8* [[TMP20]], i64 1), "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i32** [[TMP25]], i64 8), "align"(i32** [[TMP25]], i64 8), "dereferenceable"(i32* [[TMP26]], i64 4), "align"(i32* [[TMP26]], i64 4) ]
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
|
||||
; FULL-SIMPLIFY-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
|
||||
; FULL-SIMPLIFY-NEXT: ret i32 [[TMP28]]
|
||||
|
test/Verifier/unsized-types-alloca.ll (new file, 10 lines)
@ -0,0 +1,10 @@
; RUN: not opt -verify < %s 2>&1 | FileCheck %s

%X = type opaque

define void @f_2() {
%t = alloca %X
ret void
; CHECK: Cannot allocate unsized type
; CHECK-NEXT: %t = alloca %X
}
test/Verifier/unsized-types-load.ll (new file, 10 lines)
@ -0,0 +1,10 @@
; RUN: not opt -verify < %s 2>&1 | FileCheck %s

%X = type opaque

define void @f_0(%X* %ptr) {
%t = load %X, %X* %ptr
ret void
; CHECK: loading unsized types is not allowed
; CHECK-NEXT: %t = load %X, %X* %ptr
}
test/Verifier/unsized-types-store.ll (new file, 10 lines)
@ -0,0 +1,10 @@
; RUN: not opt -verify < %s 2>&1 | FileCheck %s

%X = type opaque

define void @f_1(%X %val, %X* %ptr) {
store %X %val, %X* %ptr
ret void
; CHECK: storing unsized types is not allowed
; CHECK-NEXT: store %X %val, %X* %ptr
}
@ -1,24 +0,0 @@
; RUN: not opt -verify < %s 2>&1 | FileCheck %s

%X = type opaque

define void @f_0(%X* %ptr) {
%t = load %X, %X* %ptr
ret void
; CHECK: loading unsized types is not allowed
; CHECK-NEXT: %t = load %X, %X* %ptr
}

define void @f_1(%X %val, %X* %ptr) {
store %X %val, %X* %ptr
ret void
; CHECK: storing unsized types is not allowed
; CHECK-NEXT: store %X %val, %X* %ptr
}

define void @f_2() {
%t = alloca %X
ret void
; CHECK: Cannot allocate unsized type
; CHECK-NEXT: %t = alloca %X
}
@ -412,41 +412,6 @@ static int compileModule(char **argv, LLVMContext &Context) {
bool SkipModule = codegen::getMCPU() == "help" ||
(!MAttrs.empty() && MAttrs.front() == "help");

// If user just wants to list available options, skip module loading
if (!SkipModule) {
if (InputLanguage == "mir" ||
(InputLanguage == "" && StringRef(InputFilename).endswith(".mir"))) {
MIR = createMIRParserFromFile(InputFilename, Err, Context,
setMIRFunctionAttributes);
if (MIR)
M = MIR->parseIRModule();
} else
M = parseIRFile(InputFilename, Err, Context, false);
if (!M) {
Err.print(argv[0], WithColor::error(errs(), argv[0]));
return 1;
}

// If we are supposed to override the target triple, do so now.
if (!TargetTriple.empty())
M->setTargetTriple(Triple::normalize(TargetTriple));
TheTriple = Triple(M->getTargetTriple());
} else {
TheTriple = Triple(Triple::normalize(TargetTriple));
}

if (TheTriple.getTriple().empty())
TheTriple.setTriple(sys::getDefaultTargetTriple());

// Get the target specific parser.
std::string Error;
const Target *TheTarget =
TargetRegistry::lookupTarget(codegen::getMArch(), TheTriple, Error);
if (!TheTarget) {
WithColor::error(errs(), argv[0]) << Error;
return 1;
}

CodeGenOpt::Level OLvl = CodeGenOpt::Default;
switch (OptLevel) {
default:
@@ -468,26 +433,93 @@ static int compileModule(char **argv, LLVMContext &Context) {
  Options.MCOptions.IASSearchPaths = IncludeDirs;
  Options.MCOptions.SplitDwarfFile = SplitDwarfFile;

  // On AIX, setting the relocation model to anything other than PIC is considered
  // a user error.
  Optional<Reloc::Model> RM = codegen::getExplicitRelocModel();
  if (TheTriple.isOSAIX() && RM.hasValue() && *RM != Reloc::PIC_) {
    WithColor::error(errs(), argv[0])
        << "invalid relocation model, AIX only supports PIC.\n";
    return 1;
  }

  std::unique_ptr<TargetMachine> Target(TheTarget->createTargetMachine(
      TheTriple.getTriple(), CPUStr, FeaturesStr, Options, RM,
      codegen::getExplicitCodeModel(), OLvl));
  const Target *TheTarget = nullptr;
  std::unique_ptr<TargetMachine> Target;

  assert(Target && "Could not allocate target machine!");
  // If user just wants to list available options, skip module loading
  if (!SkipModule) {
    auto SetDataLayout =
        [&](StringRef DataLayoutTargetTriple) -> Optional<std::string> {
      // If we are supposed to override the target triple, do so now.
      std::string IRTargetTriple = DataLayoutTargetTriple.str();
      if (!TargetTriple.empty())
        IRTargetTriple = Triple::normalize(TargetTriple);
      TheTriple = Triple(IRTargetTriple);
      if (TheTriple.getTriple().empty())
        TheTriple.setTriple(sys::getDefaultTargetTriple());

  // If we don't have a module then just exit now. We do this down
  // here since the CPU/Feature help is underneath the target machine
  // creation.
  if (SkipModule)
      std::string Error;
      TheTarget =
          TargetRegistry::lookupTarget(codegen::getMArch(), TheTriple, Error);
      if (!TheTarget) {
        WithColor::error(errs(), argv[0]) << Error;
        exit(1);
      }

      // On AIX, setting the relocation model to anything other than PIC is
      // considered a user error.
      if (TheTriple.isOSAIX() && RM.hasValue() && *RM != Reloc::PIC_) {
        WithColor::error(errs(), argv[0])
            << "invalid relocation model, AIX only supports PIC.\n";
        exit(1);
      }

      Target = std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine(
          TheTriple.getTriple(), CPUStr, FeaturesStr, Options, RM,
          codegen::getExplicitCodeModel(), OLvl));
      assert(Target && "Could not allocate target machine!");

      return Target->createDataLayout().getStringRepresentation();
    };
    if (InputLanguage == "mir" ||
        (InputLanguage == "" && StringRef(InputFilename).endswith(".mir"))) {
      MIR = createMIRParserFromFile(InputFilename, Err, Context,
                                    setMIRFunctionAttributes);
      if (MIR)
        M = MIR->parseIRModule(SetDataLayout);
    } else {
      M = parseIRFile(InputFilename, Err, Context, SetDataLayout);
    }
    if (!M) {
      Err.print(argv[0], WithColor::error(errs(), argv[0]));
      return 1;
    }
    if (!TargetTriple.empty())
      M->setTargetTriple(Triple::normalize(TargetTriple));
  } else {
    TheTriple = Triple(Triple::normalize(TargetTriple));
    if (TheTriple.getTriple().empty())
      TheTriple.setTriple(sys::getDefaultTargetTriple());

    // Get the target specific parser.
    std::string Error;
    TheTarget =
        TargetRegistry::lookupTarget(codegen::getMArch(), TheTriple, Error);
    if (!TheTarget) {
      WithColor::error(errs(), argv[0]) << Error;
      return 1;
    }

    // On AIX, setting the relocation model to anything other than PIC is
    // considered a user error.
    if (TheTriple.isOSAIX() && RM.hasValue() && *RM != Reloc::PIC_) {
      WithColor::error(errs(), argv[0])
          << "invalid relocation model, AIX only supports PIC.\n";
      return 1;
    }

    Target = std::unique_ptr<TargetMachine>(TheTarget->createTargetMachine(
        TheTriple.getTriple(), CPUStr, FeaturesStr, Options, RM,
        codegen::getExplicitCodeModel(), OLvl));
    assert(Target && "Could not allocate target machine!");

    // If we don't have a module then just exit now. We do this down
    // here since the CPU/Feature help is underneath the target machine
    // creation.
    return 0;
  }

  assert(M && "Should have exited if we didn't have a module!");
  if (codegen::getFloatABIForCalls() != FloatABI::Default)
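Note (editorial, not part of the patch): the hunk above moves llc's target lookup and TargetMachine creation into the SetDataLayout callback, so the datalayout the parser applies comes from the TargetMachine rather than from the input file. The following sketch shows how any tool could use the parseIRFile overload that takes such a callback; the file name "input.ll", the 32-bit triple check, and the datalayout string are illustrative assumptions, not taken from this commit.

// Sketch only. The callback receives the target triple recorded in the IR and
// may return a replacement datalayout string, or None to keep the file's own.
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Context;
  SMDiagnostic Err;
  auto SetDataLayout = [](StringRef TripleFromIR) -> Optional<std::string> {
    // Override only for 32-bit triples (hypothetical policy); otherwise keep
    // the datalayout written in the file, or the default if none is present.
    if (Triple(TripleFromIR).isArch32Bit())
      return std::string("e-m:e-p:32:32-i64:64-n32-S128"); // made-up layout
    return None;
  };
  std::unique_ptr<Module> M =
      parseIRFile("input.ll", Err, Context, SetDataLayout);
  if (!M) {
    Err.print("example", errs());
    return 1;
  }
  // The module already carries the final datalayout when parsing returns.
  outs() << M->getDataLayoutStr() << "\n";
  return 0;
}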
@@ -520,13 +552,6 @@ static int compileModule(char **argv, LLVMContext &Context) {
    TLII.disableAllFunctions();
  PM.add(new TargetLibraryInfoWrapperPass(TLII));

  // Add the target data from the target machine, if it exists, or the module.
  M->setDataLayout(Target->createDataLayout());

  // This needs to be done after setting datalayout since it calls verifier
  // to check debug info whereas verifier relies on correct datalayout.
  UpgradeDebugInfo(*M);

  // Verify module immediately to catch problems before doInitialization() is
  // called on any passes.
  if (!NoVerify && verifyModule(*M, &errs())) {
@@ -121,8 +121,19 @@ int main(int argc, char **argv) {

  // Parse the file now...
  SMDiagnostic Err;
  auto ModuleAndIndex = parseAssemblyFileWithIndex(
      InputFilename, Err, Context, nullptr, !DisableVerify, ClDataLayout);
  auto SetDataLayout = [](StringRef) -> Optional<std::string> {
    if (ClDataLayout.empty())
      return None;
    return ClDataLayout;
  };
  ParsedModuleAndIndex ModuleAndIndex;
  if (DisableVerify) {
    ModuleAndIndex = parseAssemblyFileWithIndexNoUpgradeDebugInfo(
        InputFilename, Err, Context, nullptr, SetDataLayout);
  } else {
    ModuleAndIndex = parseAssemblyFileWithIndex(InputFilename, Err, Context,
                                                nullptr, SetDataLayout);
  }
  std::unique_ptr<Module> M = std::move(ModuleAndIndex.Mod);
  if (!M.get()) {
    Err.print(argv[0], errs());
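Note (editorial): the llvm-as hunk above replaces the old DisableVerify/ClDataLayout parameters with a callback plus a choice of entry point. A minimal sketch of the same mapping, assuming an empty override string means "keep the file's datalayout"; the wrapper function and its parameters are made up for illustration, while parseAssemblyFileWithIndex and its .Mod member are used exactly as in the hunk.

// Sketch only: maps an optional command-line datalayout string onto the
// DataLayoutCallbackTy expected by the assembly parser.
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/Support/SourceMgr.h"

using namespace llvm;

static std::unique_ptr<Module> parseAsmWithOverride(StringRef Path,
                                                    StringRef LayoutOverride,
                                                    LLVMContext &Ctx,
                                                    SMDiagnostic &Err) {
  auto SetDataLayout = [&](StringRef) -> Optional<std::string> {
    if (LayoutOverride.empty())
      return None;                // no override: keep the file's datalayout
    return LayoutOverride.str();  // otherwise force the requested layout
  };
  return parseAssemblyFileWithIndex(Path, Err, Ctx, /*Slots=*/nullptr,
                                    SetDataLayout)
      .Mod;
}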
@@ -21,6 +21,7 @@
#include "llvm/Analysis/RegionPass.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/Bitcode/BitcodeWriterPass.h"
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/CodeGen/TargetPassConfig.h"
@@ -118,8 +119,12 @@ static cl::opt<std::string> ThinLinkBitcodeFile(
static cl::opt<bool>
NoVerify("disable-verify", cl::desc("Do not run the verifier"), cl::Hidden);

static cl::opt<bool>
VerifyEach("verify-each", cl::desc("Verify after each transform"));
static cl::opt<bool> NoUpgradeDebugInfo("disable-upgrade-debug-info",
                                        cl::desc("Generate invalid output"),
                                        cl::ReallyHidden);

static cl::opt<bool> VerifyEach("verify-each",
                                cl::desc("Verify after each transform"));

static cl::opt<bool>
DisableDITypeMap("disable-debug-info-type-map",
@@ -616,8 +621,18 @@ int main(int argc, char **argv) {
  std::unique_ptr<ToolOutputFile> RemarksFile = std::move(*RemarksFileOrErr);

  // Load the input module...
  std::unique_ptr<Module> M =
      parseIRFile(InputFilename, Err, Context, !NoVerify, ClDataLayout);
  auto SetDataLayout = [](StringRef) -> Optional<std::string> {
    if (ClDataLayout.empty())
      return None;
    return ClDataLayout;
  };
  std::unique_ptr<Module> M;
  if (NoUpgradeDebugInfo)
    M = parseAssemblyFileWithIndexNoUpgradeDebugInfo(
            InputFilename, Err, Context, nullptr, SetDataLayout)
            .Mod;
  else
    M = parseIRFile(InputFilename, Err, Context, SetDataLayout);

  if (!M) {
    Err.print(argv[0], errs());
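Note (editorial): with the callback in place, the override is applied while the IR is being parsed, so inferred load alignments already reflect it, instead of being patched onto the module afterwards as the removed M->setDataLayout call did. A small illustrative check, not from the patch; the file name and layout string are made up.

// Sketch only: forces a datalayout through the callback and confirms that the
// parsed module already carries it.
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  const std::string Forced = "e-m:e-i64:64-n32:64"; // hypothetical layout
  LLVMContext Context;
  SMDiagnostic Err;
  std::unique_ptr<Module> M =
      parseIRFile("input.ll", Err, Context,
                  [&](StringRef) -> Optional<std::string> { return Forced; });
  if (!M) {
    Err.print("check", errs());
    return 1;
  }
  outs() << (M->getDataLayoutStr() == Forced ? "override applied\n"
                                             : "override missing\n");
  return 0;
}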