diff --git a/docs/LangRef.rst b/docs/LangRef.rst
index 934c69dde03..f2276905ece 100644
--- a/docs/LangRef.rst
+++ b/docs/LangRef.rst
@@ -1644,6 +1644,12 @@ example:
     If a function that has an ``sspstrong`` attribute is inlined into a
     function that doesn't have an ``sspstrong`` attribute, then the resulting
     function will have an ``sspstrong`` attribute.
+``strictfp``
+    This attribute indicates that the function was called from a scope that
+    requires strict floating point semantics. LLVM will not attempt any
+    optimizations that require assumptions about the floating point rounding
+    mode or that might alter the state of floating point status flags that
+    might otherwise be set or cleared by calling this function.
 ``"thunk"``
     This attribute indicates that the function will delegate to some other
     function with a tail call. The prototype of a thunk should not be used for
diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h
index 3777f956cf2..9f869639399 100644
--- a/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/include/llvm/Bitcode/LLVMBitCodes.h
@@ -558,7 +558,8 @@ enum AttributeKindCodes {
   ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY = 50,
   ATTR_KIND_ALLOC_SIZE = 51,
   ATTR_KIND_WRITEONLY = 52,
-  ATTR_KIND_SPECULATABLE = 53
+  ATTR_KIND_SPECULATABLE = 53,
+  ATTR_KIND_STRICT_FP = 54,
 };
 
 enum ComdatSelectionKindCodes {
diff --git a/include/llvm/IR/Attributes.td b/include/llvm/IR/Attributes.td
index 616387816bf..76284babc1a 100644
--- a/include/llvm/IR/Attributes.td
+++ b/include/llvm/IR/Attributes.td
@@ -149,6 +149,9 @@ def StackProtectReq : EnumAttr<"sspreq">;
 /// Strong Stack protection.
 def StackProtectStrong : EnumAttr<"sspstrong">;
 
+/// Function was called in a scope requiring strict floating point semantics.
+def StrictFP : EnumAttr<"strictfp">;
+
 /// Hidden pointer to structure to return.
 def StructRet : EnumAttr<"sret">;
 
diff --git a/include/llvm/IR/CallSite.h b/include/llvm/IR/CallSite.h
index 01fee548f0d..479a8e2f241 100644
--- a/include/llvm/IR/CallSite.h
+++ b/include/llvm/IR/CallSite.h
@@ -426,6 +426,11 @@ public:
     CALLSITE_DELEGATE_GETTER(isNoBuiltin());
   }
 
+  /// Return true if the call requires strict floating point semantics.
+  bool isStrictFP() const {
+    CALLSITE_DELEGATE_GETTER(isStrictFP());
+  }
+
   /// Return true if the call should not be inlined.
   bool isNoInline() const {
     CALLSITE_DELEGATE_GETTER(isNoInline());
diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h
index 60ae98869e5..908d4bcc069 100644
--- a/include/llvm/IR/Instructions.h
+++ b/include/llvm/IR/Instructions.h
@@ -1757,6 +1757,9 @@ public:
            !hasFnAttrImpl(Attribute::Builtin);
   }
 
+  /// Determine if the call requires strict floating point semantics.
+  bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
+
   /// Return true if the call should not be inlined.
   bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
   void setIsNoInline() {
@@ -3844,6 +3847,9 @@ public:
            !hasFnAttrImpl(Attribute::Builtin);
   }
 
+  /// Determine if the call requires strict floating point semantics.
+  bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
+
   /// Return true if the call should not be inlined.
   bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
   void setIsNoInline() {
diff --git a/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
index 92c64a40e93..9b780f8eef5 100644
--- a/include/llvm/Transforms/Utils/SimplifyLibCalls.h
+++ b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -137,6 +137,9 @@ private:
   Value *optimizeSqrt(CallInst *CI, IRBuilder<> &B);
   Value *optimizeSinCosPi(CallInst *CI, IRBuilder<> &B);
   Value *optimizeTan(CallInst *CI, IRBuilder<> &B);
+  // Wrapper for all floating point library call optimizations
+  Value *optimizeFloatingPointLibCall(CallInst *CI, LibFunc Func,
+                                      IRBuilder<> &B);
 
   // Integer Library Call Optimizations
   Value *optimizeFFS(CallInst *CI, IRBuilder<> &B);
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 0f5ec3f5626..e88b8f14d54 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -1359,7 +1359,7 @@ llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
 //
 
 bool llvm::canConstantFoldCallTo(ImmutableCallSite CS, const Function *F) {
-  if (CS.isNoBuiltin())
+  if (CS.isNoBuiltin() || CS.isStrictFP())
     return false;
   switch (F->getIntrinsicID()) {
   case Intrinsic::fabs:
@@ -2066,7 +2066,7 @@ Constant *
 llvm::ConstantFoldCall(ImmutableCallSite CS, Function *F,
                        ArrayRef<Constant *> Operands,
                        const TargetLibraryInfo *TLI) {
-  if (CS.isNoBuiltin())
+  if (CS.isNoBuiltin() || CS.isStrictFP())
     return nullptr;
   if (!F->hasName())
     return nullptr;
@@ -2084,7 +2084,7 @@ llvm::ConstantFoldCall(ImmutableCallSite CS, Function *F,
 bool llvm::isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI) {
   // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
   // (and to some extent ConstantFoldScalarCall).
-  if (CS.isNoBuiltin())
+  if (CS.isNoBuiltin() || CS.isStrictFP())
     return false;
   Function *F = CS.getCalledFunction();
   if (!F)
diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp
index 90e0d6a216e..5ce55f52276 100644
--- a/lib/AsmParser/LLLexer.cpp
+++ b/lib/AsmParser/LLLexer.cpp
@@ -654,6 +654,7 @@ lltok::Kind LLLexer::LexIdentifier() {
   KEYWORD(ssp);
   KEYWORD(sspreq);
   KEYWORD(sspstrong);
+  KEYWORD(strictfp);
   KEYWORD(safestack);
   KEYWORD(sanitize_address);
   KEYWORD(sanitize_thread);
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index 13679ce1d25..828f873d37b 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -1121,6 +1121,7 @@ bool LLParser::ParseFnAttributeValuePairs(AttrBuilder &B,
       B.addAttribute(Attribute::SanitizeThread); break;
     case lltok::kw_sanitize_memory:
       B.addAttribute(Attribute::SanitizeMemory); break;
+    case lltok::kw_strictfp: B.addAttribute(Attribute::StrictFP); break;
     case lltok::kw_uwtable: B.addAttribute(Attribute::UWTable); break;
     case lltok::kw_writeonly: B.addAttribute(Attribute::WriteOnly); break;
 
@@ -1446,6 +1447,7 @@ bool LLParser::ParseOptionalParamAttrs(AttrBuilder &B) {
   case lltok::kw_sspreq:
   case lltok::kw_sspstrong:
   case lltok::kw_safestack:
+  case lltok::kw_strictfp:
   case lltok::kw_uwtable:
     HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute");
     break;
@@ -1537,6 +1539,7 @@ bool LLParser::ParseOptionalReturnAttrs(AttrBuilder &B) {
   case lltok::kw_sspreq:
   case lltok::kw_sspstrong:
   case lltok::kw_safestack:
+  case lltok::kw_strictfp:
   case lltok::kw_uwtable:
     HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute");
     break;
diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h
index 0f3707ba0d1..09e502d7a35 100644
--- a/lib/AsmParser/LLToken.h
+++ b/lib/AsmParser/LLToken.h
@@ -207,6 +207,7 @@ enum Kind {
   kw_sret,
   kw_sanitize_thread,
   kw_sanitize_memory,
+  kw_strictfp,
   kw_swifterror,
   kw_swiftself,
   kw_uwtable,
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index f8cdbcf530c..d526760c966 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1137,6 +1137,7 @@ static uint64_t getRawAttributeMask(Attribute::AttrKind Val) {
   case Attribute::SwiftError:      return 1ULL << 52;
   case Attribute::WriteOnly:       return 1ULL << 53;
   case Attribute::Speculatable:    return 1ULL << 54;
+  case Attribute::StrictFP:        return 1ULL << 55;
   case Attribute::Dereferenceable:
     llvm_unreachable("dereferenceable attribute not supported in raw format");
     break;
@@ -1345,6 +1346,8 @@ static Attribute::AttrKind getAttrFromCode(uint64_t Code) {
     return Attribute::StackProtectStrong;
   case bitc::ATTR_KIND_SAFESTACK:
     return Attribute::SafeStack;
+  case bitc::ATTR_KIND_STRICT_FP:
+    return Attribute::StrictFP;
   case bitc::ATTR_KIND_STRUCT_RET:
     return Attribute::StructRet;
   case bitc::ATTR_KIND_SANITIZE_ADDRESS:
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index ac54f16136b..fc58718df1f 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -610,6 +610,8 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
     return bitc::ATTR_KIND_STACK_PROTECT_STRONG;
   case Attribute::SafeStack:
     return bitc::ATTR_KIND_SAFESTACK;
+  case Attribute::StrictFP:
+    return bitc::ATTR_KIND_STRICT_FP;
   case Attribute::StructRet:
     return bitc::ATTR_KIND_STRUCT_RET;
   case Attribute::SanitizeAddress:
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index c7da2460356..4c8c470347b 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6555,10 +6555,10 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
 
     // Check for well-known libc/libm calls. If the function is internal, it
    // can't be a library call. Don't do the check if marked as nobuiltin for
-    // some reason.
+    // some reason or the call site requires strict floating point semantics.
     LibFunc Func;
-    if (!I.isNoBuiltin() && !F->hasLocalLinkage() && F->hasName() &&
-        LibInfo->getLibFunc(*F, Func) &&
+    if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
+        F->hasName() && LibInfo->getLibFunc(*F, Func) &&
         LibInfo->hasOptimizedCodeGen(Func)) {
       switch (Func) {
       default: break;
diff --git a/lib/IR/Attributes.cpp b/lib/IR/Attributes.cpp
index 8f2e641d64b..54b9761bd03 100644
--- a/lib/IR/Attributes.cpp
+++ b/lib/IR/Attributes.cpp
@@ -327,6 +327,8 @@ std::string Attribute::getAsString(bool InAttrGrp) const {
     return "sspstrong";
   if (hasAttribute(Attribute::SafeStack))
     return "safestack";
+  if (hasAttribute(Attribute::StrictFP))
+    return "strictfp";
   if (hasAttribute(Attribute::StructRet))
     return "sret";
   if (hasAttribute(Attribute::SanitizeThread))
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index e7362ea99b5..84c93c440fd 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -1377,6 +1377,7 @@ static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
   case Attribute::InaccessibleMemOrArgMemOnly:
   case Attribute::AllocSize:
   case Attribute::Speculatable:
+  case Attribute::StrictFP:
     return true;
   default:
     break;
diff --git a/lib/Transforms/IPO/ForceFunctionAttrs.cpp b/lib/Transforms/IPO/ForceFunctionAttrs.cpp
index 96871213820..e48c3d73237 100644
--- a/lib/Transforms/IPO/ForceFunctionAttrs.cpp
+++ b/lib/Transforms/IPO/ForceFunctionAttrs.cpp
@@ -57,6 +57,7 @@ static Attribute::AttrKind parseAttrKind(StringRef Kind) {
       .Case("ssp", Attribute::StackProtect)
       .Case("sspreq", Attribute::StackProtectReq)
       .Case("sspstrong", Attribute::StackProtectStrong)
+      .Case("strictfp", Attribute::StrictFP)
       .Case("uwtable", Attribute::UWTable)
       .Default(Attribute::None);
 }
diff --git a/lib/Transforms/Utils/SimplifyLibCalls.cpp b/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 2a1a82183db..8257dbcf858 100644
--- a/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -2041,13 +2041,103 @@ Value *LibCallSimplifier::optimizeStringMemoryLibCall(CallInst *CI,
   return nullptr;
 }
 
+Value *LibCallSimplifier::optimizeFloatingPointLibCall(CallInst *CI,
+                                                       LibFunc Func,
+                                                       IRBuilder<> &Builder) {
+  // Don't optimize calls that require strict floating point semantics.
+  if (CI->isStrictFP())
+    return nullptr;
+
+  switch (Func) {
+  case LibFunc_cosf:
+  case LibFunc_cos:
+  case LibFunc_cosl:
+    return optimizeCos(CI, Builder);
+  case LibFunc_sinpif:
+  case LibFunc_sinpi:
+  case LibFunc_cospif:
+  case LibFunc_cospi:
+    return optimizeSinCosPi(CI, Builder);
+  case LibFunc_powf:
+  case LibFunc_pow:
+  case LibFunc_powl:
+    return optimizePow(CI, Builder);
+  case LibFunc_exp2l:
+  case LibFunc_exp2:
+  case LibFunc_exp2f:
+    return optimizeExp2(CI, Builder);
+  case LibFunc_fabsf:
+  case LibFunc_fabs:
+  case LibFunc_fabsl:
+    return replaceUnaryCall(CI, Builder, Intrinsic::fabs);
+  case LibFunc_sqrtf:
+  case LibFunc_sqrt:
+  case LibFunc_sqrtl:
+    return optimizeSqrt(CI, Builder);
+  case LibFunc_log:
+  case LibFunc_log10:
+  case LibFunc_log1p:
+  case LibFunc_log2:
+  case LibFunc_logb:
+    return optimizeLog(CI, Builder);
+  case LibFunc_tan:
+  case LibFunc_tanf:
+  case LibFunc_tanl:
+    return optimizeTan(CI, Builder);
+  case LibFunc_ceil:
+    return replaceUnaryCall(CI, Builder, Intrinsic::ceil);
+  case LibFunc_floor:
+    return replaceUnaryCall(CI, Builder, Intrinsic::floor);
+  case LibFunc_round:
+    return replaceUnaryCall(CI, Builder, Intrinsic::round);
+  case LibFunc_nearbyint:
+    return replaceUnaryCall(CI, Builder, Intrinsic::nearbyint);
+  case LibFunc_rint:
+    return replaceUnaryCall(CI, Builder, Intrinsic::rint);
+  case LibFunc_trunc:
+    return replaceUnaryCall(CI, Builder, Intrinsic::trunc);
+  case LibFunc_acos:
+  case LibFunc_acosh:
+  case LibFunc_asin:
+  case LibFunc_asinh:
+  case LibFunc_atan:
+  case LibFunc_atanh:
+  case LibFunc_cbrt:
+  case LibFunc_cosh:
+  case LibFunc_exp:
+  case LibFunc_exp10:
+  case LibFunc_expm1:
+  case LibFunc_sin:
+  case LibFunc_sinh:
+  case LibFunc_tanh:
+    if (UnsafeFPShrink && hasFloatVersion(CI->getCalledFunction()->getName()))
+      return optimizeUnaryDoubleFP(CI, Builder, true);
+    return nullptr;
+  case LibFunc_copysign:
+    if (hasFloatVersion(CI->getCalledFunction()->getName()))
+      return optimizeBinaryDoubleFP(CI, Builder);
+    return nullptr;
+  case LibFunc_fminf:
+  case LibFunc_fmin:
+  case LibFunc_fminl:
+  case LibFunc_fmaxf:
+  case LibFunc_fmax:
+  case LibFunc_fmaxl:
+    return optimizeFMinFMax(CI, Builder);
+  default:
+    return nullptr;
+  }
+}
+
 Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
+  // TODO: Split out the code below that operates on FP calls so that
+  //       we can allow non-FP calls with the StrictFP attribute to be
+  //       optimized.
   if (CI->isNoBuiltin())
     return nullptr;
 
   LibFunc Func;
   Function *Callee = CI->getCalledFunction();
-  StringRef FuncName = Callee->getName();
 
   SmallVector<OperandBundleDef, 2> OpBundles;
   CI->getOperandBundlesAsDefs(OpBundles);
@@ -2055,6 +2145,8 @@ Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
   bool isCallingConvC = isCallingConvCCompatible(CI);
 
   // Command-line parameter overrides instruction attribute.
+  // This can't be moved to optimizeFloatingPointLibCall() because it may be
+  // used by the intrinsic optimizations.
   if (EnableUnsafeFPShrink.getNumOccurrences() > 0)
     UnsafeFPShrink = EnableUnsafeFPShrink;
   else if (isa<FPMathOperator>(CI) && CI->hasUnsafeAlgebra())
@@ -2064,6 +2156,8 @@ Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
     if (!isCallingConvC)
       return nullptr;
+    // The FP intrinsics have corresponding constrained versions so we don't
+    // need to check for the StrictFP attribute here.
     switch (II->getIntrinsicID()) {
     case Intrinsic::pow:
       return optimizePow(CI, Builder);
@@ -2104,32 +2198,9 @@ Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
       return nullptr;
   if (Value *V = optimizeStringMemoryLibCall(CI, Builder))
     return V;
+  if (Value *V = optimizeFloatingPointLibCall(CI, Func, Builder))
+    return V;
   switch (Func) {
-  case LibFunc_cosf:
-  case LibFunc_cos:
-  case LibFunc_cosl:
-    return optimizeCos(CI, Builder);
-  case LibFunc_sinpif:
-  case LibFunc_sinpi:
-  case LibFunc_cospif:
-  case LibFunc_cospi:
-    return optimizeSinCosPi(CI, Builder);
-  case LibFunc_powf:
-  case LibFunc_pow:
-  case LibFunc_powl:
-    return optimizePow(CI, Builder);
-  case LibFunc_exp2l:
-  case LibFunc_exp2:
-  case LibFunc_exp2f:
-    return optimizeExp2(CI, Builder);
-  case LibFunc_fabsf:
-  case LibFunc_fabs:
-  case LibFunc_fabsl:
-    return replaceUnaryCall(CI, Builder, Intrinsic::fabs);
-  case LibFunc_sqrtf:
-  case LibFunc_sqrt:
-  case LibFunc_sqrtl:
-    return optimizeSqrt(CI, Builder);
   case LibFunc_ffs:
   case LibFunc_ffsl:
   case LibFunc_ffsll:
@@ -2158,18 +2229,8 @@ Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
     return optimizeFWrite(CI, Builder);
   case LibFunc_fputs:
     return optimizeFPuts(CI, Builder);
-  case LibFunc_log:
-  case LibFunc_log10:
-  case LibFunc_log1p:
-  case LibFunc_log2:
-  case LibFunc_logb:
-    return optimizeLog(CI, Builder);
   case LibFunc_puts:
     return optimizePuts(CI, Builder);
-  case LibFunc_tan:
-  case LibFunc_tanf:
-  case LibFunc_tanl:
-    return optimizeTan(CI, Builder);
   case LibFunc_perror:
     return optimizeErrorReporting(CI, Builder);
   case LibFunc_vfprintf:
@@ -2177,46 +2238,6 @@ Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
     return optimizeErrorReporting(CI, Builder, 0);
   case LibFunc_fputc:
     return optimizeErrorReporting(CI, Builder, 1);
-  case LibFunc_ceil:
-    return replaceUnaryCall(CI, Builder, Intrinsic::ceil);
-  case LibFunc_floor:
-    return replaceUnaryCall(CI, Builder, Intrinsic::floor);
-  case LibFunc_round:
-    return replaceUnaryCall(CI, Builder, Intrinsic::round);
-  case LibFunc_nearbyint:
-    return replaceUnaryCall(CI, Builder, Intrinsic::nearbyint);
-  case LibFunc_rint:
-    return replaceUnaryCall(CI, Builder, Intrinsic::rint);
-  case LibFunc_trunc:
-    return replaceUnaryCall(CI, Builder, Intrinsic::trunc);
-  case LibFunc_acos:
-  case LibFunc_acosh:
-  case LibFunc_asin:
-  case LibFunc_asinh:
-  case LibFunc_atan:
-  case LibFunc_atanh:
-  case LibFunc_cbrt:
-  case LibFunc_cosh:
-  case LibFunc_exp:
-  case LibFunc_exp10:
-  case LibFunc_expm1:
-  case LibFunc_sin:
-  case LibFunc_sinh:
-  case LibFunc_tanh:
-    if (UnsafeFPShrink && hasFloatVersion(FuncName))
-      return optimizeUnaryDoubleFP(CI, Builder, true);
-    return nullptr;
-  case LibFunc_copysign:
-    if (hasFloatVersion(FuncName))
-      return optimizeBinaryDoubleFP(CI, Builder);
-    return nullptr;
-  case LibFunc_fminf:
-  case LibFunc_fmin:
-  case LibFunc_fminl:
-  case LibFunc_fmaxf:
-  case LibFunc_fmax:
-  case LibFunc_fmaxl:
-    return optimizeFMinFMax(CI, Builder);
   default:
     return nullptr;
   }
diff --git a/test/Bitcode/compatibility.ll b/test/Bitcode/compatibility.ll
index 7df1535a692..367158d206d 100644
--- a/test/Bitcode/compatibility.ll
+++ b/test/Bitcode/compatibility.ll
@@ -608,6 +608,7 @@ declare void @f.inaccessiblememonly() inaccessiblememonly
 ; CHECK: declare void @f.inaccessiblememonly() #33
 declare void @f.inaccessiblemem_or_argmemonly() inaccessiblemem_or_argmemonly
 ; CHECK: declare void @f.inaccessiblemem_or_argmemonly() #34
+declare void @f.strictfp() #35
 
 ; Functions -- section
 declare void @f.section() section "80"
@@ -1252,6 +1253,9 @@ exit:
   call void @f.nobuiltin() builtin
   ; CHECK: call void @f.nobuiltin() #42
 
+  call void @f.strictfp() strictfp
+  ; CHECK: call void @f.strictfp() #43
+
   call fastcc noalias i32* @f.noalias() noinline
   ; CHECK: call fastcc noalias i32* @f.noalias() #12
   tail call ghccc nonnull i32* @f.nonnull() minsize
@@ -1670,6 +1674,7 @@ define i8** @constexpr() {
 ; CHECK: attributes #40 = { writeonly }
 ; CHECK: attributes #41 = { speculatable }
 ; CHECK: attributes #42 = { builtin }
+; CHECK: attributes #43 = { strictfp }
 
 ;; Metadata
 
diff --git a/test/Transforms/DCE/calls-errno.ll b/test/Transforms/DCE/calls-errno.ll
index 415caae0fe6..20ee0d06d3a 100644
--- a/test/Transforms/DCE/calls-errno.ll
+++ b/test/Transforms/DCE/calls-errno.ll
@@ -76,6 +76,10 @@ entry:
 ; CHECK-NEXT: %cos3 = call double @cos(double 0.000000e+00)
   %cos3 = call double @cos(double 0.000000e+00) nobuiltin
 
+; cos(1) strictfp sets FP status flags
+; CHECK-NEXT: %cos4 = call double @cos(double 1.000000e+00)
+  %cos4 = call double @cos(double 1.000000e+00) strictfp
+
 ; pow(0, 1) is 0
   %pow1 = call double @pow(double 0x7FF0000000000000, double 1.000000e+00)
diff --git a/test/Transforms/InstCombine/constant-fold-libfunc.ll b/test/Transforms/InstCombine/constant-fold-libfunc.ll
index c969b65a4e7..5d1aa821ea1 100644
--- a/test/Transforms/InstCombine/constant-fold-libfunc.ll
+++ b/test/Transforms/InstCombine/constant-fold-libfunc.ll
@@ -12,9 +12,20 @@ define double @test_simplify_acos() {
   ret double %pi
 }
 
+; Check that we don't constant fold calls marked nobuiltin.
+
 define double @test_acos_nobuiltin() {
 ; CHECK-LABEL: @test_acos_nobuiltin
   %pi = call double @acos(double -1.000000e+00) nobuiltin
 ; CHECK: call double @acos(double -1.000000e+00)
   ret double %pi
 }
+
+; Check that we don't constant fold strictfp results that require rounding.
+
+define double @test_acos_strictfp() {
+; CHECK-LABEL: @test_acos_strictfp
+  %pi = call double @acos(double -1.000000e+00) strictfp
+; CHECK: call double @acos(double -1.000000e+00)
+  ret double %pi
+}
diff --git a/test/Transforms/InstCombine/memcpy-1.ll b/test/Transforms/InstCombine/memcpy-1.ll
index 76dce279b89..b373ea2619f 100644
--- a/test/Transforms/InstCombine/memcpy-1.ll
+++ b/test/Transforms/InstCombine/memcpy-1.ll
@@ -17,3 +17,12 @@ define i8* @test_simplify1(i8* %mem1, i8* %mem2, i32 %size) {
   ret i8* %ret
 }
 
+; Verify that the strictfp attr doesn't block this optimization.
+
+define i8* @test_simplify2(i8* %mem1, i8* %mem2, i32 %size) {
+; CHECK-LABEL: @test_simplify2(
+  %ret = call i8* @memcpy(i8* %mem1, i8* %mem2, i32 %size) strictfp
+; CHECK: call void @llvm.memcpy
+  ret i8* %ret
+; CHECK: ret i8* %mem1
+}
diff --git a/utils/vim/syntax/llvm.vim b/utils/vim/syntax/llvm.vim
index d047578802b..0b84116b40b 100644
--- a/utils/vim/syntax/llvm.vim
+++ b/utils/vim/syntax/llvm.vim
@@ -144,6 +144,7 @@ syn keyword llvmKeyword
       \ ssp
       \ sspreq
      \ sspstrong
+      \ strictfp
       \ swiftcc
       \ tail
       \ target
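
For reference, a minimal IR sketch of the effect of this patch (not part of the change itself; the function name @cos_one_strict is illustrative): a call site carrying the strictfp attribute is now skipped by canConstantFoldCallTo, isMathLibCallNoop, and the library-call simplifier, so the libm call survives and its rounding-mode dependence and FP status-flag side effects are preserved.

; Illustrative only: a strictfp call site that the optimizer must leave alone.
declare double @cos(double)

define double @cos_one_strict() {
entry:
  ; Without strictfp this call could be constant folded; with it, the call
  ; to @cos is kept so the inexact flag it may raise is not lost.
  %r = call double @cos(double 1.000000e+00) strictfp
  ret double %r
}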