diff --git a/CMakeLists.txt b/CMakeLists.txt index f460892e8f4..abca779c3ac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -191,7 +191,6 @@ if(CLR_CMAKE_PLATFORM_XPLAT) -DUNICODE -D_SAFECRT_USE_CPP_OVERLOADS=1 -D__STDC_WANT_LIB_EXT1__=1 - -DDISABLE_JIT=1 # xplat-todo: enable the JIT for Linux ) set(CMAKE_CXX_STANDARD 11) @@ -254,7 +253,15 @@ if(CLR_CMAKE_PLATFORM_XPLAT) add_compile_options( -fasm-blocks -fms-extensions + -fwrapv # Treat signed integer overflow as two's complement ) + + # Clang -fsanitize. + if (CLANG_SANITIZE_SH) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=${CLANG_SANITIZE_SH}") + set(CMAKE_CXX_LINK_FLAGS "${CMAKE_CXX_LINK_FLAGS} -fsanitize=${CLANG_SANITIZE_SH}") + unset(CLANG_SANITIZE_SH CACHE) # don't cache + endif() endif(CLR_CMAKE_PLATFORM_XPLAT) if(CMAKE_BUILD_TYPE STREQUAL Debug) @@ -287,9 +294,19 @@ if(CLR_CMAKE_PLATFORM_XPLAT) add_definitions(-DFEATURE_PAL) endif(CLR_CMAKE_PLATFORM_XPLAT) -if(WITHOUT_FEATURES) - add_definitions(${WITHOUT_FEATURES}) -endif(WITHOUT_FEATURES) +if(NO_JIT_SH + OR CLR_CMAKE_PLATFORM_DARWIN) # TODO: JIT for OSX + unset(NO_JIT_SH CACHE) # don't cache + unset(BuildJIT CACHE) # also clear it just in case + add_definitions(-DDISABLE_JIT=1) +else() + set(BuildJIT 1) +endif() + +if(WITHOUT_FEATURES_SH) + unset(WITHOUT_FEATURES_SH CACHE) # don't cache + add_definitions(${WITHOUT_FEATURES_SH}) +endif(WITHOUT_FEATURES_SH) enable_language(ASM) diff --git a/bin/ChakraCore/TestHooks.cpp b/bin/ChakraCore/TestHooks.cpp index 7933f997f3b..dd7e30e54b8 100644 --- a/bin/ChakraCore/TestHooks.cpp +++ b/bin/ChakraCore/TestHooks.cpp @@ -48,7 +48,7 @@ void __stdcall ConnectJITServer(HANDLE processHandle, void* serverSecurityDescri ThreadContext::SetJITConnectionInfo(processHandle, serverSecurityDescriptor, connectionId); } #endif -#endif +#endif void __stdcall NotifyUnhandledException(PEXCEPTION_POINTERS exceptionInfo) { @@ -162,7 +162,7 @@ HRESULT OnChakraCoreLoaded() #undef FLAG_NumberSet #undef FLAG_NumberPairSet #undef FLAG_NumberRange -#if ENABLE_NATIVE_CODEGEN +#if ENABLE_NATIVE_CODEGEN && _WIN32 ConnectJITServer, #endif NotifyUnhandledException diff --git a/bin/ch/ChakraRtInterface.cpp b/bin/ch/ChakraRtInterface.cpp index 59ff21a3a05..2674e810ba1 100644 --- a/bin/ch/ChakraRtInterface.cpp +++ b/bin/ch/ChakraRtInterface.cpp @@ -180,7 +180,18 @@ void ChakraRTInterface::UnloadChakraDll(HINSTANCE library) { pDllCanUnloadNow(); } +#ifdef _WIN32 UnloadChakraCore(library); +#else // !_WIN32 + // PAL thread shutdown needs more time after execution completion. + // Do not FreeLibrary. Invoke DllMain(DLL_PROCESS_DETACH) directly. + typedef BOOL (__stdcall *PDLLMAIN)(HINSTANCE, DWORD, LPVOID); + PDLLMAIN pDllMain = (PDLLMAIN) GetChakraCoreSymbol(library, "DllMain"); + if (pDllMain) + { + pDllMain(library, DLL_PROCESS_DETACH, NULL); + } +#endif #endif } diff --git a/bin/ch/Debugger.cpp b/bin/ch/Debugger.cpp index b8f9009e638..77b36b14d37 100644 --- a/bin/ch/Debugger.cpp +++ b/bin/ch/Debugger.cpp @@ -473,7 +473,7 @@ bool Debugger::CompareOrWriteBaselineFile(LPCSTR fileName) IfJsrtErrorFailLogAndRetFalse(ChakraRTInterface::JsStringToPointerUtf8Copy(result, &baselineData, &baselineDataLength)); char16 baselineFilename[256]; - swprintf_s(baselineFilename, _countof(baselineFilename), HostConfigFlags::flags.dbgbaselineIsEnabled ? _u("%S.dbg.baseline.rebase") : _u("%S.dbg.baseline"), fileName); + swprintf_s(baselineFilename, HostConfigFlags::flags.dbgbaselineIsEnabled ? 
_u("%S.dbg.baseline.rebase") : _u("%S.dbg.baseline"), fileName); FILE *file = nullptr; if (_wfopen_s(&file, baselineFilename, _u("wt")) != 0) diff --git a/bin/ch/ch.cpp b/bin/ch/ch.cpp index 324026c1a44..2fc4aedd578 100644 --- a/bin/ch/ch.cpp +++ b/bin/ch/ch.cpp @@ -37,7 +37,7 @@ int HostExceptionFilter(int exceptionCode, _EXCEPTION_POINTERS *ep) { ChakraRTInterface::NotifyUnhandledException(ep); -#if ENABLE_NATIVE_CODEGEN +#if ENABLE_NATIVE_CODEGEN && _WIN32 JITProcessManager::TerminateJITServer(); #endif bool crashOnException = false; diff --git a/build.sh b/build.sh index d9d68e4c63b..700c03be894 100755 --- a/build.sh +++ b/build.sh @@ -37,10 +37,13 @@ PRINT_USAGE() { echo " --icu=PATH Path to ICU include folder (see example below)" echo " -j [N], --jobs[=N] Multicore build, allow N jobs at once" echo " -n, --ninja Build with ninja instead of make" - echo " --no-icu Compile without unicode/icu support" + echo " --no-icu Compile without unicode/icu support" + echo " --no-jit Disable JIT" echo " --xcode Generate XCode project" echo " -t, --test-build Test build (by default Release build)" echo " --static Build as static library (by default shared library)" + echo " --sanitize=CHECKS Build with clang -fsanitize checks," + echo " e.g. undefined,signed-integer-overflow" echo " -v, --verbose Display verbose output including all options" echo " --create-deb=V Create .deb package with given V version" echo " --without=FEATURE,FEATURE,..." @@ -62,8 +65,10 @@ BUILD_TYPE="Release" CMAKE_GEN= MAKE=make MULTICORE_BUILD="" +NO_JIT= ICU_PATH="-DICU_SETTINGS_RESET=1" STATIC_LIBRARY="-DSHARED_LIBRARY_SH=1" +SANITIZE= WITHOUT_FEATURES="" CREATE_DEB=0 ARCH="-DCC_TARGETS_AMD64_SH=1" @@ -191,6 +196,10 @@ while [[ $# -gt 0 ]]; do ICU_PATH="-DNO_ICU_PATH_GIVEN_SH=1" ;; + --no-jit) + NO_JIT="-DNO_JIT_SH=1" + ;; + --xcode) CMAKE_GEN="-G Xcode -DCC_XCODE_PROJECT=1" MAKE=0 @@ -205,13 +214,19 @@ while [[ $# -gt 0 ]]; do STATIC_LIBRARY="-DSTATIC_LIBRARY_SH=1" ;; + --sanitize=*) + SANITIZE=$1 + SANITIZE=${SANITIZE:11} # value after --sanitize= + SANITIZE="-DCLANG_SANITIZE_SH=${SANITIZE}" + ;; + --without=*) FEATURES=$1 FEATURES=${FEATURES:10} # value after --without= for x in ${FEATURES//,/ } # replace comma with space then split do if [[ "$WITHOUT_FEATURES" == "" ]]; then - WITHOUT_FEATURES="-DWITHOUT_FEATURES=" + WITHOUT_FEATURES="-DWITHOUT_FEATURES_SH=" else WITHOUT_FEATURES="$WITHOUT_FEATURES;" fi @@ -306,7 +321,8 @@ else fi echo Generating $BUILD_TYPE makefiles -cmake $CMAKE_GEN $CC_PREFIX $ICU_PATH $STATIC_LIBRARY $ARCH -DCMAKE_BUILD_TYPE=$BUILD_TYPE $WITHOUT_FEATURES ../.. +cmake $CMAKE_GEN $CC_PREFIX $ICU_PATH $STATIC_LIBRARY $ARCH \ + -DCMAKE_BUILD_TYPE=$BUILD_TYPE $SANITIZE $NO_JIT $WITHOUT_FEATURES ../.. _RET=$? if [[ $? == 0 ]]; then diff --git a/lib/Backend/Backend.cpp b/lib/Backend/Backend.cpp index 6e43951c934..de6ccd1e616 100644 --- a/lib/Backend/Backend.cpp +++ b/lib/Backend/Backend.cpp @@ -3,3 +3,7 @@ // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. 
//------------------------------------------------------------------------------------------------------- #include "Backend.h" + +#if !ENABLE_OOP_NATIVE_CODEGEN +JITManager JITManager::s_jitManager; // dummy object when OOP JIT disabled +#endif diff --git a/lib/Backend/Backend.h b/lib/Backend/Backend.h index 0e007cc53f3..9d852e1fc56 100644 --- a/lib/Backend/Backend.h +++ b/lib/Backend/Backend.h @@ -110,7 +110,9 @@ enum IRDumpFlags // BackEnd includes // +#ifdef _WIN32 #include "ChakraJIT.h" +#endif #include "JITTimeProfileInfo.h" #include "JITRecyclableObject.h" #include "JITTimeFixedField.h" diff --git a/lib/Backend/BackwardPass.cpp b/lib/Backend/BackwardPass.cpp index 241edbef8e0..723abcd4ab0 100644 --- a/lib/Backend/BackwardPass.cpp +++ b/lib/Backend/BackwardPass.cpp @@ -3311,13 +3311,13 @@ BackwardPass::ProcessNoImplicitCallDef(IR::Instr *const instr) else if( !( // LdFld or similar - dst->IsRegOpnd() && src->IsSymOpnd() && src->AsSymOpnd()->m_sym->IsPropertySym() || + (dst->IsRegOpnd() && src->IsSymOpnd() && src->AsSymOpnd()->m_sym->IsPropertySym()) || // StFld or similar. Don't transfer a field opnd from StFld into the reg opnd src unless the field's value type is // definitely array or object with array, because only those value types require implicit calls to be disabled as // long as they are live. Other definite value types only require implicit calls to be disabled as long as a live // field holds the value, which is up to the StFld when going backwards. - src->IsRegOpnd() && dst->GetValueType().IsArrayOrObjectWithArray() + (src->IsRegOpnd() && dst->GetValueType().IsArrayOrObjectWithArray()) ) || !GlobOpt::TransferSrcValue(instr)) { @@ -3972,8 +3972,8 @@ BackwardPass::UpdateArrayBailOutKind(IR::Instr *const instr) Assert(instr); Assert(instr->HasBailOutInfo()); - if (instr->m_opcode != Js::OpCode::StElemI_A && instr->m_opcode != Js::OpCode::StElemI_A_Strict && - instr->m_opcode != Js::OpCode::Memcopy && instr->m_opcode != Js::OpCode::Memset || + if ((instr->m_opcode != Js::OpCode::StElemI_A && instr->m_opcode != Js::OpCode::StElemI_A_Strict && + instr->m_opcode != Js::OpCode::Memcopy && instr->m_opcode != Js::OpCode::Memset) || !instr->GetDst()->IsIndirOpnd()) { return; @@ -5511,7 +5511,7 @@ BackwardPass::TrackIntUsage(IR::Instr *const instr) if (instr->ignoreNegativeZero || (instr->GetSrc1()->IsIntConstOpnd() && instr->GetSrc1()->AsIntConstOpnd()->GetValue() != 0) || - instr->GetSrc2()->IsIntConstOpnd() && instr->GetSrc2()->AsIntConstOpnd()->GetValue() != 0) + (instr->GetSrc2()->IsIntConstOpnd() && instr->GetSrc2()->AsIntConstOpnd()->GetValue() != 0)) { SetNegativeZeroDoesNotMatterIfLastUse(instr->GetSrc1()); SetNegativeZeroDoesNotMatterIfLastUse(instr->GetSrc2()); @@ -5567,7 +5567,7 @@ BackwardPass::TrackIntUsage(IR::Instr *const instr) if (instr->ignoreNegativeZero || (instr->GetSrc1()->IsIntConstOpnd() && instr->GetSrc1()->AsIntConstOpnd()->GetValue() != 0) || - instr->GetSrc2()->IsIntConstOpnd() && instr->GetSrc2()->AsIntConstOpnd()->GetValue() != 0) + (instr->GetSrc2()->IsIntConstOpnd() && instr->GetSrc2()->AsIntConstOpnd()->GetValue() != 0)) { SetNegativeZeroDoesNotMatterIfLastUse(instr->GetSrc1()); SetNegativeZeroDoesNotMatterIfLastUse(instr->GetSrc2()); @@ -5890,7 +5890,7 @@ BackwardPass::TrackIntUsage(IR::Instr *const instr) !!candidateSymsRequiredToBeLossyInt->Test(srcSymId); const bool srcNeedsToBeLossless = !currentBlock->intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Test(dstSym->m_id) || - srcIncluded && !srcIncludedAsLossy; + (srcIncluded && 
!srcIncludedAsLossy); if(srcIncluded) { if(srcIncludedAsLossy && srcNeedsToBeLossless) @@ -6362,7 +6362,7 @@ BackwardPass::TrackFloatSymEquivalence(IR::Instr *const instr) // throughout the function and checking just the sym's non-number bailout bit is insufficient. FloatSymEquivalenceClass *dstEquivalenceClass; if(dst->m_requiresBailOnNotNumber || - floatSymEquivalenceMap->TryGetValue(dst->m_id, &dstEquivalenceClass) && dstEquivalenceClass->RequiresBailOnNotNumber()) + (floatSymEquivalenceMap->TryGetValue(dst->m_id, &dstEquivalenceClass) && dstEquivalenceClass->RequiresBailOnNotNumber())) { instr->SetBailOutKind(IR::BailOutNumberOnly); } diff --git a/lib/Backend/BailOut.cpp b/lib/Backend/BailOut.cpp index 65d6c5c3e40..6868265df07 100644 --- a/lib/Backend/BailOut.cpp +++ b/lib/Backend/BailOut.cpp @@ -323,8 +323,8 @@ BailOutRecord::BailOutRecord(uint32 bailOutOffset, uint bailOutCacheIndex, IR::B Output::Flush(); \ } \ } -char16 * const trueString = _u("true"); -char16 * const falseString = _u("false"); +const char16 * const trueString = _u("true"); +const char16 * const falseString = _u("false"); #else #define REJIT_TESTTRACE(...) #define REJIT_KIND_TESTTRACE(...) @@ -1478,7 +1478,7 @@ BailOutRecord::BailOutHelper(Js::JavascriptCallStackLayout * layout, Js::ScriptF // Initialize the interpreter stack frame (constants) but not the param, the bailout record will restore the value #if DBG // Allocate invalidVar on GC instead of stack since this InterpreterStackFrame will out live the current real frame - Js::RecyclableObject* invalidVar = (Js::RecyclableObject*)RecyclerNewPlusLeaf(functionScriptContext->GetRecycler(), sizeof(Js::RecyclableObject), Js::Var); + Js::Var invalidVar = (Js::RecyclableObject*)RecyclerNewPlusLeaf(functionScriptContext->GetRecycler(), sizeof(Js::RecyclableObject), Js::Var); memset(invalidVar, 0xFE, sizeof(Js::RecyclableObject)); newInstance = setup.InitializeAllocation(allocation, false, false, loopHeaderArray, stackAddr, invalidVar); #else @@ -1528,7 +1528,7 @@ BailOutRecord::BailOutHelper(Js::JavascriptCallStackLayout * layout, Js::ScriptF // Initialize the interpreter stack frame (constants) but not the param, the bailout record will restore the value #if DBG - Js::RecyclableObject * invalidStackVar = (Js::RecyclableObject*)_alloca(sizeof(Js::RecyclableObject)); + Js::Var invalidStackVar = (Js::RecyclableObject*)_alloca(sizeof(Js::RecyclableObject)); memset(invalidStackVar, 0xFE, sizeof(Js::RecyclableObject)); newInstance = setup.InitializeAllocation(allocation, false, false, loopHeaderArray, frameStackAddr, invalidStackVar); #else diff --git a/lib/Backend/BailOut.h b/lib/Backend/BailOut.h index f0acdb43005..515d9a2b35f 100644 --- a/lib/Backend/BailOut.h +++ b/lib/Backend/BailOut.h @@ -27,8 +27,8 @@ class BailOutInfo bailOutOffset(bailOutOffset), bailOutFunc(bailOutFunc), byteCodeUpwardExposedUsed(nullptr), polymorphicCacheIndex((uint)-1), startCallCount(0), startCallInfo(nullptr), bailOutInstr(nullptr), totalOutParamCount(0), argOutSyms(nullptr), bailOutRecord(nullptr), wasCloned(false), isInvertedBranch(false), sharedBailOutKind(true), outParamInlinedArgSlot(nullptr), - liveVarSyms(nullptr), liveLosslessInt32Syms(nullptr), - liveFloat64Syms(nullptr), liveSimd128F4Syms(nullptr), + liveVarSyms(nullptr), liveLosslessInt32Syms(nullptr), + liveFloat64Syms(nullptr), liveSimd128F4Syms(nullptr), liveSimd128I4Syms(nullptr), liveSimd128I8Syms(nullptr), liveSimd128I16Syms(nullptr), liveSimd128U4Syms(nullptr), liveSimd128U8Syms(nullptr), liveSimd128U16Syms(nullptr), 
liveSimd128B4Syms(nullptr), liveSimd128B8Syms(nullptr), liveSimd128B16Syms(nullptr), @@ -203,8 +203,8 @@ class BailOutRecord template void FillNativeRegToByteCodeRegMap(uint (&nativeRegToByteCodeRegMap)[N]); - void IsOffsetNativeIntOrFloat(uint offsetIndex, int argOutSlotStart, bool * pIsFloat64, bool * pIsInt32, - bool * pIsSimd128F4, bool * pIsSimd128I4, bool * pIsSimd128I8, bool * pIsSimd128I16, + void IsOffsetNativeIntOrFloat(uint offsetIndex, int argOutSlotStart, bool * pIsFloat64, bool * pIsInt32, + bool * pIsSimd128F4, bool * pIsSimd128I4, bool * pIsSimd128I8, bool * pIsSimd128I16, bool * pIsSimd128U4, bool * pIsSimd128U8, bool * pIsSimd128U16, bool * pIsSimd128B4, bool * pIsSimd128B8, bool * pIsSimd128B16) const; template @@ -263,8 +263,8 @@ class BailOutRecord void * argoutRestoreAddress = nullptr) const; void RestoreValue(IR::BailOutKind bailOutKind, Js::JavascriptCallStackLayout * layout, Js::Var * values, Js::ScriptContext * scriptContext, bool fromLoopBody, Js::Var * registerSaves, Js::InterpreterStackFrame * newInstance, Js::Var* pArgumentsObject, void * argoutRestoreAddress, - uint regSlot, int offset, bool isLocal, bool isFloat64, bool isInt32, - bool isSimd128F4, bool isSimd128I4, bool isSimd128I8, bool isSimd128I16, + uint regSlot, int offset, bool isLocal, bool isFloat64, bool isInt32, + bool isSimd128F4, bool isSimd128I4, bool isSimd128I8, bool isSimd128I16, bool isSimd128U4, bool isSimd128U8, bool isSimd128U16, bool isSimd128B4, bool isSimd128B8, bool isSimd128B16 ) const; void RestoreInlineFrame(InlinedFrameLayout *inlinedFrame, Js::JavascriptCallStackLayout * layout, Js::Var * registerSaves); @@ -274,12 +274,14 @@ class BailOutRecord Js::JavascriptCallStackLayout *GetStackLayout() const; +public: struct StackLiteralBailOutRecord { Js::RegSlot regSlot; uint initFldCount; }; +protected: struct ArgOutOffsetInfo { BVFixed * argOutFloat64Syms; // Used for float-type-specialized ArgOut symbols. Index = [0 .. BailOutInfo::totalOutParamCount]. 
@@ -305,7 +307,7 @@ class BailOutRecord FixupNativeDataPointer(argOutFloat64Syms, chunkList); FixupNativeDataPointer(argOutLosslessInt32Syms, chunkList); FixupNativeDataPointer(argOutSimd128F4Syms, chunkList); - FixupNativeDataPointer(argOutSimd128I4Syms, chunkList); + FixupNativeDataPointer(argOutSimd128I4Syms, chunkList); FixupNativeDataPointer(argOutSimd128I8Syms, chunkList); FixupNativeDataPointer(argOutSimd128I16Syms, chunkList); FixupNativeDataPointer(argOutSimd128U4Syms, chunkList); @@ -321,7 +323,7 @@ class BailOutRecord int* outParamOffsetsStart = outParamOffsets - argOutSymStart; NativeCodeData::AddFixupEntry(outParamOffsets, outParamOffsetsStart, &this->outParamOffsets, this, chunkList); - + } }; @@ -469,8 +471,8 @@ struct GlobalBailOutRecordDataTable bool hasNonSimpleParams; bool hasStackArgOpt; void Finalize(NativeCodeData::Allocator *allocator, JitArenaAllocator *tempAlloc); - void AddOrUpdateRow(JitArenaAllocator *allocator, uint32 bailOutRecordId, uint32 regSlot, bool isFloat, bool isInt, - bool isSimd128F4, bool isSimd128I4, bool isSimd128I8, bool isSimd128I16, bool isSimd128U4, bool isSimd128U8, bool isSimd128U16, bool isSimd128B4, bool isSimd128B8, bool isSimd128B16, + void AddOrUpdateRow(JitArenaAllocator *allocator, uint32 bailOutRecordId, uint32 regSlot, bool isFloat, bool isInt, + bool isSimd128F4, bool isSimd128I4, bool isSimd128I8, bool isSimd128I16, bool isSimd128U4, bool isSimd128U8, bool isSimd128U16, bool isSimd128B4, bool isSimd128B8, bool isSimd128B16, int32 offset, uint *lastUpdatedRowIndex); template @@ -522,51 +524,52 @@ struct GlobalBailOutRecordDataTable } }; #if DBG -template<> void NativeCodeData::AllocatorT::Fixup(void* pThis, NativeCodeData::DataChunk* chunkList) {} -template<> void NativeCodeData::AllocatorT::Fixup(void* pThis, NativeCodeData::DataChunk* chunkList) {} -template<> void NativeCodeData::AllocatorT::Fixup(void* pThis, NativeCodeData::DataChunk* chunkList) {} +template<> inline void NativeCodeData::AllocatorT::Fixup(void* pThis, NativeCodeData::DataChunk* chunkList) {} +template<> inline void NativeCodeData::AllocatorT::Fixup(void* pThis, NativeCodeData::DataChunk* chunkList) {} +template<> inline void NativeCodeData::AllocatorT::Fixup(void* pThis, NativeCodeData::DataChunk* chunkList) {} #else template<> -char* +inline char* NativeCodeData::AllocatorT::Alloc(size_t requestedBytes) { return __super::Alloc(requestedBytes); } template<> -char* +inline char* NativeCodeData::AllocatorT::AllocZero(size_t requestedBytes) { return __super::AllocZero(requestedBytes); } template<> -char* +inline char* NativeCodeData::AllocatorT::Alloc(size_t requestedBytes) { return __super::Alloc(requestedBytes); } template<> -char* +inline char* NativeCodeData::AllocatorT::AllocZero(size_t requestedBytes) { return __super::AllocZero(requestedBytes); } template<> -char* +inline char* NativeCodeData::AllocatorT::Alloc(size_t requestedBytes) { return __super::Alloc(requestedBytes); } template<> -char* +inline char* NativeCodeData::AllocatorT::AllocZero(size_t requestedBytes) { return __super::AllocZero(requestedBytes); } #endif -template<> void NativeCodeData::AllocatorT::Fixup(void* pThis, NativeCodeData::DataChunk* chunkList) +template<> +inline void NativeCodeData::AllocatorT::Fixup(void* pThis, NativeCodeData::DataChunk* chunkList) { // for every pointer needs to update the table NativeCodeData::AddFixupEntryForPointerArray(pThis, chunkList); diff --git a/lib/Backend/CMakeLists.txt b/lib/Backend/CMakeLists.txt index 603ed0a89fe..bc78fce764d 100644 --- 
a/lib/Backend/CMakeLists.txt +++ b/lib/Backend/CMakeLists.txt @@ -1,6 +1,8 @@ -add_library (Chakra.Backend +add_library (Chakra.Backend OBJECT AgenPeeps.cpp + AsmJsJITInfo.cpp Backend.cpp + BackendApi.cpp BackendOpCodeAttrAsmJs.cpp BackwardPass.cpp BailOut.cpp @@ -10,10 +12,13 @@ add_library (Chakra.Backend CodeGenWorkItem.cpp DbCheckPostLower.cpp Debug.cpp + EhFrame.cpp EmitBuffer.cpp Encoder.cpp FlowGraph.cpp Func.cpp + FunctionJITRuntimeInfo.cpp + FunctionJITTimeInfo.cpp GlobOpt.cpp GlobOptBailOut.cpp GlobOptExpr.cpp @@ -32,6 +37,18 @@ add_library (Chakra.Backend InliningHeuristics.cpp IntBounds.cpp InterpreterThunkEmitter.cpp + JITObjTypeSpecFldInfo.cpp + JITOutput.cpp + JITTimeConstructorCache.cpp + JITTimeFixedField.cpp + JITTimeFunctionBody.cpp + JITTimePolymorphicInlineCache.cpp + JITTimePolymorphicInlineCacheInfo.cpp + JITTimeProfileInfo.cpp + JITTimeScriptContext.cpp + JITTimeWorkItem.cpp + JITType.cpp + JITTypeHandler.cpp JnHelperMethod.cpp LinearScan.cpp Lower.cpp @@ -48,6 +65,8 @@ add_library (Chakra.Backend Region.cpp SccLiveness.cpp Security.cpp + ServerScriptContext.cpp + ServerThreadContext.cpp SimpleJitProfilingHelpers.cpp SimpleLayout.cpp SwitchIRBuilder.cpp @@ -55,24 +74,34 @@ add_library (Chakra.Backend SymTable.cpp TempTracker.cpp ValueRelativeOffset.cpp - amd64\EncoderMD.cpp - amd64\LinearScanMD.cpp - amd64\LowererMDArch.cpp - amd64\PeepsMD.cpp - amd64\PrologEncoderMD.cpp - arm64\EncoderMD.cpp - arm64\LowerMD.cpp - arm\EncoderMD.cpp - arm\LegalizeMD.cpp - arm\LinearScanMD.cpp - arm\LowerMD.cpp - arm\PeepsMD.cpp - arm\UnwindInfoManager.cpp - i386\EncoderMD.cpp - i386\LinearScanMD.cpp - i386\LowererMDArch.cpp - i386\PeepsMD.cpp + amd64/EncoderMD.cpp + amd64/LinearScanMD.cpp + amd64/LowererMDArch.cpp + amd64/PeepsMD.cpp + amd64/PrologEncoderMD.cpp + amd64/LinearScanMdA.S + amd64/Thunks.S +# arm64/EncoderMD.cpp +# arm64/LowerMD.cpp +# arm/EncoderMD.cpp +# arm/LegalizeMD.cpp +# arm/LinearScanMD.cpp +# arm/LowerMD.cpp +# arm/PeepsMD.cpp +# arm/UnwindInfoManager.cpp +# i386/EncoderMD.cpp +# i386/LinearScanMD.cpp +# i386/LowererMDArch.cpp +# i386/PeepsMD.cpp ) target_include_directories ( - Chakra.Backend PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + Chakra.Backend PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} + amd64 + ../Common + ../JITIDL + ../Runtime + ../Runtime/ByteCode + ../Runtime/Math + ../Parser + ) diff --git a/lib/Backend/CodeGenWorkItem.h b/lib/Backend/CodeGenWorkItem.h index f33bfe0b095..3788d3c5df2 100644 --- a/lib/Backend/CodeGenWorkItem.h +++ b/lib/Backend/CodeGenWorkItem.h @@ -227,7 +227,7 @@ struct JsFunctionCodeGen sealed : public CodeGenWorkItem void GetEntryPointAddress(void** entrypoint, ptrdiff_t *size) override { Assert(entrypoint); - *entrypoint = this->GetEntryPoint()->jsMethod; + *entrypoint = (void*)this->GetEntryPoint()->jsMethod; *size = this->GetEntryPoint()->GetCodeSize(); } diff --git a/lib/Backend/EhFrame.cpp b/lib/Backend/EhFrame.cpp new file mode 100644 index 00000000000..c4344dd2688 --- /dev/null +++ b/lib/Backend/EhFrame.cpp @@ -0,0 +1,228 @@ +//------------------------------------------------------------------------------------------------------- +// Copyright (C) Microsoft. All rights reserved. +// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. 
+//-------------------------------------------------------------------------------------------------------
+#include "Backend.h"
+#include "EhFrame.h"
+
+// AMD64 ABI -- DWARF register number mapping
+static const ubyte DWARF_RegNum[] =
+{
+    // Exactly same order as RegList.h!
+    -1, // NOREG,
+    0,  // RAX,
+    2,  // RCX,
+    1,  // RDX,
+    3,  // RBX,
+    7,  // RSP,
+    6,  // RBP,
+    4,  // RSI,
+    5,  // RDI,
+    8,  // R8,
+    9,  // R9,
+    10, // R10,
+    11, // R11,
+    12, // R12,
+    13, // R13,
+    14, // R14,
+    15, // R15,
+    17, // XMM0,
+    18, // XMM1,
+    19, // XMM2,
+    20, // XMM3,
+    21, // XMM4,
+    22, // XMM5,
+    23, // XMM6,
+    24, // XMM7,
+    25, // XMM8,
+    26, // XMM9,
+    27, // XMM10,
+    28, // XMM11,
+    29, // XMM12,
+    30, // XMM13,
+    31, // XMM14,
+    32, // XMM15,
+};
+
+static const ubyte DWARF_RegRA = 16;
+
+ubyte GetDwarfRegNum(ubyte regNum)
+{
+    return DWARF_RegNum[regNum];
+}
+
+// Encode into ULEB128 (Unsigned Little Endian Base 128)
+BYTE* EmitLEB128(BYTE* pc, unsigned value)
+{
+    do
+    {
+        BYTE b = value & 0x7F;  // low order 7 bits
+        value >>= 7;
+
+        if (value)  // more bytes to come
+        {
+            b |= 0x80;
+        }
+
+        *pc++ = b;
+    }
+    while (value != 0);
+
+    return pc;
+}
+
+// Encode into signed LEB128 (Signed Little Endian Base 128)
+BYTE* EmitLEB128(BYTE* pc, int value)
+{
+    static const int size = sizeof(value) * 8;
+    static const bool isLogicShift = (-1 >> 1) != -1;
+
+    const bool signExtend = isLogicShift && value < 0;
+
+    bool more = true;
+    while (more)
+    {
+        BYTE b = value & 0x7F;  // low order 7 bits
+        value >>= 7;
+
+        if (signExtend)
+        {
+            value |= - (1 << (size - 7));  // sign extend
+        }
+
+        const bool signBit = (b & 0x40) != 0;
+        if ((value == 0 && !signBit) || (value == -1 && signBit))
+        {
+            more = false;
+        }
+        else
+        {
+            b |= 0x80;
+        }
+
+        *pc++ = b;
+    }
+
+    return pc;
+}
+
+
+void EhFrame::Entry::Begin()
+{
+    Assert(beginOffset == -1);
+    beginOffset = writer->Count();
+
+    // Write Length place holder
+    const uword length = 0;
+    writer->Write(length);
+}
+
+void EhFrame::Entry::End()
+{
+    Assert(beginOffset != -1);  // Must have called Begin()
+
+    // padding
+    size_t padding = (MachPtr - writer->Count() % MachPtr) % MachPtr;
+    for (size_t i = 0; i < padding; i++)
+    {
+        cfi_nop();
+    }
+
+    // update length record
+    uword length = writer->Count() - beginOffset
+                    - sizeof(length);  // exclude length itself
+    writer->Write(beginOffset, length);
+}
+
+void EhFrame::Entry::cfi_advance(uword advance)
+{
+    if (advance <= 0x3F)  // 6-bits
+    {
+        cfi_advance_loc(static_cast<ubyte>(advance));
+    }
+    else if (advance <= 0xFF)  // 1-byte
+    {
+        cfi_advance_loc1(static_cast<ubyte>(advance));
+    }
+    else if (advance <= 0xFFFF)  // 2-byte
+    {
+        cfi_advance_loc2(static_cast<uhalf>(advance));
+    }
+    else  // 4-byte
+    {
+        cfi_advance_loc4(advance);
+    }
+}
+
+void EhFrame::CIE::Begin()
+{
+    Assert(writer->Count() == 0);
+    Entry::Begin();
+
+    const uword cie_id = 0;
+    Emit(cie_id);
+
+    const ubyte version = 1;
+    Emit(version);
+
+    const ubyte augmentationString = 0;  // none
+    Emit(augmentationString);
+
+    const ULEB128 codeAlignmentFactor = 1;
+    Emit(codeAlignmentFactor);
+
+    const LEB128 dataAlignmentFactor = - MachPtr;
+    Emit(dataAlignmentFactor);
+
+    const ubyte returnAddressRegister = DWARF_RegRA;
+    Emit(returnAddressRegister);
+}
+
+
+void EhFrame::FDE::Begin()
+{
+    Entry::Begin();
+
+    const uword cie_id = writer->Count();
+    Emit(cie_id);
+
+    // Write pc placeholder
+    pcBeginOffset = writer->Count();
+    const void* pc = nullptr;
+    Emit(pc);
+    Emit(pc);
+}
+
+void EhFrame::FDE::UpdateAddressRange(const void* pcBegin, size_t pcRange)
+{
+    writer->Write(pcBeginOffset, pcBegin);
+    writer->Write(pcBeginOffset + sizeof(pcBegin),
+        reinterpret_cast<const void*>(pcRange));
+}
+
+
+EhFrame::EhFrame(BYTE* buffer, size_t size)
+    : writer(buffer, size), fde(&writer)
+{
+    CIE cie(&writer);
+    cie.Begin();
+
+    // CIE initial instructions
+    // DW_CFA_def_cfa: r7 (rsp) ofs 8
+    cie.cfi_def_cfa(DWARF_RegNum[LowererMDArch::GetRegStackPointer()], MachPtr);
+    // DW_CFA_offset: r16 (rip) at cfa-8 (data alignment -8)
+    cie.cfi_offset(DWARF_RegRA, 1);
+
+    cie.End();
+
+    fde.Begin();
+}
+
+void EhFrame::End()
+{
+    fde.End();
+
+    // Write length 0 to mark terminate entry
+    const uword terminate_entry_length = 0;
+    writer.Write(terminate_entry_length);
+}
diff --git a/lib/Backend/EhFrame.h b/lib/Backend/EhFrame.h
new file mode 100644
index 00000000000..812cf749abd
--- /dev/null
+++ b/lib/Backend/EhFrame.h
@@ -0,0 +1,206 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+#pragma once
+
+typedef BYTE ubyte;
+typedef uint16 uhalf;
+typedef uint32 uword;
+CompileAssert(sizeof(ubyte) == 1);
+CompileAssert(sizeof(uhalf) == 2);
+CompileAssert(sizeof(uword) == 4);
+
+BYTE* EmitLEB128(BYTE* pc, unsigned value);
+BYTE* EmitLEB128(BYTE* pc, int value);
+ubyte GetDwarfRegNum(ubyte regNum);
+
+template <typename T>
+class LEB128Wrapper
+{
+private:
+    T value;
+
+public:
+    LEB128Wrapper(T value): value(value)
+    {}
+
+    BYTE* Write(BYTE* pc) const
+    {
+        return EmitLEB128(pc, value);
+    }
+};
+
+typedef LEB128Wrapper<unsigned> ULEB128;
+typedef LEB128Wrapper<int> LEB128;
+
+//
+// EhFrame emits .eh_frame unwind data for our JIT code. We emit only one CIE
+// followed by one FDE for each JIT function.
+//
+class EhFrame
+{
+    // Simple buffer writer. Must operate on a buffer of sufficient size.
+    class Writer
+    {
+    private:
+        BYTE* buffer;       // original buffer head
+        BYTE* cur;          // current output position
+        const size_t size;  // original size of buffer, for debug only
+
+    public:
+        Writer(BYTE* buffer, size_t size) : buffer(buffer), cur(buffer), size(size)
+        {}
+
+        // Write a value, and advance cur position
+        template <typename T>
+        void Write(T value)
+        {
+            *reinterpret_cast<T*>(cur) = value;
+            cur += sizeof(value);
+            Assert(Count() <= size);
+        }
+
+        // Write a ULEB128 or LEB128 value, and advance cur position
+        template <typename T>
+        void Write(const LEB128Wrapper<T>& leb128)
+        {
+            cur = leb128.Write(cur);
+            Assert(Count() <= size);
+        }
+
+        // Write a value at an absolute position
+        template <typename T>
+        void Write(size_t offset, T value)
+        {
+            Assert(offset + sizeof(value) <= size);
+            *reinterpret_cast<T*>(buffer + offset) = value;
+        }
+
+        // Get original buffer head
+        BYTE* Buffer() const
+        {
+            return buffer;
+        }
+
+        // Get count of written bytes (== offset of cur position)
+        size_t Count() const
+        {
+            return cur - buffer;
+        }
+    };
+
+    // Base class for CIE and FDE
+    class Entry
+    {
+    protected:
+        Writer* writer;
+        size_t beginOffset;  // where we'll update "length" record
+
+        // To limit supported value types
+        void Emit(ubyte value) { writer->Write(value); }
+        void Emit(uhalf value) { writer->Write(value); }
+        void Emit(uword value) { writer->Write(value); }
+        void Emit(const void* absptr) { writer->Write(absptr); }
+        void Emit(LEB128 value) { writer->Write(value); }
+        void Emit(ULEB128 value) { writer->Write(value); }
+
+        template <typename T1>
+        void Emit(ubyte op, T1 arg1)
+        {
+            Emit(op);
+            Emit(arg1);
+        }
+
+        template <typename T1, typename T2>
+        void Emit(ubyte op, T1 arg1, T2 arg2)
+        {
+            Emit(op, arg1);
+            Emit(arg2);
+        }
+
+    public:
+        Entry(Writer* writer) : writer(writer), beginOffset(-1)
+        {}
+
+        void Begin();
+        void End();
+
+#define ENTRY(name, op) \
+    void cfi_##name() \
+    { Emit(static_cast<ubyte>(op)); }
+
+#define ENTRY1(name, op, arg1_type) \
+    void cfi_##name(arg1_type arg1) \
+    { Emit(op, arg1); }
+
+#define ENTRY2(name, op, arg1_type, arg2_type) \
+    void cfi_##name(arg1_type arg1, arg2_type arg2) \
+    { Emit(op, arg1, arg2); }
+
+#define ENTRY_SM1(name, op, arg1_type) \
+    void cfi_##name(arg1_type arg1) \
+    { Assert((arg1) <= 0x3F); Emit(static_cast<ubyte>((op) | arg1)); }
+
+#define ENTRY_SM2(name, op, arg1_type, arg2_type) \
+    void cfi_##name(arg1_type arg1, arg2_type arg2) \
+    { Assert((arg1) <= 0x3F); Emit((op) | arg1, arg2); }
+
+#include "EhFrameCFI.inc"
+
+        void cfi_advance(uword advance);
+    };
+
+    // Common Information Entry
+    class CIE : public Entry
+    {
+    public:
+        CIE(Writer* writer) : Entry(writer)
+        {}
+
+        void Begin();
+    };
+
+    // Frame Description Entry
+    class FDE: public Entry
+    {
+    private:
+        size_t pcBeginOffset;
+
+    public:
+        FDE(Writer* writer) : Entry(writer)
+        {}
+
+        void Begin();
+        void UpdateAddressRange(const void* pcBegin, size_t pcRange);
+    };
+
+private:
+    Writer writer;
+    FDE fde;
+
+public:
+    EhFrame(BYTE* buffer, size_t size);
+
+    Writer* GetWriter()
+    {
+        return &writer;
+    }
+
+    FDE* GetFDE()
+    {
+        return &fde;
+    }
+
+    void End();
+
+    BYTE* Buffer() const
+    {
+        return writer.Buffer();
+    }
+
+    size_t Count() const
+    {
+        return writer.Count();
+    }
+};
diff --git a/lib/Backend/EhFrameCFI.inc b/lib/Backend/EhFrameCFI.inc
new file mode 100644
index 00000000000..ff677b0345e
--- /dev/null
+++ b/lib/Backend/EhFrameCFI.inc
@@ -0,0 +1,29 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license.
See LICENSE.txt file in the project root for full license information. +//------------------------------------------------------------------------------------------------------- + +// +// A subset of DWARF cfi +// + +// instruction _2_6_ bits arg1 arg2 +// +ENTRY_SM1( advance_loc, 0x1 << 6, ubyte ) +ENTRY_SM2( offset, 0x2 << 6, ubyte, ULEB128 ) +ENTRY_SM1( restore, 0x3 << 6, ubyte ) + +ENTRY1 ( advance_loc1, 0x02, ubyte ) +ENTRY1 ( advance_loc2, 0x03, uhalf ) +ENTRY1 ( advance_loc4, 0x04, uword ) + +ENTRY2 ( def_cfa, 0x0c, ULEB128, ULEB128 ) +ENTRY1 ( def_cfa_offset, 0x0e, ULEB128 ) + +ENTRY ( nop, 0 ) + +#undef ENTRY_SM1 +#undef ENTRY_SM2 +#undef ENTRY +#undef ENTRY1 +#undef ENTRY2 diff --git a/lib/Backend/Encoder.cpp b/lib/Backend/Encoder.cpp index 824ca51ce43..e96038f9b95 100644 --- a/lib/Backend/Encoder.cpp +++ b/lib/Backend/Encoder.cpp @@ -81,10 +81,12 @@ Encoder::Encode() { #ifdef _M_X64 case Js::OpCode::PrologStart: + m_func->m_prologEncoder.Begin(m_pc - m_encodeBuffer); inProlog = true; continue; case Js::OpCode::PrologEnd: + m_func->m_prologEncoder.End(); inProlog = false; continue; #endif @@ -313,8 +315,9 @@ Encoder::Encode() m_func->GetJITOutput()->RecordNativeCode(m_func, m_encodeBuffer, alloc); #ifdef _M_X64 - m_func->m_prologEncoder.FinalizeUnwindInfo(); - + m_func->m_prologEncoder.FinalizeUnwindInfo( + (BYTE*)m_func->GetJITOutput()->GetCodeAddress(), (DWORD)codeSize); + m_func->GetJITOutput()->RecordUnwindInfo( 0, m_func->m_prologEncoder.GetUnwindInfo(), @@ -351,7 +354,7 @@ Encoder::Encode() { NativeOffsetInlineeFrameRecordOffset* pairs = NativeCodeDataNewArrayZNoFixup(m_func->GetNativeCodeDataAllocator(), NativeOffsetInlineeFrameRecordOffset, this->m_inlineeFrameMap->Count()); - this->m_inlineeFrameMap->Map([&pairs](int i, NativeOffsetInlineeFramePair& p) + this->m_inlineeFrameMap->Map([&pairs](int i, NativeOffsetInlineeFramePair& p) { pairs[i].offset = p.offset; if (p.record) @@ -560,7 +563,7 @@ Encoder::Encode() (*entry)->propId = propertyId; (*entry)->guardsCount = count; (*entry)->next = nullptr; - + auto& guardOffsets = (*entry)->guardOffsets; int guardIndex = 0; srcSet->Map([&guardOffsets, &guardIndex](Js::JitIndexedPropertyGuard* guard) -> void diff --git a/lib/Backend/FlowGraph.cpp b/lib/Backend/FlowGraph.cpp index 68b9c305b28..00c770c3135 100644 --- a/lib/Backend/FlowGraph.cpp +++ b/lib/Backend/FlowGraph.cpp @@ -35,7 +35,7 @@ FlowGraph::Build(void) // (BailOnSimpleJitToFullJitLoopBody). For that purpose, we need the flow from try to catch. 
if (this->func->HasTry() && (this->func->DoOptimizeTryCatch() || - this->func->IsSimpleJit() && this->func->GetJITFunctionBody()->DoJITLoopBody() + (this->func->IsSimpleJit() && this->func->GetJITFunctionBody()->DoJITLoopBody()) ) ) { diff --git a/lib/Backend/Func.cpp b/lib/Backend/Func.cpp index 3e5c90190ab..deb7000384e 100644 --- a/lib/Backend/Func.cpp +++ b/lib/Backend/Func.cpp @@ -169,7 +169,7 @@ Func::Func(JitArenaAllocator *alloc, JITTimeWorkItem * workItem, } if (m_workItem->Type() == JsFunctionType && - GetJITFunctionBody()->DoBackendArgumentsOptimization() && + GetJITFunctionBody()->DoBackendArgumentsOptimization() && !GetJITFunctionBody()->HasTry()) { // doBackendArgumentsOptimization bit is set when there is no eval inside a function @@ -184,7 +184,7 @@ Func::Func(JitArenaAllocator *alloc, JITTimeWorkItem * workItem, this->GetTopFunc()->hasAnyStackNestedFunc = true; } - if (GetJITFunctionBody()->HasOrParentHasArguments() || parentFunc && parentFunc->thisOrParentInlinerHasArguments) + if (GetJITFunctionBody()->HasOrParentHasArguments() || (parentFunc && parentFunc->thisOrParentInlinerHasArguments)) { thisOrParentInlinerHasArguments = true; } @@ -622,7 +622,7 @@ Func::TryCodegen() #if DBG if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { - Output::Print(L"NativeCodeData Server Buffer: %p, len: %x, chunk head: %p\n", jitOutputData->buffer->data, jitOutputData->buffer->len, chunk); + Output::Print(_u("NativeCodeData Server Buffer: %p, len: %x, chunk head: %p\n"), jitOutputData->buffer->data, jitOutputData->buffer->len, chunk); } #endif } @@ -1553,14 +1553,14 @@ Func::IsFormalsArraySym(SymID symId) return stackArgWithFormalsTracker->GetFormalsArraySyms()->Test(symId); } -void +void Func::TrackFormalsArraySym(SymID symId) { EnsureStackArgWithFormalsTracker(); stackArgWithFormalsTracker->SetFormalsArraySyms(symId); } -void +void Func::TrackStackSymForFormalIndex(Js::ArgSlot formalsIndex, StackSym * sym) { EnsureStackArgWithFormalsTracker(); @@ -1568,7 +1568,7 @@ Func::TrackStackSymForFormalIndex(Js::ArgSlot formalsIndex, StackSym * sym) stackArgWithFormalsTracker->SetStackSymInFormalsIndexMap(sym, formalsIndex, formalsCount); } -StackSym * +StackSym * Func::GetStackSymForFormal(Js::ArgSlot formalsIndex) { if (stackArgWithFormalsTracker == nullptr || stackArgWithFormalsTracker->GetFormalsIndexToStackSymMap() == nullptr) @@ -1613,7 +1613,7 @@ Func::SetNativeCodeDataSym(StackSym * opnd) m_nativeCodeDataSym = opnd; } -StackSym* +StackSym* Func::GetScopeObjSym() { if (stackArgWithFormalsTracker == nullptr) @@ -1623,7 +1623,7 @@ Func::GetScopeObjSym() return stackArgWithFormalsTracker->GetScopeObjSym(); } -BVSparse * +BVSparse * StackArgWithFormalsTracker::GetFormalsArraySyms() { return formalsArraySyms; @@ -1639,13 +1639,13 @@ StackArgWithFormalsTracker::SetFormalsArraySyms(SymID symId) formalsArraySyms->Set(symId); } -StackSym ** +StackSym ** StackArgWithFormalsTracker::GetFormalsIndexToStackSymMap() { return formalsIndexToStackSymMap; } -void +void StackArgWithFormalsTracker::SetStackSymInFormalsIndexMap(StackSym * sym, Js::ArgSlot formalsIndex, Js::ArgSlot formalsCount) { if(formalsIndexToStackSymMap == nullptr) @@ -1656,13 +1656,13 @@ StackArgWithFormalsTracker::SetStackSymInFormalsIndexMap(StackSym * sym, Js::Arg formalsIndexToStackSymMap[formalsIndex] = sym; } -void +void StackArgWithFormalsTracker::SetScopeObjSym(StackSym * sym) { m_scopeObjSym = sym; } -StackSym * +StackSym * StackArgWithFormalsTracker::GetScopeObjSym() { return m_scopeObjSym; @@ -1732,7 +1732,7 @@ IR::IndirOpnd * 
Func::GetConstantAddressIndirOpnd(intptr_t address, IR::AddrOpnd { Assert(regOpnd->m_sym->IsSingleDef()); void * curr = regOpnd->m_sym->m_instrDef->GetSrc1()->AsAddrOpnd()->m_address; - ptrdiff_t diff = (intptr_t)address - (intptr_t)curr; + ptrdiff_t diff = (uintptr_t)address - (uintptr_t)curr; if (!Math::FitsInDWord(diff)) { return false; @@ -1830,9 +1830,9 @@ Func::AllocateNumber(double value) void Func::DumpFullFunctionName() { - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - Output::Print(L"Function %s (%s)", GetJITFunctionBody()->GetDisplayName(), GetDebugNumberSet(debugStringBuffer)); + Output::Print(_u("Function %s (%s)"), GetJITFunctionBody()->GetDisplayName(), GetDebugNumberSet(debugStringBuffer)); } #endif diff --git a/lib/Backend/Func.h b/lib/Backend/Func.h index ded06e7b021..19a9bfc23ab 100644 --- a/lib/Backend/Func.h +++ b/lib/Backend/Func.h @@ -65,11 +65,11 @@ class StackArgWithFormalsTracker public: StackArgWithFormalsTracker(JitArenaAllocator *alloc): - formalsArraySyms(nullptr), - formalsIndexToStackSymMap(nullptr), + formalsArraySyms(nullptr), + formalsIndexToStackSymMap(nullptr), m_scopeObjSym(nullptr), alloc(alloc) - { + { } BVSparse * GetFormalsArraySyms(); @@ -246,7 +246,7 @@ class Func return m_entryPointInfo; } - wchar_t* GetDebugNumberSet(wchar(&bufferToWriteTo)[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]) const + char16* GetDebugNumberSet(wchar(&bufferToWriteTo)[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]) const { return m_workItem->GetJITTimeInfo()->GetDebugNumberSet(bufferToWriteTo); } diff --git a/lib/Backend/FunctionJITTimeInfo.cpp b/lib/Backend/FunctionJITTimeInfo.cpp index 3018b388911..a12bf003a9d 100644 --- a/lib/Backend/FunctionJITTimeInfo.cpp +++ b/lib/Backend/FunctionJITTimeInfo.cpp @@ -372,17 +372,17 @@ FunctionJITTimeInfo::ForceJITLoopBody() const } -wchar_t* +char16* FunctionJITTimeInfo::GetDisplayName() const { return GetBody()->GetDisplayName(); } -wchar_t* +char16* FunctionJITTimeInfo::GetDebugNumberSet(wchar(&bufferToWriteTo)[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]) const { // (#%u.%u), #%u --> (source file Id . 
function Id) , function Number - int len = swprintf_s(bufferToWriteTo, MAX_FUNCTION_BODY_DEBUG_STRING_SIZE, L" (#%d.%u), #%u", + int len = swprintf_s(bufferToWriteTo, MAX_FUNCTION_BODY_DEBUG_STRING_SIZE, _u(" (#%d.%u), #%u"), (int)GetSourceContextId(), GetLocalFunctionId(), GetBody()->GetFunctionNumber()); Assert(len > 8); return bufferToWriteTo; diff --git a/lib/Backend/FunctionJITTimeInfo.h b/lib/Backend/FunctionJITTimeInfo.h index 4a32fa7abbe..78666c9bccf 100644 --- a/lib/Backend/FunctionJITTimeInfo.h +++ b/lib/Backend/FunctionJITTimeInfo.h @@ -44,8 +44,8 @@ class FunctionJITTimeInfo bool HasSharedPropertyGuards() const; bool HasSharedPropertyGuard(Js::PropertyId id) const; - wchar_t* GetDisplayName() const; - wchar_t* GetDebugNumberSet(wchar(&bufferToWriteTo)[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]) const; + char16* GetDisplayName() const; + char16* GetDebugNumberSet(wchar(&bufferToWriteTo)[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]) const; private: FunctionJITTimeDataIDL m_data; }; diff --git a/lib/Backend/GlobOpt.cpp b/lib/Backend/GlobOpt.cpp index 29bafd0a690..daffd087692 100644 --- a/lib/Backend/GlobOpt.cpp +++ b/lib/Backend/GlobOpt.cpp @@ -1837,8 +1837,8 @@ GlobOpt::MergeCapturedValues( SListBase * fromList, CapturedItemsAreEqual itemsAreEqual) { - SListBase::Iterator iterTo(toList); - SListBase::Iterator iterFrom(fromList); + typename SListBase::Iterator iterTo(toList); + typename SListBase::Iterator iterFrom(fromList); bool hasTo = iterTo.Next(); bool hasFrom = fromList == nullptr ? false : iterFrom.Next(); @@ -1849,7 +1849,7 @@ GlobOpt::MergeCapturedValues( Sym * symFrom = iterFrom.Data().Key(); Sym * symTo = iterTo.Data().Key(); - if (symFrom->m_id < symTo->m_id) + if (symFrom->m_id < symTo->m_id) { toData->changedSyms->Set(symFrom->m_id); hasFrom = iterFrom.Next(); @@ -1873,7 +1873,7 @@ GlobOpt::MergeCapturedValues( bool hasRemain = hasFrom || hasTo; if (hasRemain) { - SListBase::Iterator iterRemain(hasFrom ? iterFrom : iterTo); + typename SListBase::Iterator iterRemain(hasFrom ? 
iterFrom : iterTo); do { Sym * symRemain = iterRemain.Data().Key(); @@ -2173,7 +2173,7 @@ GlobOpt::MergeBlockData( if(value) { ValueInfo *const valueInfo = value->GetValueInfo(); - if(valueInfo->IsInt() || valueInfo->IsLikelyInt() && DoAggressiveIntTypeSpec()) + if(valueInfo->IsInt() || (valueInfo->IsLikelyInt() && DoAggressiveIntTypeSpec())) { toData->liveVarSyms->Set(id); } @@ -4049,8 +4049,8 @@ GlobOpt::OptArguments(IR::Instr *instr) if (!TrackArgumentsObject()) { return; - } - + } + if (instr->HasAnyLoadHeapArgsOpCode()) { if (instr->m_func->IsStackArgsEnabled()) @@ -4381,7 +4381,7 @@ GlobOpt::IsAllowedForMemOpt(IR::Instr* instr, bool isMemset, IR::RegOpnd *baseOp ); if (!hasBoundChecksRemoved) { - TRACE_MEMOP_VERBOSE(loop, instr, L"Missing bounds check optimization"); + TRACE_MEMOP_VERBOSE(loop, instr, _u("Missing bounds check optimization")); return false; } } @@ -4878,7 +4878,7 @@ GlobOpt::OptInstr(IR::Instr *&instr, bool* isInstrRemoved) } // Change LdFld on arrays, strings, and 'arguments' to LdLen when we're accessing the .length field - if (instr->GetSrc1() && instr->GetSrc1()->IsSymOpnd() && instr->m_opcode == Js::OpCode::ProfiledLdFld || instr->m_opcode == Js::OpCode::LdFld || instr->m_opcode == Js::OpCode::ScopedLdFld) + if ((instr->GetSrc1() && instr->GetSrc1()->IsSymOpnd() && instr->m_opcode == Js::OpCode::ProfiledLdFld) || instr->m_opcode == Js::OpCode::LdFld || instr->m_opcode == Js::OpCode::ScopedLdFld) { IR::Opnd * opnd = instr->GetSrc1(); Sym *sym = opnd->AsSymOpnd()->m_sym; @@ -4925,7 +4925,7 @@ GlobOpt::OptInstr(IR::Instr *&instr, bool* isInstrRemoved) this->OptArguments(instr); //StackArguments Optimization - We bail out if the index is out of range of actuals. - if ((instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::TypeofElem) && + if ((instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::TypeofElem) && instr->DoStackArgsOpt(this->func) && !this->IsLoopPrePass()) { GenerateBailAtOperation(&instr, IR::BailOnStackArgsOutOfActualsRange); @@ -5517,8 +5517,8 @@ GlobOpt::OptDst( if(!prevDst || !src->IsEqualInternal(prevDst) || !( - prevInstr->GetSrc1() && dst->IsEqual(prevInstr->GetSrc1()) || - prevInstr->GetSrc2() && dst->IsEqual(prevInstr->GetSrc2()) + (prevInstr->GetSrc1() && dst->IsEqual(prevInstr->GetSrc1())) || + (prevInstr->GetSrc2() && dst->IsEqual(prevInstr->GetSrc2())) )) { break; @@ -5983,7 +5983,7 @@ GlobOpt::OptSrc(IR::Opnd *opnd, IR::Instr * *pInstr, Value **indirIndexValRef, I } if(profiledArrayType.IsLikelyObject() && profiledArrayType.GetObjectType() == valueType.GetObjectType() && - (profiledArrayType.HasVarElements() || valueType.HasIntElements() && profiledArrayType.HasFloatElements())) + (profiledArrayType.HasVarElements() || (valueType.HasIntElements() && profiledArrayType.HasFloatElements()))) { // Merge array type we pulled from profile with type propagated by dataflow. valueType = valueType.Merge(profiledArrayType).SetHasNoMissingValues(valueType.HasNoMissingValues()); @@ -6344,9 +6344,9 @@ GlobOpt::CopyProp(IR::Opnd *opnd, IR::Instr *instr, Value *val, IR::IndirOpnd *p // Don't copy-prop operand of SIMD instr with ExtendedArg operands. Each instr should have its exclusive EA sequence. 
if ( - Js::IsSimd128Opcode(instr->m_opcode) && - instr->GetSrc1() != nullptr && - instr->GetSrc1()->IsRegOpnd() && + Js::IsSimd128Opcode(instr->m_opcode) && + instr->GetSrc1() != nullptr && + instr->GetSrc1()->IsRegOpnd() && instr->GetSrc2() == nullptr ) { @@ -6634,7 +6634,7 @@ GlobOpt::CopyPropReplaceOpnd(IR::Instr * instr, IR::Opnd * opnd, StackSym * copy if (this->currentBlock->loop && !this->IsLoopPrePass()) { // Try hoisting this checkObjType. - // But since this isn't the current instr being optimized, we need to play tricks with + // But since this isn't the current instr being optimized, we need to play tricks with // the byteCodeUse fields... BVSparse *currentBytecodeUses = this->byteCodeUses; PropertySym * currentPropertySymUse = this->propertySymUse; @@ -6871,7 +6871,7 @@ GlobOpt::NewIntConstantValue(const int32 intConst, IR::Instr * instr, bool isTag // This gets in the way of CSE. value = HoistConstantLoadAndPropagateValueBackward(Js::TaggedInt::ToVarUnchecked(intConst), instr, value); if (!value->GetValueInfo()->GetSymStore() && - instr->m_opcode == Js::OpCode::LdC_A_I4 || instr->m_opcode == Js::OpCode::Ld_I4) + (instr->m_opcode == Js::OpCode::LdC_A_I4 || instr->m_opcode == Js::OpCode::Ld_I4)) { StackSym * sym = instr->GetDst()->GetStackSym(); Assert(sym); @@ -7442,8 +7442,8 @@ GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val) if(!( profiledValueType.IsLikelyInt() && ( - dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->m_isNotInt || - instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt + (dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->m_isNotInt) || + (instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt) ) )) { @@ -7669,8 +7669,8 @@ GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val) min1 < 0 && IntConstantBounds(min2, max2).And_0x1f().Contains(0)) { - // Src1 may be too large to represent as a signed int32, and src2 may be zero. - // Since the result can therefore be too large to represent as a signed int32, + // Src1 may be too large to represent as a signed int32, and src2 may be zero. + // Since the result can therefore be too large to represent as a signed int32, // include Number in the value type. return CreateDstUntransferredValue( ValueType::AnyNumber.SetCanBeTaggedValue(true), instr, src1Val, src2Val); @@ -7991,7 +7991,7 @@ GlobOpt::ValueNumberLdElemDst(IR::Instr **pInstr, Value *srcVal) if (instr->DoStackArgsOpt(this->func) || !( baseValueType.IsLikelyOptimizedTypedArray() || - baseValueType.IsLikelyNativeArray() && instr->IsProfiledInstr() // Specialized native array lowering for LdElem requires that it is profiled. + (baseValueType.IsLikelyNativeArray() && instr->IsProfiledInstr()) // Specialized native array lowering for LdElem requires that it is profiled. ) || (!this->DoTypedArrayTypeSpec() && baseValueType.IsLikelyOptimizedTypedArray()) || @@ -8211,8 +8211,8 @@ GlobOpt::GetPrepassValueTypeForDst( return desiredValueType; } - if(instr->GetSrc1() && !IsPrepassSrcValueInfoPrecise(instr->GetSrc1(), src1Value) || - instr->GetSrc2() && !IsPrepassSrcValueInfoPrecise(instr->GetSrc2(), src2Value)) + if((instr->GetSrc1() && !IsPrepassSrcValueInfoPrecise(instr->GetSrc1(), src1Value)) || + (instr->GetSrc2() && !IsPrepassSrcValueInfoPrecise(instr->GetSrc2(), src2Value))) { // If the desired value type is not precise, the value type of the destination is derived from the value types of the // sources. 
Since the value type of a source sym is not definite, the destination value type also cannot be definite. @@ -8453,7 +8453,7 @@ GlobOpt::ValueNumberTransferDstInPrepass(IR::Instr *const instr, Value *const sr // In prepass we are going to copy the value but with a different value number // for aggressive int type spec. const ValueType valueType(GetPrepassValueTypeForDst(src1ValueInfo->Type(), instr, src1Val, nullptr, &isValueInfoPrecise)); - if(isValueInfoPrecise || valueType == src1ValueInfo->Type() && src1ValueInfo->IsGeneric()) + if(isValueInfoPrecise || (valueType == src1ValueInfo->Type() && src1ValueInfo->IsGeneric())) { Assert(valueType == src1ValueInfo->Type()); dstVal = CopyValue(src1Val); @@ -8514,7 +8514,7 @@ GlobOpt::PropagateIntRangeBinary(IR::Instr *instr, int32 min1, int32 max1, { // Turn values like 0x1010 into 0x1111 max = 1 << Math::Log2(max); - max = (max << 1) - 1; + max = (uint32)(max << 1) - 1; min = 0; } @@ -8600,7 +8600,7 @@ GlobOpt::PropagateIntRangeBinary(IR::Instr *instr, int32 min1, int32 max1, if (max1) { max1 = 1 << Math::Log2(max1); - max1 = (max1 << 1) - 1; + max1 = (uint32)(max1 << 1) - 1; } if (max1 > 0) @@ -9660,7 +9660,7 @@ GlobOpt::IsWorthSpecializingToInt32DueToSrc(IR::Opnd *const src, Value *const va !src->GetIsDead() || !src->IsRegOpnd() || this->IsInt32TypeSpecialized(src->AsRegOpnd()->m_sym, this->currentBlock) || - this->currentBlock->loop && this->IsLive(src->AsRegOpnd()->m_sym, this->currentBlock->loop->landingPad); + (this->currentBlock->loop && this->IsLive(src->AsRegOpnd()->m_sym, this->currentBlock->loop->landingPad)); } bool @@ -9671,7 +9671,7 @@ GlobOpt::IsWorthSpecializingToInt32DueToDst(IR::Opnd *const dst) const auto sym = dst->AsRegOpnd()->m_sym; return this->IsInt32TypeSpecialized(sym, this->currentBlock) || - this->currentBlock->loop && this->IsLive(sym, this->currentBlock->loop->landingPad); + (this->currentBlock->loop && this->IsLive(sym, this->currentBlock->loop->landingPad)); } bool @@ -9685,7 +9685,7 @@ GlobOpt::IsWorthSpecializingToInt32(IR::Instr *const instr, Value *const src1Val // In addition to checking each operand and the destination, if for any reason we only have to do a maximum of two // conversions instead of the worst-case 3 conversions, it's probably worth specializing. if (IsWorthSpecializingToInt32DueToSrc(src1, src1Val) || - src2Val && IsWorthSpecializingToInt32DueToSrc(src2, src2Val)) + (src2Val && IsWorthSpecializingToInt32DueToSrc(src2, src2Val))) { return true; } @@ -9696,7 +9696,7 @@ GlobOpt::IsWorthSpecializingToInt32(IR::Instr *const instr, Value *const src1Val return true; } - if (dst->IsEqual(src1) || src2Val && (dst->IsEqual(src2) || src1->IsEqual(src2))) + if (dst->IsEqual(src1) || (src2Val && (dst->IsEqual(src2) || src1->IsEqual(src2)))) { return true; } @@ -11370,19 +11370,19 @@ GlobOpt::TypeSpecializeBinary(IR::Instr **pInstr, Value **pSrc1Val, Value **pSrc default: { const bool involesLargeInt32 = - src1Val && src1Val->GetValueInfo()->IsLikelyUntaggedInt() || - src2Val && src2Val->GetValueInfo()->IsLikelyUntaggedInt(); + (src1Val && src1Val->GetValueInfo()->IsLikelyUntaggedInt()) || + (src2Val && src2Val->GetValueInfo()->IsLikelyUntaggedInt()); const auto trySpecializeToFloat = [&](const bool mayOverflow) -> bool { // It has been determined that this instruction cannot be int-specialized. Need to determine whether to attempt // to float-specialize the instruction, or leave it unspecialized. 
- if(involesLargeInt32 + if((involesLargeInt32 #if INT32VAR && mayOverflow #endif - || (instr->m_opcode == Js::OpCode::Mul_A && !this->DoAggressiveMulIntTypeSpec()) - ) + ) || (instr->m_opcode == Js::OpCode::Mul_A && !this->DoAggressiveMulIntTypeSpec()) + ) { // An input range is completely outside the range of an int31 and the operation is likely to overflow. // Additionally, on 32-bit platforms, the value is untaggable and will be a JavascriptNumber, which is @@ -11416,8 +11416,8 @@ GlobOpt::TypeSpecializeBinary(IR::Instr **pInstr, Value **pSrc1Val, Value **pSrc !src2Val->GetValueInfo()->IsInt() ) ) || - instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt || - instr->GetSrc2()->IsRegOpnd() && instr->GetSrc2()->AsRegOpnd()->m_sym->m_isNotInt) + (instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt) || + (instr->GetSrc2()->IsRegOpnd() && instr->GetSrc2()->AsRegOpnd()->m_sym->m_isNotInt)) { return trySpecializeToFloat(true); } @@ -11425,8 +11425,8 @@ GlobOpt::TypeSpecializeBinary(IR::Instr **pInstr, Value **pSrc1Val, Value **pSrc // Try to type specialize to int32 - // If one of the values is a float constant with a value that fits in a uint32 but not an int32, - // and the instruction can ignore int overflow, the source value for the purposes of int specialization + // If one of the values is a float constant with a value that fits in a uint32 but not an int32, + // and the instruction can ignore int overflow, the source value for the purposes of int specialization // would have been changed to an int constant value by ignoring overflow. But, the conversion is still lossy. if (!(src1OriginalVal && src1OriginalVal->GetValueInfo()->IsFloatConstant() && src1Val && src1Val->GetValueInfo()->HasIntConstantValue())) { @@ -11817,7 +11817,7 @@ GlobOpt::TypeSpecializeBinary(IR::Instr **pInstr, Value **pSrc1Val, Value **pSrc // May result in -0 return trySpecializeToFloat(false); } - if ((min1 == 0 && max1 == 0 || min2 == 0 && max2 == 0) && (max1 < 0 || max2 < 0)) + if (((min1 == 0 && max1 == 0) || (min2 == 0 && max2 == 0)) && (max1 < 0 || max2 < 0)) { // Always results in -0 return trySpecializeToFloat(false); @@ -14178,7 +14178,7 @@ GlobOpt::ToTypeSpecUse(IR::Instr *instr, IR::Opnd *opnd, BasicBlock *block, Valu if (opcode == Js::OpCode::FromVar) { - + if (toType == TyInt32) { Assert(valueInfo); @@ -14595,7 +14595,7 @@ GlobOpt::ToTypeSpecUse(IR::Instr *instr, IR::Opnd *opnd, BasicBlock *block, Valu // Src is always invariant, but check if the dst is, and then hoist. 
if (block->loop && ( - newFloatSym && block->loop->CanHoistInvariants() || + (newFloatSym && block->loop->CanHoistInvariants()) || this->OptIsInvariant(floatReg, block, block->loop, val, false, false) )) { @@ -15717,7 +15717,7 @@ GlobOpt::ProcessValueKills(BasicBlock *const block, GlobOptBlockData *const bloc if(IsLoopPrePass() && block->loop == rootLoopPrePass) { AnalysisAssert(rootLoopPrePass); - + for (Loop * loop = rootLoopPrePass; loop != nullptr; loop = loop->parent) { loop->jsArrayKills.SetKillsAllArrays(); @@ -16020,7 +16020,7 @@ GlobOpt::OptArraySrc(IR::Instr * *const instrRef) baseOpnd->SetValueType(baseValueType); if(!baseValueType.IsLikelyAnyOptimizedArray() || !DoArrayCheckHoist(baseValueType, currentBlock->loop, instr) || - baseOwnerIndir && !ShouldExpectConventionalArrayIndexValue(baseOwnerIndir)) + (baseOwnerIndir && !ShouldExpectConventionalArrayIndexValue(baseOwnerIndir))) { return; } @@ -16131,7 +16131,7 @@ GlobOpt::OptArraySrc(IR::Instr * *const instrRef) const bool headSegmentLengthIsAvailable = baseArrayValueInfo && baseArrayValueInfo->HeadSegmentLengthSym(); const bool doHeadSegmentLengthLoad = doArraySegmentLengthHoist && - (needsHeadSegmentLength || !isLikelyJsArray && needsLength) && + (needsHeadSegmentLength || (!isLikelyJsArray && needsLength)) && !headSegmentLengthIsAvailable; const bool lengthIsAvailable = baseArrayValueInfo && baseArrayValueInfo->LengthSym(); const bool doLengthLoad = @@ -16150,7 +16150,7 @@ GlobOpt::OptArraySrc(IR::Instr * *const instrRef) { // SIMD_JS // simd load/store never call helper - canBailOutOnArrayAccessHelperCall = true; + canBailOutOnArrayAccessHelperCall = true; } else { @@ -16172,7 +16172,7 @@ GlobOpt::OptArraySrc(IR::Instr * *const instrRef) IntConstantBounds indexConstantBounds; Value *headSegmentLengthValue = nullptr; IntConstantBounds headSegmentLengthConstantBounds; - + if (baseValueType.IsLikelyOptimizedVirtualTypedArray() && !Js::IsSimd128LoadStore(instr->m_opcode) /*Always extract bounds for SIMD */) { if (isProfilableStElem || @@ -16328,7 +16328,7 @@ GlobOpt::OptArraySrc(IR::Instr * *const instrRef) { const JsArrayKills loopKills(loop->jsArrayKills); Value *baseValueInLoopLandingPad; - if(isLikelyJsArray && loopKills.KillsValueType(newBaseValueType) || + if((isLikelyJsArray && loopKills.KillsValueType(newBaseValueType)) || !OptIsInvariant(baseOpnd->m_sym, currentBlock, loop, baseValue, true, true, &baseValueInLoopLandingPad) || !(doArrayChecks || baseValueInLoopLandingPad->GetValueInfo()->IsObject())) { @@ -17514,7 +17514,7 @@ GlobOpt::OptArraySrc(IR::Instr * *const instrRef) baseArrayValueInfo->CreateOpnd( baseOpnd, needsHeadSegment, - needsHeadSegmentLength || !isLikelyJsArray && needsLength, + needsHeadSegmentLength || (!isLikelyJsArray && needsLength), needsLength, eliminatedLowerBoundCheck, eliminatedUpperBoundCheck, @@ -17768,7 +17768,7 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr) case Js::OpCode::DeleteElemIStrict_A: Assert(instr->GetSrc1()); if(!instr->GetSrc1()->IsIndirOpnd() || - useValueTypes && instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsNotArrayOrObjectWithArray()) + (useValueTypes && instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsNotArrayOrObjectWithArray())) { break; } @@ -17830,7 +17830,7 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr) const ValueType arrayValueType(arrayOpnd->GetValueType()); - if(!arrayOpnd->IsRegOpnd() || useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray()) + if(!arrayOpnd->IsRegOpnd() || (useValueTypes && 
arrayValueType.IsNotArrayOrObjectWithArray())) { break; } @@ -17852,10 +17852,12 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr) } // Don't kill NativeArray, if there is no mismatch between array's type and element's type. - if(doNativeArrayTypeSpec && !(useValueTypes && arrayValueType.IsNativeArray() && - (arrayValueType.IsLikelyNativeIntArray() && instr->GetSrc2()->IsInt32()) || - (arrayValueType.IsLikelyNativeFloatArray() && instr->GetSrc2()->IsFloat())) - && !(useValueTypes && arrayValueType.IsNotNativeArray())) + if(doNativeArrayTypeSpec && + !(useValueTypes && arrayValueType.IsNativeArray() && + ((arrayValueType.IsLikelyNativeIntArray() && instr->GetSrc2()->IsInt32()) || + (arrayValueType.IsLikelyNativeFloatArray() && instr->GetSrc2()->IsFloat())) + ) && + !(useValueTypes && arrayValueType.IsNotNativeArray())) { kills.SetKillsNativeArrays(); } @@ -17869,7 +17871,7 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr) Assert(arrayOpnd); const ValueType arrayValueType(arrayOpnd->GetValueType()); - if(!arrayOpnd->IsRegOpnd() || useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray()) + if(!arrayOpnd->IsRegOpnd() || (useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray())) { break; } @@ -17894,7 +17896,7 @@ GlobOpt::CheckJsArrayKills(IR::Instr *const instr) IR::Opnd *const arrayOpnd = instr->FindCallArgumentOpnd(1); Assert(arrayOpnd); const ValueType arrayValueType(arrayOpnd->GetValueType()); - if(!arrayOpnd->IsRegOpnd() || useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray()) + if(!arrayOpnd->IsRegOpnd() || (useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray())) { break; } @@ -18251,10 +18253,10 @@ GlobOpt::VerifyIntSpecForIgnoringIntOverflow(IR::Instr *const instr) // doesn't generate bailouts or cause ignoring int overflow to be invalid. // MULs are allowed to start a region and have BailOutInfo since they will bailout on non-32 bit overflow. if(instr->m_opcode == Js::OpCode::Ld_A || - (!instr->HasBailOutInfo() || instr->m_opcode == Js::OpCode::Mul_I4) && + ((!instr->HasBailOutInfo() || instr->m_opcode == Js::OpCode::Mul_I4) && (!instr->GetDst() || instr->GetDst()->IsInt32()) && (!instr->GetSrc1() || instr->GetSrc1()->IsInt32()) && - (!instr->GetSrc2() || instr->GetSrc2()->IsInt32())) + (!instr->GetSrc2() || instr->GetSrc2()->IsInt32()))) { return; } @@ -18681,8 +18683,8 @@ GlobOpt::OptIsInvariant(Sym *sym, BasicBlock *block, Loop *loop, Value *srcVal, { Assert(block->globOptData.liveInt32Syms->Test(varSym->m_id)); if (!loop->landingPad->globOptData.liveInt32Syms->Test(varSym->m_id) || - loop->landingPad->globOptData.liveLossyInt32Syms->Test(varSym->m_id) && - !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id)) + (loop->landingPad->globOptData.liveLossyInt32Syms->Test(varSym->m_id) && + !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id))) { // Either the int32 sym is not live in the landing pad, or it's lossy in the landing pad and the // instruction's block is using the lossless version. In either case, the instruction cannot be hoisted @@ -18830,7 +18832,7 @@ GlobOpt::OptIsInvariant( case Js::OpCode::LdLen_A: return false; - //Can't Hoist BailOnNotStackArgs, as it is necessary as InlineArgsOptimization relies on this opcode + //Can't Hoist BailOnNotStackArgs, as it is necessary as InlineArgsOptimization relies on this opcode //to decide whether to throw rejit exception or not. 
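A note ahead of the GlobOpt::EmitMemop hunks below, which swap _snwprintf_s(buf, bufSize, ...) for swprintf_s(buf, ...): the explicit element count can be dropped because, with the secure-CRT C++ overloads in effect, swprintf_s can take the destination as an array reference and deduce its size (this is an assumption about the CRT/PAL declarations, which the patch does not show). A rough sketch of that deduction mechanism; FormatId is an invented name, not the CRT function:

    #include <cstddef>
    #include <cwchar>

    // Invented helper (not the CRT's swprintf_s): taking the destination as an
    // array reference lets the template deduce N, so callers never pass a size.
    template <std::size_t N>
    int FormatId(wchar_t (&buffer)[N], const wchar_t* format, unsigned value)
    {
        return std::swprintf(buffer, N, format, value);
    }

    int main()
    {
        wchar_t loopCountBuf[16];
        FormatId(loopCountBuf, L"s%u", 42u);   // N == 16, deduced from the array type
        return 0;
    }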
case Js::OpCode::BailOnNotStackArgs: return false; @@ -18971,7 +18973,7 @@ GlobOpt::OptHoistInvariant( ValueInfo *src1ValueInfo = src1Val->GetValueInfo(); ValueInfo *landingPadSrc1ValueInfo = landingPadSrc1val->GetValueInfo(); IRType dstType = dst->GetType(); - + const auto AddBailOutToFromVar = [&]() { instr->GetSrc1()->SetValueType(landingPadSrc1val->GetValueInfo()->Type()); @@ -19751,7 +19753,7 @@ GlobOpt::DoArrayCheckHoist() const bool GlobOpt::DoArrayCheckHoist(const ValueType baseValueType, Loop* loop, IR::Instr *const instr) const { - if(!DoArrayCheckHoist() || instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func)) + if(!DoArrayCheckHoist() || (instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func))) { return false; } @@ -19877,7 +19879,7 @@ GlobOpt::DoLdLenIntSpec(IR::Instr *const instr, const ValueType baseValueType) c if(PHASE_OFF(Js::LdLenIntSpecPhase, func) || IsTypeSpecPhaseOff(func) || (func->HasProfileInfo() && func->GetReadOnlyProfileInfo()->IsLdLenIntSpecDisabled()) || - instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func)) + (instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func))) { return false; } @@ -19895,7 +19897,7 @@ GlobOpt::DoLdLenIntSpec(IR::Instr *const instr, const ValueType baseValueType) c Assert(!instr || baseValueType == instr->GetSrc1()->GetValueType()); return baseValueType.HasBeenString() || - baseValueType.IsLikelyAnyOptimizedArray() && baseValueType.GetObjectType() != ObjectType::ObjectWithArray; + (baseValueType.IsLikelyAnyOptimizedArray() && baseValueType.GetObjectType() != ObjectType::ObjectWithArray); } bool @@ -20785,7 +20787,7 @@ GlobOpt::TraceSettings() Output::Print(_u(" FloatTypeSpec: %s\r\n"), this->DoFloatTypeSpec() ? _u("enabled") : _u("disabled")); Output::Print(_u(" AggressiveIntTypeSpec: %s\r\n"), this->DoAggressiveIntTypeSpec() ? _u("enabled") : _u("disabled")); Output::Print(_u(" LossyIntTypeSpec: %s\r\n"), this->DoLossyIntTypeSpec() ? _u("enabled") : _u("disabled")); - Output::Print(_u(" ArrayCheckHoist: %s\r\n"), (this->func->HasProfileInfo() && this->func->GetReadOnlyProfileInfo()->IsArrayCheckHoistDisabled(func->IsLoopBody())) ? L"disabled" : L"enabled"); + Output::Print(_u(" ArrayCheckHoist: %s\r\n"), (this->func->HasProfileInfo() && this->func->GetReadOnlyProfileInfo()->IsArrayCheckHoistDisabled(func->IsLoopBody())) ? 
_u("disabled") : _u("enabled")); Output::Print(_u(" ImplicitCallFlags: %s\r\n"), Js::DynamicProfileInfo::GetImplicitCallFlagsString(this->func->m_fg->implicitCallFlags)); for (Loop * loop = this->func->m_fg->loopList; loop != NULL; loop = loop->next) { @@ -21486,11 +21488,11 @@ GlobOpt::EmitMemop(Loop * loop, LoopCount *loopCount, const MemOpEmitData* emitD char16 loopCountBuf[loopCountBufSize]; if (loopCount->LoopCountMinusOneSym()) { - _snwprintf_s(loopCountBuf, loopCountBufSize, _u("s%u"), loopCount->LoopCountMinusOneSym()->m_id); + swprintf_s(loopCountBuf, _u("s%u"), loopCount->LoopCountMinusOneSym()->m_id); } else { - _snwprintf_s(loopCountBuf, loopCountBufSize, _u("%u"), loopCount->LoopCountMinusOneConstantValue() + 1); + swprintf_s(loopCountBuf, _u("%u"), loopCount->LoopCountMinusOneConstantValue() + 1); } if (isMemset) { @@ -21499,7 +21501,7 @@ GlobOpt::EmitMemop(Loop * loop, LoopCount *loopCount, const MemOpEmitData* emitD char16 constBuf[constBufSize]; if (candidate->srcSym) { - _snwprintf_s(constBuf, constBufSize, _u("s%u"), candidate->srcSym->m_id); + swprintf_s(constBuf, _u("s%u"), candidate->srcSym->m_id); } else { @@ -21509,18 +21511,18 @@ GlobOpt::EmitMemop(Loop * loop, LoopCount *loopCount, const MemOpEmitData* emitD case TyInt16: case TyInt32: case TyInt64: - _snwprintf_s(constBuf, constBufSize, sizeof(IntConstType) == 8 ? _u("%lld") : _u("%d"), candidate->constant.u.intConst.value); + swprintf_s(constBuf, sizeof(IntConstType) == 8 ? _u("%lld") : _u("%d"), candidate->constant.u.intConst.value); break; case TyFloat32: case TyFloat64: - _snwprintf_s(constBuf, constBufSize, _u("%.4f"), candidate->constant.u.floatConst.value); + swprintf_s(constBuf, _u("%.4f"), candidate->constant.u.floatConst.value); break; case TyVar: - _snwprintf_s(constBuf, constBufSize, sizeof(Js::Var) == 8 ? _u("0x%.16llX") : _u("0x%.8X"), candidate->constant.u.varConst.value); + swprintf_s(constBuf, sizeof(Js::Var) == 8 ? _u("0x%.16llX") : _u("0x%.8X"), candidate->constant.u.varConst.value); break; default: AssertMsg(false, "Unsupported constant type"); - _snwprintf_s(constBuf, constBufSize, _u("Unknown")); + swprintf_s(constBuf, _u("Unknown")); break; } } diff --git a/lib/Backend/GlobOpt.h b/lib/Backend/GlobOpt.h index e2eee8f0623..4fa03accfbc 100644 --- a/lib/Backend/GlobOpt.h +++ b/lib/Backend/GlobOpt.h @@ -623,7 +623,7 @@ class ArrayValueInfo : public ValueInfo Assert(allocator); return - copyHeadSegment && headSegmentSym || copyHeadSegmentLength && headSegmentLengthSym || copyLength && lengthSym + (copyHeadSegment && headSegmentSym) || (copyHeadSegmentLength && headSegmentLengthSym) || (copyLength && lengthSym) ? New( allocator, Type(), @@ -1140,8 +1140,8 @@ class JsArrayKills return killsAllArrays || - killsArraysWithNoMissingValues && valueType.HasNoMissingValues() || - killsNativeArrays && !valueType.HasVarElements(); + (killsArraysWithNoMissingValues && valueType.HasNoMissingValues()) || + (killsNativeArrays && !valueType.HasVarElements()); } bool AreSubsetOf(const JsArrayKills &other) const diff --git a/lib/Backend/GlobOptBailOut.cpp b/lib/Backend/GlobOptBailOut.cpp index 763e97f3694..804d9ae751b 100644 --- a/lib/Backend/GlobOptBailOut.cpp +++ b/lib/Backend/GlobOptBailOut.cpp @@ -986,7 +986,7 @@ GlobOpt::FillBailOutInfo(BasicBlock *block, BailOutInfo * bailOutInfo) #ifdef _M_IX86 if (this->currentRegion && this->currentRegion->GetType() == RegionTypeTry) { - // For a bailout in argument evaluation from an EH region, the esp is offset by the TryCatch helper’s frame. 
So, the argouts are not actually pushed at the + // For a bailout in argument evaluation from an EH region, the esp is offset by the TryCatch helper�s frame. So, the argouts are not actually pushed at the // offsets stored in the bailout record, which are relative to ebp. Need to restore the argouts from the actual value of esp before calling the Bailout helper. // For nested calls, argouts for the outer call need to be restored from an offset of stack-adjustment-done-by-the-inner-call from esp. if (startCallNumber + 1 == bailOutInfo->startCallCount) @@ -1250,8 +1250,8 @@ GlobOpt::MayNeedBailOnImplicitCall(const IR::Instr * instr, Value *src1Val, Valu return !( baseValueType.IsString() || - baseValueType.IsAnyArray() && baseValueType.GetObjectType() != ObjectType::ObjectWithArray || - instr->HasBailOutInfo() && instr->GetBailOutKindNoBits() == IR::BailOutOnIrregularLength // guarantees no implicit calls + (baseValueType.IsAnyArray() && baseValueType.GetObjectType() != ObjectType::ObjectWithArray) || + (instr->HasBailOutInfo() && instr->GetBailOutKindNoBits() == IR::BailOutOnIrregularLength) // guarantees no implicit calls ); } @@ -1277,7 +1277,7 @@ GlobOpt::MayNeedBailOnImplicitCall(const IR::Instr * instr, Value *src1Val, Valu !( (bailOutKind & ~IR::BailOutKindBits) == IR::BailOutConventionalTypedArrayAccessOnly || bailOutKind & IR::BailOutOnArrayAccessHelperCall || - isLdElem && bailOutKind & IR::BailOutConventionalNativeArrayAccessOnly + (isLdElem && bailOutKind & IR::BailOutConventionalNativeArrayAccessOnly) ); } diff --git a/lib/Backend/GlobOptFields.cpp b/lib/Backend/GlobOptFields.cpp index 1bf40d0c3fb..d57f26408ad 100644 --- a/lib/Backend/GlobOptFields.cpp +++ b/lib/Backend/GlobOptFields.cpp @@ -373,7 +373,7 @@ GlobOpt::KillLiveElems(IR::IndirOpnd * indirOpnd, BVSparse * indexOpnd && ( indexOpnd->m_sym->m_isNotInt || - inGlobOpt && !indexOpnd->GetValueType().IsNumber() && !IsTypeSpecialized(indexOpnd->m_sym, &blockData) + (inGlobOpt && !indexOpnd->GetValueType().IsNumber() && !IsTypeSpecialized(indexOpnd->m_sym, &blockData)) ) )) { diff --git a/lib/Backend/GlobOptIntBounds.cpp b/lib/Backend/GlobOptIntBounds.cpp index 730e155997f..d0880029868 100644 --- a/lib/Backend/GlobOptIntBounds.cpp +++ b/lib/Backend/GlobOptIntBounds.cpp @@ -538,7 +538,7 @@ void GlobOpt::UpdateIntBoundsForEqualBranch( { Assert(src1Value); - if(!DoPathDependentValues() || src2Value && src1Value->GetValueNumber() == src2Value->GetValueNumber()) + if(!DoPathDependentValues() || (src2Value && src1Value->GetValueNumber() == src2Value->GetValueNumber())) { return; } @@ -591,7 +591,7 @@ void GlobOpt::UpdateIntBoundsForNotEqualBranch( { Assert(src1Value); - if(!DoPathDependentValues() || src2Value && src1Value->GetValueNumber() == src2Value->GetValueNumber()) + if(!DoPathDependentValues() || (src2Value && src1Value->GetValueNumber() == src2Value->GetValueNumber())) { return; } diff --git a/lib/Backend/IR.cpp b/lib/Backend/IR.cpp index c88a47d4b9e..870e31c8c0a 100644 --- a/lib/Backend/IR.cpp +++ b/lib/Backend/IR.cpp @@ -145,7 +145,7 @@ Instr::TryOptimizeInstrWithFixedDataProperty(IR::Instr **pInstr, GlobOpt * globo bool Instr::IsEqual(IR::Instr *compareInstr) const { - Assert(this && compareInstr); + Assert(compareInstr); if (this->GetKind() == compareInstr->GetKind() && this->m_opcode == compareInstr->m_opcode) { @@ -3221,8 +3221,8 @@ bool Instr::HasAnyImplicitCalls() const } if (OpCodeAttr::OpndHasImplicitCall(this->m_opcode)) { - if (this->m_dst && - ((this->m_dst->IsSymOpnd() && 
this->m_dst->AsSymOpnd()->m_sym->IsPropertySym()) || + if (this->m_dst && + ((this->m_dst->IsSymOpnd() && this->m_dst->AsSymOpnd()->m_sym->IsPropertySym()) || this->m_dst->IsIndirOpnd())) { return true; @@ -3438,7 +3438,7 @@ IR::Instr* IR::Instr::NewConstantLoad(IR::RegOpnd* dstOpnd, intptr_t varConst, V } else if(type.IsNumber()) { - // TODO (michhol): OOP JIT. we may need to unbox before sending over const table + // TODO (michhol): OOP JIT. we may need to unbox before sending over const table if (!func->IsOOPJIT()) { @@ -3447,7 +3447,7 @@ IR::Instr* IR::Instr::NewConstantLoad(IR::RegOpnd* dstOpnd, intptr_t varConst, V else { srcOpnd = IR::FloatConstOpnd::New((Js::Var)varConst, TyFloat64, func -#if !FLOATVAR +#if !FLOATVAR ,varLocal #endif ); @@ -3466,7 +3466,7 @@ IR::Instr* IR::Instr::NewConstantLoad(IR::RegOpnd* dstOpnd, intptr_t varConst, V // treated as int32s for the purposes of int specialization. dstOpnd->m_sym->m_isNotInt = !Js::JavascriptNumber::IsInt32OrUInt32(((IR::FloatConstOpnd*)srcOpnd)->m_value); - + #endif } } @@ -4029,6 +4029,9 @@ Instr::Dump(IRDumpFlags flags) Output::SkipToColumn(38); }; + // forward decl before goto statement + Opnd * dst = nullptr; + if(m_opcode == Js::OpCode::BoundCheck || m_opcode == Js::OpCode::UnsignedBoundCheck) { PrintOpCodeName(); @@ -4094,7 +4097,7 @@ Instr::Dump(IRDumpFlags flags) Output::SkipToColumn(4); - Opnd * dst = this->GetDst(); + dst = this->GetDst(); if (dst) { @@ -4182,7 +4185,7 @@ Instr::Dump(IRDumpFlags flags) if(branchInstr->m_isMultiBranch && branchInstr->IsMultiBranch()) { IR::MultiBranchInstr * multiBranchInstr = branchInstr->AsMultiBrInstr(); - + // If this MultiBranchInstr has been lowered to a machine instruction, which means // its opcode is not Js::OpCode::MultiBr, there is no need to print the labels. if (this->m_opcode == Js::OpCode::MultiBr) @@ -4216,27 +4219,30 @@ Instr::Dump(IRDumpFlags flags) Output::Print(_u("#%d"), this->AsPragmaInstr()->m_statementIndex); } - Opnd * src1 = this->GetSrc1(); - if (this->m_opcode == Js::OpCode::NewScFunc || this->m_opcode == Js::OpCode::NewScGenFunc) + // scope { - Assert(src1->IsIntConstOpnd()); - Js::ParseableFunctionInfo * function = nullptr; - if (!m_func->IsOOPJIT()) + Opnd * src1 = this->GetSrc1(); + if (this->m_opcode == Js::OpCode::NewScFunc || this->m_opcode == Js::OpCode::NewScGenFunc) { - function = ((Js::ParseableFunctionInfo *)m_func->GetJITFunctionBody()->GetAddr())->GetNestedFunctionForExecution((uint)src1->AsIntConstOpnd()->GetValue())->GetParseableFunctionInfo(); + Assert(src1->IsIntConstOpnd()); + Js::ParseableFunctionInfo * function = nullptr; + if (!m_func->IsOOPJIT()) + { + function = ((Js::ParseableFunctionInfo *)m_func->GetJITFunctionBody()->GetAddr())->GetNestedFunctionForExecution((uint)src1->AsIntConstOpnd()->GetValue())->GetParseableFunctionInfo(); + } + Output::Print(_u("func:%s()"), function ? function->GetDisplayName() : _u("???")); + Output::Print(_u(", env:")); + this->GetSrc2()->AsRegOpnd()->m_sym->Dump(flags); } - Output::Print(_u("func:%s()"), function ? 
function->GetDisplayName() : _u("???")); - Output::Print(_u(", env:")); - this->GetSrc2()->AsRegOpnd()->m_sym->Dump(flags); - } - else if (src1) - { - src1->Dump(flags, this->m_func); - Opnd * src2 = this->GetSrc2(); - if (src2) + else if (src1) { - Output::Print(_u(", ")); - src2->Dump(flags, this->m_func); + src1->Dump(flags, this->m_func); + Opnd * src2 = this->GetSrc2(); + if (src2) + { + Output::Print(_u(", ")); + src2->Dump(flags, this->m_func); + } } } diff --git a/lib/Backend/IR.h b/lib/Backend/IR.h index 29e394cc5ae..0e7b6b1802c 100644 --- a/lib/Backend/IR.h +++ b/lib/Backend/IR.h @@ -435,13 +435,13 @@ class Instr private: void ClearNumber() { this->m_number = 0; } void SetNumber(uint32 number); - friend class Func; - friend class Lowerer; + friend class ::Func; + friend class ::Lowerer; void SetByteCodeOffset(uint32 number); - friend class IRBuilder; - friend class IRBuilderAsmJs; - friend class FlowGraph; + friend class ::IRBuilder; + friend class ::IRBuilderAsmJs; + friend class ::FlowGraph; void SetBailOutKind_NoAssert(const IR::BailOutKind bailOutKind); diff --git a/lib/Backend/IRBuilder.cpp b/lib/Backend/IRBuilder.cpp index 9b4d00e08e4..43c7ed15da8 100644 --- a/lib/Backend/IRBuilder.cpp +++ b/lib/Backend/IRBuilder.cpp @@ -791,8 +791,8 @@ IRBuilder::Build() { bool needBailoutForHelper = CONFIG_FLAG(EnableContinueAfterExceptionWrappersForHelpers) && (OpCodeAttr::NeedsPostOpDbgBailOut(newOpcode) || - m_lastInstr->m_opcode == Js::OpCode::CallHelper && m_lastInstr->GetSrc1() && - HelperMethodAttributes::CanThrow(m_lastInstr->GetSrc1()->AsHelperCallOpnd()->m_fnHelper)); + (m_lastInstr->m_opcode == Js::OpCode::CallHelper && m_lastInstr->GetSrc1() && + HelperMethodAttributes::CanThrow(m_lastInstr->GetSrc1()->AsHelperCallOpnd()->m_fnHelper))); if (needBailoutForHelper) { @@ -2002,7 +2002,7 @@ IRBuilder::BuildProfiledReg2(Js::OpCode newOpcode, uint32 offset, Js::RegSlot ds ValueType arrayType(ldElemInfo->GetArrayType()); if(arrayType.IsLikelyNativeArray() && ( - !(m_func->GetTopFunc()->HasTry() && !m_func->GetTopFunc()->DoOptimizeTryCatch()) && m_func->GetWeakFuncRef() && !m_func->HasArrayInfo() || + (!(m_func->GetTopFunc()->HasTry() && !m_func->GetTopFunc()->DoOptimizeTryCatch()) && m_func->GetWeakFuncRef() && !m_func->HasArrayInfo()) || m_func->IsJitInDebugMode() )) { @@ -5282,7 +5282,7 @@ IRBuilder::BuildElementI(Js::OpCode newOpcode, uint32 offset, Js::RegSlot baseRe { if(arrayType.IsLikelyNativeArray() && ( - !(m_func->GetTopFunc()->HasTry() && !m_func->GetTopFunc()->DoOptimizeTryCatch()) && m_func->GetWeakFuncRef() && !m_func->HasArrayInfo() || + (!(m_func->GetTopFunc()->HasTry() && !m_func->GetTopFunc()->DoOptimizeTryCatch()) && m_func->GetWeakFuncRef() && !m_func->HasArrayInfo()) || m_func->IsJitInDebugMode() )) { diff --git a/lib/Backend/IRType.cpp b/lib/Backend/IRType.cpp index 30ca14ed8a0..82ce0aed64e 100644 --- a/lib/Backend/IRType.cpp +++ b/lib/Backend/IRType.cpp @@ -28,7 +28,7 @@ int const TyBaseType[] = { #undef IRTYPE }; -char16 * const TyDumpName[] = { +const char16 * const TyDumpName[] = { #define IRTYPE(ucname, baseType, size, bitSize, enRegOk, dname) _u(#dname), #include "IRTypeList.h" #undef IRTYPE diff --git a/lib/Backend/InductionVariable.cpp b/lib/Backend/InductionVariable.cpp index 24dc680f654..bbb71e7a3ee 100644 --- a/lib/Backend/InductionVariable.cpp +++ b/lib/Backend/InductionVariable.cpp @@ -70,8 +70,8 @@ const IntConstantBounds &InductionVariable::ChangeBounds() const bool InductionVariable::IsChangeUnidirectional() const { return - 
ChangeBounds().LowerBound() >= 0 && ChangeBounds().UpperBound() != 0 || - ChangeBounds().UpperBound() <= 0 && ChangeBounds().LowerBound() != 0; + (ChangeBounds().LowerBound() >= 0 && ChangeBounds().UpperBound() != 0) || + (ChangeBounds().UpperBound() <= 0 && ChangeBounds().LowerBound() != 0); } bool InductionVariable::Add(const int n) diff --git a/lib/Backend/Inline.cpp b/lib/Backend/Inline.cpp index 77839f4a907..e06bb7128af 100644 --- a/lib/Backend/Inline.cpp +++ b/lib/Backend/Inline.cpp @@ -1287,8 +1287,13 @@ Inline::TryOptimizeCallInstrWithFixedMethod(IR::Instr *callInstr, const Function char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; bool printFixedFieldsTrace = - ((PHASE_TRACE(Js::FixedMethodsPhase, callInstr->m_func) || PHASE_TESTTRACE(Js::FixedMethodsPhase, callInstr->m_func) || - (isCtor && PHASE_TRACE(Js::FixedNewObjPhase, callInstr->m_func) || PHASE_TESTTRACE(Js::FixedNewObjPhase, callInstr->m_func))) && !dontOptimizeJustCheck); + ( + PHASE_TRACE(Js::FixedMethodsPhase, callInstr->m_func) || + PHASE_TESTTRACE(Js::FixedMethodsPhase, callInstr->m_func) || + (isCtor && ( + PHASE_TRACE(Js::FixedNewObjPhase, callInstr->m_func) || + PHASE_TESTTRACE(Js::FixedNewObjPhase, callInstr->m_func))) + ) && !dontOptimizeJustCheck; if (printFixedFieldsTrace) { @@ -1297,7 +1302,7 @@ Inline::TryOptimizeCallInstrWithFixedMethod(IR::Instr *callInstr, const Function Output::Print(_u("FixedFields: function %s (%s): considering method (%s %s): polymorphic = %d, built-in = %d, ctor = %d, inlined = %d, functionInfo = %p.\n"), callInstr->m_func->GetJITFunctionBody()->GetDisplayName(), callInstr->m_func->GetDebugNumberSet(debugStringBuffer), calleeName, - calleeFunctionBody ? inlineeInfo->GetDebugNumberSet(debugStringBuffer2) : L"(null)", + calleeFunctionBody ? inlineeInfo->GetDebugNumberSet(debugStringBuffer2) : _u("(null)"), isPolymorphic, isBuiltIn, isCtor, isInlined, inlineeInfo ? inlineeInfo->GetFunctionInfoAddr() : 0); Output::Flush(); } @@ -1319,8 +1324,8 @@ Inline::TryOptimizeCallInstrWithFixedMethod(IR::Instr *callInstr, const Function Output::Print(_u("FixedFields: function %s (%s): %s non-fixed method (%s %s), because callee is not single def.\n"), callInstr->m_func->GetJITFunctionBody()->GetDisplayName(), callInstr->m_func->GetDebugNumberSet(debugStringBuffer), - inlineeInfo != nullptr ? L"inlining" : L"calling", calleeName, - calleeFunctionBody ? inlineeInfo->GetDebugNumberSet(debugStringBuffer2) : L"(null)"); + inlineeInfo != nullptr ? _u("inlining") : _u("calling"), calleeName, + calleeFunctionBody ? inlineeInfo->GetDebugNumberSet(debugStringBuffer2) : _u("(null)")); Output::Flush(); } #endif @@ -1344,8 +1349,8 @@ Inline::TryOptimizeCallInstrWithFixedMethod(IR::Instr *callInstr, const Function Output::Print(_u("FixedFields: function %s (%s): %s non-fixed method (%s %s), because callee does not come from LdMethodFld.\n"), callInstr->m_func->GetJITFunctionBody()->GetDisplayName(), callInstr->m_func->GetDebugNumberSet(debugStringBuffer), - inlineeInfo != nullptr ? L"inlining" : L"calling", calleeName, - calleeFunctionBody ? inlineeInfo->GetDebugNumberSet(debugStringBuffer2) : L"(null)"); + inlineeInfo != nullptr ? _u("inlining") : _u("calling"), calleeName, + calleeFunctionBody ? 
inlineeInfo->GetDebugNumberSet(debugStringBuffer2) : _u("(null)")); Output::Flush(); } #endif @@ -1378,8 +1383,8 @@ Inline::TryOptimizeCallInstrWithFixedMethod(IR::Instr *callInstr, const Function Output::Print(_u("FixedFields: function %s (#%u): %s non-fixed method %s (%s #%u) (cache id: %d), because %s fixed %s %s is disabled.\n"), callInstr->m_func->GetJITFunctionBody()->GetDisplayName(), callInstr->m_func->GetDebugNumberSet(debugStringBuffer), - inlineeInfo != nullptr ? L"inlining" : L"calling", methodPropertyRecord->GetBuffer(), calleeName, - calleeFunctionBody ? inlineeInfo->GetDebugNumberSet(debugStringBuffer2) : L"(null)", + inlineeInfo != nullptr ? _u("inlining") : _u("calling"), methodPropertyRecord->GetBuffer(), calleeName, + calleeFunctionBody ? inlineeInfo->GetDebugNumberSet(debugStringBuffer2) : _u("(null)"), methodPropertyOpnd->m_inlineCacheIndex, isInlined ? _u("inlining") : _u("calling"), isBuiltIn ? _u("built-in") : _u("script"), isCtor ? _u("ctors") : _u("methods")); Output::Flush(); @@ -1482,7 +1487,7 @@ Inline::TryOptimizeCallInstrWithFixedMethod(IR::Instr *callInstr, const Function const char16* fixedFunctionNumbers = fixedFunctionBody ? fixedFunctionBody->GetDebugNumberSet(debugStringBuffer2) : _u("(null)"); JITTimeFunctionBody* profileFunctionBody = inlineeInfo->GetBody(); const char16* profileFunctionName = profileFunctionBody != nullptr ? profileFunctionBody->GetDisplayName() : _u(""); - const wchar_t* profileFunctionNumbers = profileFunctionBody ? inlineeInfo->GetDebugNumberSet(debugStringBuffer3) : L"(null)"; + const char16* profileFunctionNumbers = profileFunctionBody ? inlineeInfo->GetDebugNumberSet(debugStringBuffer3) : _u("(null)"); if (PHASE_TRACE(Js::FixedMethodsPhase, callInstr->m_func)) { @@ -1579,9 +1584,9 @@ Inline::TryOptimizeCallInstrWithFixedMethod(IR::Instr *callInstr, const Function Output::Print(_u("FixedNewObj: function %s (%s): fixed new object for %s with %s ctor %s (%s %s)%s\n"), callInstr->m_func->GetJITFunctionBody()->GetDisplayName(), callInstr->m_func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(callInstr->m_opcode), - inlineeInfo != nullptr ? L"inlined" : L"called", + inlineeInfo != nullptr ? _u("inlined") : _u("called"), methodPropertyRecord->GetBuffer(), fixedFunctionName, fixedFunctionNumbers, - constructorCache->SkipNewScObject() ? L" skip default object" : L""); + constructorCache->SkipNewScObject() ? _u(" skip default object") : _u("")); Output::Flush(); } #endif @@ -2560,7 +2565,7 @@ bool Inline::InlineApplyTarget(IR::Instr *callInstr, const FunctionJITTimeInfo* { safeThis = false; } - } + } IR::Instr* argObjByteCodeArgoutCapture = argumentsObjArgOut->GetBytecodeArgOutCapture(); argObjByteCodeArgoutCapture->GetDst()->GetStackSym()->m_nonEscapingArgObjAlias = true; @@ -3641,10 +3646,10 @@ Inline::InlineScriptFunction(IR::Instr *callInstr, const FunctionJITTimeInfo *co Func *funcCaller = callInstr->m_func; JITTimeFunctionBody *funcBody = inlineeData->GetBody(); - - // We don't do stack args optimization in jitted loop body (because of lack of information about the code before and after the loop) + + // We don't do stack args optimization in jitted loop body (because of lack of information about the code before and after the loop) // and we turn off stack arg optimization for the whole inline chain if we can't do it for one of the functionss. 
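The repeated wchar_t to char16 and L"..." to _u("...") substitutions in these Inline.cpp and InliningDecider.cpp hunks exist because wchar_t is 16 bits only on Windows; a portable 16-bit character alias and a matching literal macro are needed for the other platforms. A minimal sketch of that pattern with invented names (the real char16/_u definitions live in ChakraCore's common headers, not in this patch):

    // Invented names (Char16Like, U16) showing the usual shape of such a shim:
    // keep wchar_t on Windows, use the always-16-bit char16_t elsewhere, and
    // pick the matching string-literal prefix through a macro.
    #ifdef _WIN32
    typedef wchar_t Char16Like;
    #define U16(s) L##s
    #else
    typedef char16_t Char16Like;
    #define U16(s) u##s
    #endif

    static const Char16Like* const kNullName = U16("(null)");

    int main()
    {
        return kNullName[0] == U16('(') ? 0 : 1;
    }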
- // Inlining a function that uses arguments object could potentially hurt perf because we'll have to create arguments object on the + // Inlining a function that uses arguments object could potentially hurt perf because we'll have to create arguments object on the // heap for that function (versus otherwise the function will be jitted and have its arguments object creation optimized). // TODO: Allow arguments object creation to be optimized on a function level instead of an all-or-nothing approach. if (callInstr->m_func->IsLoopBody() && funcBody->UsesArgumentsObject()) @@ -5158,7 +5163,7 @@ Inline::GetInlineeHasArgumentObject(Func * inlinee) if (builtInOpnd->IsAddrOpnd()) { Assert(builtInOpnd->AsAddrOpnd()->m_isFunction); - + Js::BuiltinFunction builtinFunction = Js::JavascriptLibrary::GetBuiltInForFuncInfo(((JITTimeFixedField*)builtInOpnd->AsAddrOpnd()->m_metadata)->GetFuncInfoAddr(), this->topFunc->GetThreadContextInfo()); if (builtinFunction == Js::BuiltinFunction::JavascriptFunction_Apply) { @@ -5437,20 +5442,20 @@ Inline::Simd128FixLoadStoreInstr(Js::BuiltinFunction builtInId, IR::Instr * call #if defined(ENABLE_DEBUG_CONFIG_OPTIONS) // static -void Inline::TraceInlining(const FunctionJITTimeInfo *const inliner, const wchar_t* inlineeName, const wchar_t* inlineeFunctionIdandNumberString, uint inlineeByteCodeCount, +void Inline::TraceInlining(const FunctionJITTimeInfo *const inliner, const char16* inlineeName, const char16* inlineeFunctionIdandNumberString, uint inlineeByteCodeCount, const FunctionJITTimeInfo* topFunc, uint inlinedByteCodeCount, const FunctionJITTimeInfo *const inlinee, uint callSiteId, bool inLoopBody, uint builtIn) { - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - wchar_t debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - wchar_t debugStringBuffer3[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer3[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; if (inlineeName == nullptr) { - int len = swprintf_s(debugStringBuffer3, MAX_FUNCTION_BODY_DEBUG_STRING_SIZE, L"built In Id: %u", builtIn); + int len = swprintf_s(debugStringBuffer3, MAX_FUNCTION_BODY_DEBUG_STRING_SIZE, _u("built In Id: %u"), builtIn); Assert(len > 14); inlineeName = debugStringBuffer3; } - INLINE_TESTTRACE(L"INLINING %s: Inlinee: %s (%s)\tSize: %d\tCaller: %s (%s)\tSize: %d\tInlineCount: %d\tRoot: %s (%s)\tSize: %d\tCallSiteId: %d\n", + INLINE_TESTTRACE(_u("INLINING %s: Inlinee: %s (%s)\tSize: %d\tCaller: %s (%s)\tSize: %d\tInlineCount: %d\tRoot: %s (%s)\tSize: %d\tCallSiteId: %d\n"), inLoopBody ? _u("IN LOOP BODY") : _u(""), inlineeName, inlineeFunctionIdandNumberString, inlineeByteCodeCount, inliner->GetBody()->GetDisplayName(), inliner->GetDebugNumberSet(debugStringBuffer), inliner->GetBody()->GetByteCodeCount(), inlinedByteCodeCount, @@ -5459,7 +5464,7 @@ void Inline::TraceInlining(const FunctionJITTimeInfo *const inliner, const wchar callSiteId ); - INLINE_TRACE(L"INLINING %s: Inlinee: %s (%s)\tSize: %d\tCaller: %s (%s)\tSize: %d\tInlineCount: %d\tRoot: %s (%s)\tSize: %d\tCallSiteId: %d\n", + INLINE_TRACE(_u("INLINING %s: Inlinee: %s (%s)\tSize: %d\tCaller: %s (%s)\tSize: %d\tInlineCount: %d\tRoot: %s (%s)\tSize: %d\tCallSiteId: %d\n"), inLoopBody ? 
_u("IN LOOP BODY") : _u(""), inlineeName, inlineeFunctionIdandNumberString, inlineeByteCodeCount, inliner->GetBody()->GetDisplayName(), inliner->GetDebugNumberSet(debugStringBuffer), inliner->GetBody()->GetByteCodeCount(), inlinedByteCodeCount, @@ -5479,13 +5484,13 @@ void Inline::TraceInlining(const FunctionJITTimeInfo *const inliner, const wchar if (inliner->GetSourceContextId() != inlinee->GetSourceContextId()) { - INLINE_TESTTRACE(L"INLINING_ACROSS_FILES: Inlinee: %s (%s)\tSize: %d\tCaller: %s (%s)\tSize: %d\tInlineCount: %d\tRoot: %s (%s)\tSize: %d\n", + INLINE_TESTTRACE(_u("INLINING_ACROSS_FILES: Inlinee: %s (%s)\tSize: %d\tCaller: %s (%s)\tSize: %d\tInlineCount: %d\tRoot: %s (%s)\tSize: %d\n"), inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer), inlinee->GetBody()->GetByteCodeCount(), inliner->GetDisplayName(), inliner->GetDebugNumberSet(debugStringBuffer2), inliner->GetBody()->GetByteCodeCount(), inlinedByteCodeCount, topFunc->GetBody()->GetDisplayName(), topFunc->GetDebugNumberSet(debugStringBuffer3), topFunc->GetBody()->GetByteCodeCount() ); - INLINE_TRACE(L"INLINING_ACROSS_FILES: Inlinee: %s (%s)\tSize: %d\tCaller: %s (%s)\tSize: %d\tInlineCount: %d\tRoot: %s (%s)\tSize: %d\n", + INLINE_TRACE(_u("INLINING_ACROSS_FILES: Inlinee: %s (%s)\tSize: %d\tCaller: %s (%s)\tSize: %d\tInlineCount: %d\tRoot: %s (%s)\tSize: %d\n"), inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer), inlinee->GetBody()->GetByteCodeCount(), inliner->GetDisplayName(), inliner->GetDebugNumberSet(debugStringBuffer2), inliner->GetBody()->GetByteCodeCount(), inlinedByteCodeCount, topFunc->GetBody()->GetDisplayName(), topFunc->GetDebugNumberSet(debugStringBuffer3), topFunc->GetBody()->GetByteCodeCount() diff --git a/lib/Backend/Inline.h b/lib/Backend/Inline.h index 9ef22d3ec44..05557ddbbec 100644 --- a/lib/Backend/Inline.h +++ b/lib/Backend/Inline.h @@ -138,7 +138,7 @@ class Inline void SetIsInInlinedApplyCall(bool inInlinedApplyCall) { this->isInInlinedApplyCall = inInlinedApplyCall; } #if defined(ENABLE_DEBUG_CONFIG_OPTIONS) - static void TraceInlining(const FunctionJITTimeInfo *const inliner, const wchar_t* inlineeName, const wchar_t* inlineeFunctionIdandNumberString, uint inlineeByteCodeCount, + static void TraceInlining(const FunctionJITTimeInfo *const inliner, const char16* inlineeName, const char16* inlineeFunctionIdandNumberString, uint inlineeByteCodeCount, const FunctionJITTimeInfo* topFunc, uint inlinedByteCodeCount, const FunctionJITTimeInfo *const inlinee, uint callSiteId, bool inLoopBody, uint builtIn = -1); #endif }; diff --git a/lib/Backend/InliningDecider.cpp b/lib/Backend/InliningDecider.cpp index 7d1b734d22e..f56bb6a1f39 100644 --- a/lib/Backend/InliningDecider.cpp +++ b/lib/Backend/InliningDecider.cpp @@ -649,8 +649,8 @@ bool InliningDecider::GetBuiltInInfoCommon( bool InliningDecider::CanRecursivelyInline(Js::FunctionBody * inlinee, Js::FunctionBody *inliner, bool allowRecursiveInlining, uint recursiveInlineDepth) { #if defined(DBG_DUMP) || defined(ENABLE_DEBUG_CONFIG_OPTIONS) - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - wchar_t debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; #endif @@ -659,7 +659,7 @@ bool InliningDecider::CanRecursivelyInline(Js::FunctionBody * inlinee, Js::Funct && inlinee == inliner && inlinee->CanInlineRecursively(recursiveInlineDepth)) { - INLINE_TESTTRACE(L"INLINING: 
Inlined recursively\tInlinee: %s (%s)\tCaller: %s (%s)\tDepth: %d\n", + INLINE_TESTTRACE(_u("INLINING: Inlined recursively\tInlinee: %s (%s)\tCaller: %s (%s)\tDepth: %d\n"), inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer), inliner->GetDisplayName(), inliner->GetDebugNumberSet(debugStringBuffer2), recursiveInlineDepth); return true; @@ -667,7 +667,7 @@ bool InliningDecider::CanRecursivelyInline(Js::FunctionBody * inlinee, Js::Funct if (!inlinee->CanInlineAgain()) { - INLINE_TESTTRACE(L"INLINING: Skip Inline: Do not inline recursive functions\tInlinee: %s (%s)\tCaller: %s (%s)\n", + INLINE_TESTTRACE(_u("INLINING: Skip Inline: Do not inline recursive functions\tInlinee: %s (%s)\tCaller: %s (%s)\n"), inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer), inliner->GetDisplayName(), inliner->GetDebugNumberSet(debugStringBuffer2)); return false; @@ -759,9 +759,9 @@ bool InliningDecider::DeciderInlineIntoInliner(Js::FunctionBody * inlinee, Js::F } #if ENABLE_DEBUG_CONFIG_OPTIONS - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - wchar_t debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - wchar_t debugStringBuffer3[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer3[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; #endif if (inlinee->GetHasLoops()) @@ -874,14 +874,14 @@ bool InliningDecider::DeciderInlineIntoInliner(Js::FunctionBody * inlinee, Js::F bool InliningDecider::ContinueInliningUserDefinedFunctions(uint32 bytecodeInlinedCount) const { #if ENABLE_DEBUG_CONFIG_OPTIONS - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; #endif if (PHASE_FORCE(Js::InlinePhase, this->topFunc) || bytecodeInlinedCount <= (uint)this->threshold.inlineCountMax) { return true; } - INLINE_TESTTRACE(L"INLINING: Skip Inline: InlineCountMax threshold %d, reached: %s (#%s)\n", + INLINE_TESTTRACE(_u("INLINING: Skip Inline: InlineCountMax threshold %d, reached: %s (#%s)\n"), (uint)this->threshold.inlineCountMax, this->topFunc->GetDisplayName(), this->topFunc->GetDebugNumberSet(debugStringBuffer)); diff --git a/lib/Backend/InliningHeuristics.h b/lib/Backend/InliningHeuristics.h index 66864331cf0..6e133ec0a17 100644 --- a/lib/Backend/InliningHeuristics.h +++ b/lib/Backend/InliningHeuristics.h @@ -4,6 +4,8 @@ //------------------------------------------------------------------------------------------------------- #pragma once +class InliningDecider; + struct InliningThreshold { uint nonLoadByteCodeCount; @@ -26,7 +28,7 @@ struct InliningThreshold class InliningHeuristics { - friend class InliningDecider; + friend class ::InliningDecider; const FunctionJITTimeInfo * topFunc; InliningThreshold threshold; diff --git a/lib/Backend/IntBounds.cpp b/lib/Backend/IntBounds.cpp index 2c8bbb79f04..8a8a2c27494 100644 --- a/lib/Backend/IntBounds.cpp +++ b/lib/Backend/IntBounds.cpp @@ -304,7 +304,8 @@ bool IntBounds::IsGreaterThanOrEqualTo(const int constantValue, const int consta if(offset == 1) return constantValue > constantBoundBase; - const int constantBound = constantBoundBase + offset; + // use unsigned to avoid signed int overflow + const int constantBound = (unsigned)constantBoundBase + (unsigned)offset; return offset >= 0 ? 
constantBound >= constantBoundBase && constantValue >= constantBound @@ -318,7 +319,8 @@ bool IntBounds::IsLessThanOrEqualTo(const int constantValue, const int constantB if(offset == -1) return constantValue < constantBoundBase; - const int constantBound = constantBoundBase + offset; + // use unsigned to avoid signed int overflow + const int constantBound = (unsigned)constantBoundBase + (unsigned)offset; return offset >= 0 ? constantBound < constantBoundBase || constantValue <= constantBound diff --git a/lib/Backend/IntConstantBounds.h b/lib/Backend/IntConstantBounds.h index 8f961a5ade8..86b43caa3b2 100644 --- a/lib/Backend/IntConstantBounds.h +++ b/lib/Backend/IntConstantBounds.h @@ -65,7 +65,7 @@ class IntConstantBounds IntConstantBounds And_0x1f() const { const int32 mask = 0x1f; - if(static_cast(upperBound - lowerBound) >= static_cast(mask) || + if(static_cast(upperBound) - static_cast(lowerBound) >= static_cast(mask) || (lowerBound & mask) > (upperBound & mask)) { // The range contains all items in the set {0-mask}, or the range crosses a boundary of {0-mask}. Since we cannot diff --git a/lib/Backend/InterpreterThunkEmitter.cpp b/lib/Backend/InterpreterThunkEmitter.cpp index c1f54595d0e..110b00b0ccc 100644 --- a/lib/Backend/InterpreterThunkEmitter.cpp +++ b/lib/Backend/InterpreterThunkEmitter.cpp @@ -6,6 +6,7 @@ #ifdef ENABLE_NATIVE_CODEGEN #ifdef _M_X64 +#ifdef _WIN32 const BYTE InterpreterThunkEmitter::FunctionBodyOffset = 23; const BYTE InterpreterThunkEmitter::DynamicThunkAddressOffset = 27; const BYTE InterpreterThunkEmitter::CallBlockStartAddrOffset = 37; @@ -52,6 +53,44 @@ const BYTE InterpreterThunkEmitter::Epilog[] = { 0x48, 0x83, 0xC4, StackAllocSize, // add rsp,28h 0xC3 // ret }; +#else // Sys V AMD64 +const BYTE InterpreterThunkEmitter::FunctionBodyOffset = 7; +const BYTE InterpreterThunkEmitter::DynamicThunkAddressOffset = 11; +const BYTE InterpreterThunkEmitter::CallBlockStartAddrOffset = 21; +const BYTE InterpreterThunkEmitter::ThunkSizeOffset = 35; +const BYTE InterpreterThunkEmitter::ErrorOffset = 44; +const BYTE InterpreterThunkEmitter::ThunkAddressOffset = 57; + +const BYTE InterpreterThunkEmitter::PrologSize = 56; +const BYTE InterpreterThunkEmitter::StackAllocSize = 0x0; + +const BYTE InterpreterThunkEmitter::InterpreterThunk[] = { + 0x55, // push rbp // Prolog - setup the stack frame + 0x48, 0x89, 0xe5, // mov rbp, rsp + 0x48, 0x8b, 0x47, 0x00, // mov rax, qword ptr [rdi + FunctionBodyOffset] + 0x48, 0x8b, 0x50, 0x00, // mov rdx, qword ptr [rax + DynamicThunkAddressOffset] + // Range Check for Valid call target + 0x48, 0x83, 0xE2, 0xF8, // and rdx, 0xfffffffffffffff8 // Force 8 byte alignment + 0x48, 0x89, 0xd1, // mov rcx, rdx + 0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov rax, CallBlockStartAddress + 0x48, 0x29, 0xc1, // sub rcx, rax + 0x48, 0x81, 0xf9, 0x00, 0x00, 0x00, 0x00, // cmp rcx, ThunkSize + 0x76, 0x09, // jbe safe + 0x48, 0xc7, 0xc1, 0x00, 0x00, 0x00, 0x00, // mov rcx, errorcode + 0xcd, 0x29, // int 29h <-- xplat TODO: just to exit + + // safe: + 0x48, 0x8d, 0x7c, 0x24, 0x10, // lea rdi, [rsp+0x10] + 0x48, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov rax, // stack already 16-byte aligned + 0xff, 0xe2, // jmp rdx + 0xcc, 0xcc, 0xcc, 0xcc, 0xcc // int 3 // for alignment to size of 8 +}; + +const BYTE InterpreterThunkEmitter::Epilog[] = { + 0x5d, // pop rbp + 0xc3 // ret +}; +#endif #elif defined(_M_ARM) const BYTE InterpreterThunkEmitter::ThunkAddressOffset = 8; const BYTE InterpreterThunkEmitter::FunctionBodyOffset 
= 18; @@ -260,12 +299,12 @@ void InterpreterThunkEmitter::NewThunkBlock() #ifdef ASMJS_PLAT if (isAsmInterpreterThunk) { - interpreterThunk = Js::InterpreterStackFrame::InterpreterAsmThunk; + interpreterThunk = (void*)Js::InterpreterStackFrame::InterpreterAsmThunk; } else #endif { - interpreterThunk = Js::InterpreterStackFrame::InterpreterThunk; + interpreterThunk = (void*)Js::InterpreterStackFrame::InterpreterThunk; } allocation = emitBufferManager.AllocateBuffer(bufferSize, &buffer); diff --git a/lib/Backend/JITObjTypeSpecFldInfo.h b/lib/Backend/JITObjTypeSpecFldInfo.h index 0d3171e5141..cebcbbf6608 100644 --- a/lib/Backend/JITObjTypeSpecFldInfo.h +++ b/lib/Backend/JITObjTypeSpecFldInfo.h @@ -61,7 +61,7 @@ class JITObjTypeSpecFldInfo _Inout_updates_(arrayLength) ObjTypeSpecFldIDL * jitData); // TODO: OOP JIT, implement this - wchar_t* GetCacheLayoutString() { __debugbreak(); return nullptr; } + char16* GetCacheLayoutString() { __debugbreak(); return nullptr; } private: Js::ObjTypeSpecFldInfoFlags GetFlags() const; diff --git a/lib/Backend/JITTimeFunctionBody.cpp b/lib/Backend/JITTimeFunctionBody.cpp index fc4ae06abce..c897f2dad58 100644 --- a/lib/Backend/JITTimeFunctionBody.cpp +++ b/lib/Backend/JITTimeFunctionBody.cpp @@ -232,7 +232,7 @@ JITTimeFunctionBody::InitializeJITFunctionData( jitBody->hasFinally = functionBody->GetHasFinally(); jitBody->nameLength = functionBody->GetDisplayNameLength() + 1; // +1 for null terminator - jitBody->displayName = (wchar_t *)functionBody->GetDisplayName(); + jitBody->displayName = (char16 *)functionBody->GetDisplayName(); jitBody->objectLiteralTypesAddr = (intptr_t)functionBody->GetObjectLiteralTypes(); jitBody->literalRegexCount = functionBody->GetLiteralRegexCount(); jitBody->literalRegexes = (intptr_t*)functionBody->GetLiteralRegexes(); @@ -1085,7 +1085,7 @@ JITTimeFunctionBody::InitializeStatementMap(Js::SmallSpanSequence * statementMap return true; } -wchar_t* +char16* JITTimeFunctionBody::GetDisplayName() const { return m_bodyData.displayName; diff --git a/lib/Backend/JITTimeFunctionBody.h b/lib/Backend/JITTimeFunctionBody.h index df746cf0ced..782ea24a20b 100644 --- a/lib/Backend/JITTimeFunctionBody.h +++ b/lib/Backend/JITTimeFunctionBody.h @@ -174,7 +174,7 @@ class JITTimeFunctionBody static bool LoopContains(const JITLoopHeaderIDL * loop1, const JITLoopHeaderIDL * loop2); - wchar_t* GetDisplayName() const; + char16* GetDisplayName() const; intptr_t GetAuxDataAddr(uint offset) const; const Js::PropertyIdArray * ReadPropertyIdArrayFromAuxData(uint offset) const; diff --git a/lib/Backend/JITTimeProfileInfo.cpp b/lib/Backend/JITTimeProfileInfo.cpp index 7086c7acd03..a43baa85dfc 100644 --- a/lib/Backend/JITTimeProfileInfo.cpp +++ b/lib/Backend/JITTimeProfileInfo.cpp @@ -3,7 +3,7 @@ // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. 
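The IntBounds.cpp and IntConstantBounds.h hunks a little earlier replace direct int arithmetic (constantBoundBase + offset, upperBound - lowerBound) with unsigned arithmetic: signed overflow is undefined behavior, and it is the kind of thing an undefined-behavior sanitizer reports, whereas unsigned arithmetic wraps modulo 2^32 and converting the result back to int yields the two's-complement value (implementation-defined before C++20, but consistent on the targets involved). A stand-alone illustration:

    #include <climits>

    // Illustrative only: the direct signed addition overflows for large inputs
    // (undefined behavior); the unsigned round-trip wraps predictably instead.
    static int AddWithWrap(int base, int offset)
    {
        return (int)((unsigned)base + (unsigned)offset);
    }

    int main()
    {
        // INT_MAX + 1 wraps to INT_MIN instead of invoking undefined behavior.
        return AddWithWrap(INT_MAX, 1) == INT_MIN ? 0 : 1;
    }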
//------------------------------------------------------------------------------------------------------- -#include "BackEnd.h" +#include "Backend.h" JITTimeProfileInfo::JITTimeProfileInfo(ProfileDataIDL * profileData) : m_profileData(*profileData) diff --git a/lib/Backend/JITType.cpp b/lib/Backend/JITType.cpp index 166b7f941ae..9b1e43d5e11 100644 --- a/lib/Backend/JITType.cpp +++ b/lib/Backend/JITType.cpp @@ -111,13 +111,13 @@ JITTypeHolder::operator!=(const JITTypeHolder& p) const } bool -JITTypeHolder::operator==(const nullptr_t &p) const +JITTypeHolder::operator==(const std::nullptr_t &p) const { return this->t == nullptr; } bool -JITTypeHolder::operator!=(const nullptr_t &p) const +JITTypeHolder::operator!=(const std::nullptr_t &p) const { return this->t != nullptr; } diff --git a/lib/Backend/JITType.h b/lib/Backend/JITType.h index 2a2ae4d47bc..accddd2fe4f 100644 --- a/lib/Backend/JITType.h +++ b/lib/Backend/JITType.h @@ -44,8 +44,8 @@ class JITTypeHolder bool operator< (const JITTypeHolder& p) const; bool operator<= (const JITTypeHolder& p) const; void operator =(const JITTypeHolder &p); - bool operator== (const nullptr_t &p) const; - bool operator!= (const nullptr_t &p) const; + bool operator== (const std::nullptr_t &p) const; + bool operator!= (const std::nullptr_t &p) const; private: // prevent implicit conversion diff --git a/lib/Backend/JITTypeHandler.cpp b/lib/Backend/JITTypeHandler.cpp index 26badc271f7..69929b6b877 100644 --- a/lib/Backend/JITTypeHandler.cpp +++ b/lib/Backend/JITTypeHandler.cpp @@ -3,7 +3,7 @@ // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. //------------------------------------------------------------------------------------------------------- -#include "BackEnd.h" +#include "Backend.h" JITTypeHandler::JITTypeHandler(TypeHandlerIDL * data) { diff --git a/lib/Backend/LinearScan.cpp b/lib/Backend/LinearScan.cpp index 06709ec16b4..894ad5f29cc 100644 --- a/lib/Backend/LinearScan.cpp +++ b/lib/Backend/LinearScan.cpp @@ -3241,7 +3241,7 @@ LinearScan::InsertStores(Lifetime *lifetime, RegNum reg, IR::Instr *insertionIns if (sym->m_isSingleDef) { IR::Instr * defInstr = sym->m_instrDef; - if (!sym->IsConst() && defInstr->GetDst()->AsRegOpnd()->GetReg() == RegNOREG + if ((!sym->IsConst() && defInstr->GetDst()->AsRegOpnd()->GetReg() == RegNOREG) || this->secondChanceRegs.Test(reg)) { // This can happen if we were trying to allocate this lifetime, @@ -4014,7 +4014,7 @@ LinearScan::InsertSecondChanceCompensation(Lifetime ** branchRegContent, Lifetim IR::BranchInstr *branchInstr, IR::LabelInstr *labelInstr) { IR::Instr *prevInstr = branchInstr->GetPrevRealInstrOrLabel(); - bool needsAirlock = branchInstr->IsConditional() || (prevInstr->IsBranchInstr() && prevInstr->AsBranchInstr()->IsConditional() || branchInstr->IsMultiBranch()); + bool needsAirlock = branchInstr->IsConditional() || (prevInstr->IsBranchInstr() && prevInstr->AsBranchInstr()->IsConditional()) || branchInstr->IsMultiBranch(); bool hasAirlock = false; IR::Instr *insertionInstr = branchInstr; IR::Instr *insertionStartInstr = branchInstr->m_prev; diff --git a/lib/Backend/Lower.cpp b/lib/Backend/Lower.cpp index 27d3be505ea..cad5deabefd 100644 --- a/lib/Backend/Lower.cpp +++ b/lib/Backend/Lower.cpp @@ -156,7 +156,7 @@ Lowerer::LowerRange(IR::Instr *instrStart, IR::Instr *instrEnd, bool defaultDoFa // The instr can have just debugger bailout, or debugger bailout + other shared bailout. 
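On the JITType.cpp/JITType.h change above (nullptr_t to std::nullptr_t): the standard only defines nullptr_t inside namespace std (via <cstddef>); MSVC's headers also expose the unqualified name, which is presumably why the old spelling compiled on Windows, but clang needs the qualified form. A small self-contained example of the same operator shape, using an invented Holder type:

    #include <cstddef>

    // Invented type demonstrating comparison operators that take std::nullptr_t;
    // only the std-qualified spelling is guaranteed to exist on every compiler.
    struct Holder
    {
        const void* t = nullptr;
        bool operator==(const std::nullptr_t&) const { return t == nullptr; }
        bool operator!=(const std::nullptr_t&) const { return t != nullptr; }
    };

    int main()
    {
        Holder h;
        return (h == nullptr && !(h != nullptr)) ? 0 : 1;
    }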
// Note that by the time we get here, we should not have aux-only bailout (in globopt we promote it to normal bailout). if (m_func->IsJitInDebugMode() && instr->HasBailOutInfo() && - ((instr->GetBailOutKind() & IR::BailOutForDebuggerBits) && instr->m_opcode != Js::OpCode::BailForDebugger || + (((instr->GetBailOutKind() & IR::BailOutForDebuggerBits) && instr->m_opcode != Js::OpCode::BailForDebugger) || instr->HasAuxBailOut())) { instr = this->SplitBailForDebugger(instr); // Change instr, as returned is the one we need to lower next. @@ -254,7 +254,7 @@ Lowerer::LowerRange(IR::Instr *instrStart, IR::Instr *instrEnd, bool defaultDoFa case Js::OpCode::InvalCachedScope: this->LowerBinaryHelper(instr, IR::HelperOP_InvalidateCachedScope); break; - + case Js::OpCode::InitCachedScope: instrPrev = this->LowerInitCachedScope(instr); break; @@ -3166,7 +3166,7 @@ Lowerer::LoadOptimizationOverridesValueOpnd(IR::Instr *instr, OptimizationOverri IR::Opnd * Lowerer::LoadNumberAllocatorValueOpnd(IR::Instr *instr, NumberAllocatorValue valueType) -{ +{ ScriptContextInfo *scriptContext = instr->m_func->GetScriptContextInfo(); bool allowNativeCodeBumpAllocation = scriptContext->GetRecyclerAllowNativeCodeBumpAllocation(); @@ -3349,7 +3349,7 @@ Lowerer::TryGenerateFastBrEq(IR::Instr * instr) // Fast path for == null or == undefined // if (src == null || src == undefined) - if (isConst || srcReg2 && this->IsNullOrUndefRegOpnd(srcReg2)) + if (isConst || (srcReg2 && this->IsNullOrUndefRegOpnd(srcReg2))) { IR::BranchInstr *newBranch; newBranch = this->GenerateFastBrConst(instr->AsBranchInstr(), @@ -3513,7 +3513,7 @@ Lowerer::LowerNewScObjectLiteral(IR::Instr *newObjInstr) propertyArrayOpnd = IR::AddrOpnd::New(propArrayAddr, IR::AddrOpndKindDynamicMisc, this->m_func); //#if 0 TODO: OOP JIT, obj literal types - // should pass in isShared bit through RPC, enable for in-proc jit to see perf impact + // should pass in isShared bit through RPC, enable for in-proc jit to see perf impact Js::DynamicType * literalType = func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts) ? nullptr : *(Js::DynamicType **)literalTypeRef; if (literalType == nullptr || !literalType->GetIsShared()) @@ -6832,7 +6832,7 @@ Lowerer::GenerateStFldWithCachedType(IR::Instr *instrStFld, bool* continueAsHelp if (hasTypeCheckBailout) { - AssertMsg(PHASE_ON1(Js::ObjTypeSpecIsolatedFldOpsWithBailOutPhase) || !propertySymOpnd->IsTypeDead(), + AssertMsg(PHASE_ON1(Js::ObjTypeSpecIsolatedFldOpsWithBailOutPhase) || !propertySymOpnd->IsTypeDead(), "Why does a field store have a type check bailout, if its type is dead?"); if (instrStFld->GetBailOutInfo()->bailOutInstr != instrStFld) @@ -7036,7 +7036,7 @@ Lowerer::PinTypeRef(JITTypeHolder type, void* typeRef, IR::Instr* instr, Js::Pro Output::Print(_u("PinnedTypes: function %s(%s) instr %s property %s(#%u) pinned %s reference 0x%p to type 0x%p.\n"), this->m_func->GetJITFunctionBody()->GetDisplayName(), this->m_func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), m_func->GetThreadContextInfo()->GetPropertyRecord(propertyId)->GetBuffer(), propertyId, - typeRef == type.t ? L"strong" : L"weak", typeRef, type.t); + typeRef == type.t ? 
_u("strong") : _u("weak"), typeRef, type.t); Output::Flush(); } } @@ -8590,7 +8590,7 @@ Lowerer::LowerMemset(IR::Instr * instr, IR::RegOpnd * helperRet) m_lowererMD.LoadHelperArgument(instr, baseOpnd); m_lowererMD.ChangeToHelperCall(instr, helperMethod); dst->Free(m_func); - + return instrPrev; } @@ -8865,10 +8865,10 @@ Lowerer::GenerateFastBrBReturn(IR::Instr * instr) // TEST firstPrototypeOpnd, firstPrototypeOpnd // JNE $helper IR::RegOpnd * firstPrototypeOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); - InsertMove(firstPrototypeOpnd, + InsertMove(firstPrototypeOpnd, IR::IndirOpnd::New(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfFirstPrototype(), TyMachPtr, this->m_func), instr); InsertTestBranch(firstPrototypeOpnd, firstPrototypeOpnd, Js::OpCode::BrNeq_A, labelHelper, instr); - + // MOV currentEnumeratorOpnd, forInEnumerator->enumerator.currentEnumerator // TEST currentEnumeratorOpnd, currentEnumeratorOpnd // JNE $helper @@ -8883,7 +8883,7 @@ Lowerer::GenerateFastBrBReturn(IR::Instr * instr) IR::RegOpnd * objectOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); InsertMove(objectOpnd, IR::IndirOpnd::New(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorObject(), TyMachPtr, this->m_func), instr); - InsertTestBranch(objectOpnd, objectOpnd, Js::OpCode::BrEq_A, labelHelper, instr); + InsertTestBranch(objectOpnd, objectOpnd, Js::OpCode::BrEq_A, labelHelper, instr); // MOV initialTypeOpnd, forInEnumerator->enumerator.initialType // CMP initialTypeOpnd, objectOpnd->type @@ -8907,7 +8907,7 @@ Lowerer::GenerateFastBrBReturn(IR::Instr * instr) InsertCompareBranch(enumeratedCountOpnd, IR::IndirOpnd::New(cachedDataOpnd, Js::DynamicObjectPropertyEnumerator::GetOffsetOfCachedDataCachedCount(), TyUint32, this->m_func), Js::OpCode::BrGe_A, labelHelper, instr); - + // MOV propertyAttributesOpnd, cachedData->attributes // MOV objectPropertyAttributesOpnd, propertyAttributesOpnd[enumeratedCount] // CMP objectPropertyAttributesOpnd & PropertyEnumerable, PropertyEnumerable @@ -8923,7 +8923,7 @@ Lowerer::GenerateFastBrBReturn(IR::Instr * instr) InsertCompareBranch(andPropertyEnumerableInstr->GetDst(), IR::IntConstOpnd::New(PropertyEnumerable, TyUint8, this->m_func), Js::OpCode::BrNeq_A, labelHelper, instr); - + IR::Opnd * opndDst = instr->GetDst(); // ForIn result propertyString Assert(opndDst->IsRegOpnd()); @@ -9932,7 +9932,7 @@ Lowerer::LowerArgIn(IR::Instr *instrArgIn) // ... 
// s2 = assign param2 // $done: - + AnalysisAssert(instrArgIn); IR::Opnd *restDst = nullptr; @@ -10086,7 +10086,7 @@ Lowerer::LowerArgIn(IR::Instr *instrArgIn) BVSparse *formalsBv = JitAnew(this->m_alloc, BVSparse, this->m_alloc); - + while (currArgInCount > 0) { dstOpnd = instrArgIn->GetDst(); @@ -10104,9 +10104,9 @@ Lowerer::LowerArgIn(IR::Instr *instrArgIn) // BrEq_A $Ln-1 currArgInCount--; - + labelInitNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); - + // And insert the "normal" initialization before the "done" label @@ -12543,7 +12543,7 @@ Lowerer::GenerateBailOut(IR::Instr * instr, IR::BranchInstr * branchInstr, IR::L else { indexOpnd = IR::MemRefOpnd::New((BYTE*)bailOutInfo->bailOutRecord + BailOutRecord::GetOffsetOfPolymorphicCacheIndex(), TyUint32, this->m_func); - } + } m_lowererMD.CreateAssign( indexOpnd, IR::IntConstOpnd::New(bailOutInfo->polymorphicCacheIndex, TyUint32, this->m_func), instr); @@ -13479,8 +13479,8 @@ bool Lowerer::ShouldGenerateArrayFastPath( return true; } - if( !supportsObjectsWithArrays && arrayValueType.GetObjectType() == ObjectType::ObjectWithArray || - !supportsTypedArrays && arrayValueType.IsLikelyTypedArray()) + if( (!supportsObjectsWithArrays && arrayValueType.GetObjectType() == ObjectType::ObjectWithArray) || + (!supportsTypedArrays && arrayValueType.IsLikelyTypedArray()) ) { // The fast path likely would not hit return false; @@ -13935,8 +13935,8 @@ IR::BranchInstr *Lowerer::InsertCompareBranch( // Check for compare with zero, to prefer using Test instead of Cmp if( !compareSrc1->IsRegOpnd() || !( - compareSrc2->IsIntConstOpnd() && compareSrc2->AsIntConstOpnd()->GetValue() == 0 || - compareSrc2->IsAddrOpnd() && !compareSrc2->AsAddrOpnd()->m_address + (compareSrc2->IsIntConstOpnd() && compareSrc2->AsIntConstOpnd()->GetValue() == 0) || + (compareSrc2->IsAddrOpnd() && !compareSrc2->AsAddrOpnd()->m_address) ) || branchOpCode == Js::OpCode::BrGt_A || branchOpCode == Js::OpCode::BrLe_A) { @@ -14703,7 +14703,7 @@ Lowerer::GenerateFastElemIIntIndexCommon( const bool needBailOutOnInvalidLength = !!(bailOutKind & (IR::BailOutOnInvalidatedArrayHeadSegment)); const bool needBailOutToHelper = !!(bailOutKind & (IR::BailOutOnArrayAccessHelperCall)); const bool needBailOutOnSegmentLengthCompare = needBailOutToHelper || needBailOutOnInvalidLength; - + if(indexIsLessThanHeadSegmentLength || needBailOutOnSegmentLengthCompare) { if (needBailOutOnSegmentLengthCompare) @@ -14793,9 +14793,9 @@ Lowerer::GenerateFastElemIIntIndexCommon( { if(pLabelSegmentLengthIncreased && !( - baseValueType.IsArrayOrObjectWithArray() && baseValueType.HasNoMissingValues() || - (instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) && - instr->IsProfiledInstr() && !instr->AsProfiledInstr()->u.stElemInfo->LikelyFillsMissingValue() + (baseValueType.IsArrayOrObjectWithArray() && baseValueType.HasNoMissingValues()) || + ((instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) && + instr->IsProfiledInstr() && !instr->AsProfiledInstr()->u.stElemInfo->LikelyFillsMissingValue()) )) { // For arrays that are not guaranteed to have no missing values, before storing to an element where @@ -15485,10 +15485,10 @@ Lowerer::GenerateFastLdElemI(IR::Instr *& ldElem, bool *instrIsInHelperBlockRef) const IR::AutoReuseOpnd autoReuseIndirOpnd(indirOpnd, m_func); const ValueType baseValueType(src1->AsIndirOpnd()->GetBaseOpnd()->GetValueType()); - if (ldElem->HasBailOutInfo() && - ldElem->GetByteCodeOffset() != 
Js::Constants::NoByteCodeOffset && - ldElem->GetBailOutInfo()->bailOutOffset <= ldElem->GetByteCodeOffset() && - dst->IsEqual(src1->AsIndirOpnd()->GetBaseOpnd()) || + if ((ldElem->HasBailOutInfo() && + ldElem->GetByteCodeOffset() != Js::Constants::NoByteCodeOffset && + ldElem->GetBailOutInfo()->bailOutOffset <= ldElem->GetByteCodeOffset() && + dst->IsEqual(src1->AsIndirOpnd()->GetBaseOpnd())) || (src1->AsIndirOpnd()->GetIndexOpnd() && dst->IsEqual(src1->AsIndirOpnd()->GetIndexOpnd()))) { // This is a pre-op bailout where the dst is the same as one of the srcs. The dst may be trashed before bailing out, @@ -16331,7 +16331,7 @@ Lowerer::GenerateFastStElemI(IR::Instr *& stElem, bool *instrIsInHelperBlockRef) InsertBranch(Js::OpCode::Br, labelFallThru, insertBeforeInstr); } - if (!(isStringIndex || baseValueType.IsArrayOrObjectWithArray() && baseValueType.HasNoMissingValues())) + if (!(isStringIndex || (baseValueType.IsArrayOrObjectWithArray() && baseValueType.HasNoMissingValues()))) { if(!stElem->IsProfiledInstr() || stElem->AsProfiledInstr()->u.stElemInfo->LikelyFillsMissingValue()) { @@ -18544,7 +18544,7 @@ Lowerer::GenerateFastArgumentsLdElemI(IR::Instr* ldElem, IR::LabelInstr *labelFa ldElem->InsertBefore(labelCreateHeapArgs); emittedFastPath = true; } - + if (!emittedFastPath) { throw Js::RejitException(RejitReason::DisableStackArgOpt); @@ -20655,7 +20655,7 @@ Lowerer::GenerateLdHomeObjProto(IR::Instr* instr) // // $Err: // ThrowRuntimeReferenceError(JSERR_BadSuperReference); - // + // // $NoErr: // instance = ((RecyclableObject*)instance)->GetPrototype(); // if (instance == nullptr) goto $Done; diff --git a/lib/Backend/LowerMDShared.cpp b/lib/Backend/LowerMDShared.cpp index 7315cd5e16f..fbb7fab69f9 100644 --- a/lib/Backend/LowerMDShared.cpp +++ b/lib/Backend/LowerMDShared.cpp @@ -190,7 +190,7 @@ LowererMD::LowerCallHelper(IR::Instr *instrCall) Assert(regArg->m_sym->m_isSingleDef); IR::Instr *instrArg = regArg->m_sym->m_instrDef; - Assert(instrArg->m_opcode == Js::OpCode::ArgOut_A || + Assert(instrArg->m_opcode == Js::OpCode::ArgOut_A || (helperMethod == IR::JnHelperMethod::HelperOP_InitCachedScope && instrArg->m_opcode == Js::OpCode::ExtendArg_A)); prevInstr = LoadHelperArgument(prevInstr, instrArg->GetSrc1()); @@ -447,22 +447,22 @@ LowererMD::LowerLeaveNull(IR::Instr *finallyEndInstr) #if _M_X64 { // amd64_ReturnFromCallWithFakeFrame expects to find the spill size and args size - // in r8 and r9. + // in REG_EH_SPILL_SIZE and REG_EH_ARGS_SIZE. 
- // MOV r8, spillSize + // MOV REG_EH_SPILL_SIZE, spillSize IR::Instr *movR8 = IR::Instr::New(Js::OpCode::LdSpillSize, - IR::RegOpnd::New(nullptr, RegR8, TyMachReg, m_func), + IR::RegOpnd::New(nullptr, REG_EH_SPILL_SIZE, TyMachReg, m_func), m_func); finallyEndInstr->InsertBefore(movR8); - // MOV r9, argsSize + // MOV REG_EH_ARGS_SIZE, argsSize IR::Instr *movR9 = IR::Instr::New(Js::OpCode::LdArgSize, - IR::RegOpnd::New(nullptr, RegR9, TyMachReg, m_func), + IR::RegOpnd::New(nullptr, REG_EH_ARGS_SIZE, TyMachReg, m_func), m_func); finallyEndInstr->InsertBefore(movR9); - IR::Opnd *targetOpnd = IR::RegOpnd::New(nullptr, RegRCX, TyMachReg, m_func); + IR::Opnd *targetOpnd = IR::RegOpnd::New(nullptr, REG_EH_TARGET, TyMachReg, m_func); IR::Instr *movTarget = IR::Instr::New(Js::OpCode::MOV, targetOpnd, IR::HelperCallOpnd::New(IR::HelperOp_ReturnFromCallWithFakeFrame, m_func), @@ -1114,12 +1114,12 @@ void LowererMD::ChangeToAdd(IR::Instr *const instr, const bool needFlags) MakeDstEquSrc1(instr); // Prefer INC for add by one - if(instr->GetDst()->IsEqual(instr->GetSrc1()) && + if((instr->GetDst()->IsEqual(instr->GetSrc1()) && instr->GetSrc2()->IsIntConstOpnd() && - instr->GetSrc2()->AsIntConstOpnd()->GetValue() == 1 || - instr->GetDst()->IsEqual(instr->GetSrc2()) && + instr->GetSrc2()->AsIntConstOpnd()->GetValue() == 1) || + (instr->GetDst()->IsEqual(instr->GetSrc2()) && instr->GetSrc1()->IsIntConstOpnd() && - instr->GetSrc1()->AsIntConstOpnd()->GetValue() == 1) + instr->GetSrc1()->AsIntConstOpnd()->GetValue() == 1)) { if(instr->GetSrc1()->IsIntConstOpnd()) { @@ -1500,8 +1500,8 @@ LowererMD::Legalize(IR::Instr *const instr, bool fPostRegAlloc) break; case Js::OpCode::TEST: - if(instr->GetSrc1()->IsImmediateOpnd() && !instr->GetSrc2()->IsImmediateOpnd() || - instr->GetSrc2()->IsMemoryOpnd() && !instr->GetSrc1()->IsMemoryOpnd()) + if((instr->GetSrc1()->IsImmediateOpnd() && !instr->GetSrc2()->IsImmediateOpnd()) || + (instr->GetSrc2()->IsMemoryOpnd() && !instr->GetSrc1()->IsMemoryOpnd())) { if (verify) { @@ -2410,33 +2410,33 @@ LowererMD::GenerateFastStringCheck(IR::Instr *instr, IR::RegOpnd *srcReg1, IR::R // if src1 is not string // generate object test, if not equal jump to $helper // compare type check to string, if not jump to $helper - // + // // if strict mode generate string test as above for src1 and jump to $failure if failed any time // else if not strict generate string test as above for src1 and jump to $helper if failed any time - // + // // Compare length of src1 and src2 if not equal goto $failure - // + // // if src1 is not flat string jump to $helper - // + // // if src1 and src2 m_pszValue pointer match goto $success - // + // // if src2 is not flat string jump to $helper - // + // // if first character of src1 and src2 doesn't match goto $failure - // + // // shift left by 1 length of src1 (length*2) - // + // // memcmp src1 and src2 flat strings till length * 2 - // + // // test eax (result of memcmp) // if equal jump to $success else to $failure - // + // // $success // jmp to $fallthrough // $failure // jmp to $fallthrough // $helper - // + // // $fallthrough // Generates: @@ -5914,8 +5914,8 @@ LowererMD::GenerateFastRecyclerAlloc(size_t allocSize, IR::RegOpnd* newObjDst, I size_t alignedSize = HeapInfo::GetAlignedSizeNoCheck(allocSize); bool allowNativeCodeBumpAllocation = scriptContext->GetRecyclerAllowNativeCodeBumpAllocation(); - Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation((void*)scriptContext->GetRecyclerAddr(), alignedSize, - allocatorAddress, 
endAddressOffset, freeListOffset, + Recycler::GetNormalHeapBlockAllocatorInfoForNativeAllocation((void*)scriptContext->GetRecyclerAddr(), alignedSize, + allocatorAddress, endAddressOffset, freeListOffset, allowNativeCodeBumpAllocation, this->m_func->IsOOPJIT()); endAddressOpnd = IR::MemRefOpnd::New((char*)allocatorAddress + endAddressOffset, TyMachPtr, this->m_func, IR::AddrOpndKindDynamicRecyclerAllocatorEndAddressRef); @@ -6071,7 +6071,7 @@ LowererMD::SaveDoubleToVar(IR::RegOpnd * dstOpnd, IR::RegOpnd *opndFloat, IR::In // s1 = XOR s1, FloatTag_Value // dst = s1 - + IR::Instr *setTag = IR::Instr::New(Js::OpCode::XOR, s1, s1, @@ -7960,11 +7960,11 @@ LowererMD::LowerCommitScope(IR::Instr *instrCommit) opnd = IR::IndirOpnd::New(baseOpnd, Js::ActivationObjectEx::GetOffsetOfCommitFlag(), TyInt8, this->m_func); instrCommit->SetDst(opnd); instrCommit->SetSrc1(IR::IntConstOpnd::New(1, TyInt8, this->m_func)); - + LowererMD::ChangeToAssign(instrCommit); const Js::PropertyIdArray *propIds = instrCommit->m_func->GetJITFunctionBody()->GetFormalsPropIdArray(); - + uint firstVarSlot = (uint)Js::ActivationObjectEx::GetFirstVarSlot(propIds); if (firstVarSlot < propIds->count) { diff --git a/lib/Backend/NativeCodeData.cpp b/lib/Backend/NativeCodeData.cpp index 8609b49a4a5..eba76e483b9 100644 --- a/lib/Backend/NativeCodeData.cpp +++ b/lib/Backend/NativeCodeData.cpp @@ -4,17 +4,6 @@ //------------------------------------------------------------------------------------------------------- #include "Backend.h" -char DataDesc_None[] = ""; -char DataDesc_InlineeFrameRecord_ArgOffsets[] = ""; -char DataDesc_InlineeFrameRecord_Constants[] = ""; -char DataDesc_BailoutInfo_CotalOutParamCount[] = ""; -char DataDesc_ArgOutOffsetInfo_StartCallOutParamCounts[] = ""; -char DataDesc_ArgOutOffsetInfo_StartCallArgRestoreAdjustCounts[] = ""; -char DataDesc_LowererMD_LoadFloatValue_Float[] = ""; -char DataDesc_LowererMD_LoadFloatValue_Double[] = ""; -char DataDesc_LowererMD_EmitLoadFloatCommon_Double[] = ""; -char DataDesc_LowererMD_Simd128LoadConst[] = ""; - NativeCodeData::NativeCodeData(DataChunk * chunkList) : chunkList(chunkList) { #ifdef PERF_COUNTERS @@ -51,20 +40,20 @@ NativeCodeData::AddFixupEntry(void* targetAddr, void* targetStartAddr, void* add } Assert(targetStartAddr); - + unsigned int inDataOffset = (unsigned int)((char*)targetAddr - (char*)targetStartAddr); DataChunk* targetChunk = NativeCodeData::GetDataChunk(targetStartAddr); Assert(targetChunk->len >= inDataOffset); #if DBG bool foundTargetChunk = false; - while (chunkList) + while (chunkList) { foundTargetChunk |= (chunkList == targetChunk); chunkList = chunkList->next; } AssertMsg(foundTargetChunk, "current pointer is not allocated with NativeCodeData allocator?"); // change to valid check instead of assertion? 
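For context, a recorded fixup entry is replayed later, once the NativeCodeData buffer has been copied to its final address: the pointer stored at a chunk-relative offset is rewritten to point at the buffer base plus the target's total offset. A simplified sketch with abbreviated names (the real application loop appears in NativeCodeGenerator::CodeGen further down in this change):

struct FixupEntrySketch
{
    unsigned int addrOffset;        // where the embedded pointer lives, relative to its chunk's data
    unsigned int targetTotalOffset; // where it must point, relative to the start of the whole buffer
};

static void ApplyFixupSketch(char* bufferBase, unsigned int chunkStartOffset, const FixupEntrySketch& entry)
{
    void** addrToFixup = (void**)(bufferBase + chunkStartOffset + entry.addrOffset);
    *addrToFixup = bufferBase + entry.targetTotalOffset;   // mirrors *(void**)(addrToFixup) = targetAddr below
}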
-#endif +#endif DataChunk* chunk = NativeCodeData::GetDataChunk(startAddress); @@ -75,7 +64,7 @@ NativeCodeData::AddFixupEntry(void* targetAddr, void* targetStartAddr, void* add } __analysis_assume(entry); entry->addrOffset = (unsigned int)((__int64)addrToFixup - (__int64)startAddress); - Assert(entry->addrOffset <= chunk->len - sizeof(void*)); + Assert(entry->addrOffset <= chunk->len - sizeof(void*)); entry->targetTotalOffset = targetChunk->offset + inDataOffset; entry->next = chunk->fixupList; @@ -84,7 +73,7 @@ NativeCodeData::AddFixupEntry(void* targetAddr, void* targetStartAddr, void* add #if DBG if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { - Output::Print(L"NativeCodeData Add Fixup: %p(%p+%d, chunk:%p) --> %p(chunk:%p) %S\n", + Output::Print(_u("NativeCodeData Add Fixup: %p(%p+%d, chunk:%p) --> %p(chunk:%p) %S\n"), addrToFixup, startAddress, entry->addrOffset, (void*)chunk, targetAddr, (void*)targetChunk, chunk->dataType); } #endif @@ -132,25 +121,25 @@ NativeCodeData::AddFixupEntryForPointerArray(void* startAddress, DataChunk * chu #if DBG if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { - Output::Print(L"NativeCodeData Add Fixup: %p[%d](+%d, chunk:%p) --> %p(chunk:%p) %S\n", + Output::Print(_u("NativeCodeData Add Fixup: %p[%d](+%d, chunk:%p) --> %p(chunk:%p) %S\n"), startAddress, i, entry->addrOffset, (void*)chunk, targetAddr, (void*)targetChunk, chunk->dataType); } #endif } } -wchar_t* +char16* NativeCodeData::GetDataDescription(void* data, JitArenaAllocator * alloc) { auto chunk = GetDataChunk(data); - wchar_t buf[1024] = { 0 }; + char16 buf[1024] = { 0 }; #if DBG - swprintf_s(buf, L"%hs, NativeCodeData: index: %x, len: %x, offset: +%x", chunk->dataType, chunk->allocIndex, chunk->len, chunk->offset); + swprintf_s(buf, _u("%hs, NativeCodeData: index: %x, len: %x, offset: +%x"), chunk->dataType, chunk->allocIndex, chunk->len, chunk->offset); #else - swprintf_s(buf, L"NativeCodeData: index: %x, len: %x, offset: +%x", chunk->allocIndex, chunk->len, chunk->offset); + swprintf_s(buf, _u("NativeCodeData: index: %x, len: %x, offset: +%x"), chunk->allocIndex, chunk->len, chunk->offset); #endif auto len = wcslen(buf) + 1; - auto desc = JitAnewArray(alloc, wchar_t, len); + auto desc = JitAnewArray(alloc, char16, len); wcscpy_s(desc, len, buf); return desc; } @@ -160,7 +149,7 @@ NativeCodeData::VerifyExistFixupEntry(void* targetAddr, void* addrToFixup, void* { DataChunk* chunk = NativeCodeData::GetDataChunk(startAddress); DataChunk* targetChunk = NativeCodeData::GetDataChunk(targetAddr); - if (chunk->len == 0) + if (chunk->len == 0) { return; } @@ -216,7 +205,7 @@ char * NativeCodeData::Allocator::Alloc(size_t requestSize) { char * data = nullptr; - Assert(!finalized); + Assert(!finalized); requestSize = Math::Align(requestSize, sizeof(void*)); DataChunk * newChunk = HeapNewStructPlus(requestSize, DataChunk); diff --git a/lib/Backend/NativeCodeData.h b/lib/Backend/NativeCodeData.h index 7c9646ecc83..5591ef6ad0f 100644 --- a/lib/Backend/NativeCodeData.h +++ b/lib/Backend/NativeCodeData.h @@ -47,7 +47,7 @@ class NativeCodeData return (NativeCodeData::DataChunk*)((char*)data - offsetof(NativeCodeData::DataChunk, data)); } - static wchar_t* GetDataDescription(void* data, JitArenaAllocator * alloc); + static char16* GetDataDescription(void* data, JitArenaAllocator * alloc); static unsigned int GetDataTotalOffset(void* data) { @@ -78,7 +78,7 @@ class NativeCodeData char * Alloc(DECLSPEC_GUARD_OVERFLOW size_t requestedBytes); char * AllocZero(DECLSPEC_GUARD_OVERFLOW size_t requestedBytes); - char * 
AllocLeaf(__declspec(guard(overflow)) size_t requestedBytes); + char * AllocLeaf(DECLSPEC_GUARD_OVERFLOW size_t requestedBytes); NativeCodeData * Finalize(); void Free(void * buffer, size_t byteSize); @@ -110,7 +110,7 @@ class NativeCodeData void Fixup(NativeCodeData::DataChunk* chunkList) { int count = NativeCodeData::GetDataChunk(this)->len / sizeof(T); - while (count-- > 0) + while (count-- > 0) { (((T*)this) + count)->Fixup(chunkList); } @@ -128,8 +128,8 @@ class NativeCodeData DataChunk* chunk = NativeCodeData::GetDataChunk(dataBlock); chunk->dataType = typeid(T).name(); if (PHASE_TRACE1(Js::NativeCodeDataPhase)) - { - Output::Print(L"NativeCodeData AllocNoFix: chunk: %p, data: %p, index: %d, len: %x, totalOffset: %x, type: %S\n", + { + Output::Print(_u("NativeCodeData AllocNoFix: chunk: %p, data: %p, index: %d, len: %x, totalOffset: %x, type: %S\n"), chunk, (void*)dataBlock, chunk->allocIndex, chunk->len, chunk->offset, chunk->dataType); } #endif @@ -145,7 +145,7 @@ class NativeCodeData chunk->dataType = typeid(T).name(); if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { - Output::Print(L"NativeCodeData AllocNoFix: chunk: %p, data: %p, index: %d, len: %x, totalOffset: %x, type: %S\n", + Output::Print(_u("NativeCodeData AllocNoFix: chunk: %p, data: %p, index: %d, len: %x, totalOffset: %x, type: %S\n"), chunk, (void*)dataBlock, chunk->allocIndex, chunk->len, chunk->offset, chunk->dataType); } #endif @@ -172,7 +172,7 @@ class NativeCodeData chunk->dataType = typeid(T).name(); if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { - Output::Print(L"NativeCodeData Alloc: chunk: %p, data: %p, index: %d, len: %x, totalOffset: %x, type: %S\n", + Output::Print(_u("NativeCodeData Alloc: chunk: %p, data: %p, index: %d, len: %x, totalOffset: %x, type: %S\n"), chunk, (void*)dataBlock, chunk->allocIndex, chunk->len, chunk->offset, chunk->dataType); } #endif @@ -199,37 +199,40 @@ class NativeCodeData ~NativeCodeData(); }; -char DataDesc_None[]; -char DataDesc_InlineeFrameRecord_ArgOffsets[]; -char DataDesc_InlineeFrameRecord_Constants[]; -char DataDesc_BailoutInfo_CotalOutParamCount[]; -char DataDesc_ArgOutOffsetInfo_StartCallOutParamCounts[]; -char DataDesc_ArgOutOffsetInfo_StartCallArgRestoreAdjustCounts[]; -char DataDesc_LowererMD_LoadFloatValue_Float[]; -char DataDesc_LowererMD_LoadFloatValue_Double[]; -char DataDesc_LowererMD_EmitLoadFloatCommon_Double[]; -char DataDesc_LowererMD_Simd128LoadConst[]; - -template -struct IntType -{ - int data; +enum DataDesc +{ + DataDesc_None, + DataDesc_InlineeFrameRecord_ArgOffsets, + DataDesc_InlineeFrameRecord_Constants, + DataDesc_BailoutInfo_CotalOutParamCount, + DataDesc_ArgOutOffsetInfo_StartCallOutParamCounts, + DataDesc_ArgOutOffsetInfo_StartCallArgRestoreAdjustCounts, + DataDesc_LowererMD_LoadFloatValue_Float, + DataDesc_LowererMD_LoadFloatValue_Double, + DataDesc_LowererMD_EmitLoadFloatCommon_Double, + DataDesc_LowererMD_Simd128LoadConst, }; -template +template +struct IntType +{ + int data; +}; + +template struct UIntType { uint data; }; -template +template struct FloatType { FloatType(float val) :data(val) {} float data; }; -template +template struct DoubleType { DoubleType() {} @@ -237,7 +240,7 @@ struct DoubleType double data; }; -template +template struct SIMDType { SIMDType() {} @@ -245,7 +248,7 @@ struct SIMDType AsmJsSIMDValue data; }; -template +template struct VarType { Js::Var data; @@ -254,14 +257,16 @@ struct VarType AssertMsg(false, "Please specialize Fixup method for this Var type or use no-fixup allocator"); } }; + template<> -void 
VarType::Fixup(NativeCodeData::DataChunk* chunkList) +inline void VarType::Fixup(NativeCodeData::DataChunk* chunkList) { AssertMsg(false, "InlineeFrameRecord::constants contains Var from main process, should not fixup"); } struct GlobalBailOutRecordDataTable; -template<> void NativeCodeData::Array::Fixup(NativeCodeData::DataChunk* chunkList) +template<> +inline void NativeCodeData::Array::Fixup(NativeCodeData::DataChunk* chunkList) { NativeCodeData::AddFixupEntryForPointerArray(this, chunkList); } diff --git a/lib/Backend/NativeCodeGenerator.cpp b/lib/Backend/NativeCodeGenerator.cpp index ff6428b2714..e7fe06ec54f 100644 --- a/lib/Backend/NativeCodeGenerator.cpp +++ b/lib/Backend/NativeCodeGenerator.cpp @@ -864,12 +864,12 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor workItem->GetJITData()->nativeDataAddr = (__int3264)workItem->GetEntryPoint()->GetNativeDataBufferRef(); // TODO: oop jit can we be more efficient here? - ArenaAllocator alloc(L"JitData", pageAllocator, Js::Throw::OutOfMemory); + ArenaAllocator alloc(_u("JitData"), pageAllocator, Js::Throw::OutOfMemory); auto& jitData = workItem->GetJITData()->jitData; jitData = AnewStructZ(&alloc, FunctionJITTimeDataIDL); FunctionJITTimeInfo::BuildJITTimeData(&alloc, workItem->RecyclableData()->JitTimeData(), nullptr, workItem->GetJITData()->jitData, false); - + Js::EntryPointInfo * epInfo = workItem->GetEntryPoint(); if (workItem->Type() == JsFunctionType) { @@ -880,7 +880,7 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor { workItem->GetJITData()->jittedLoopIterationsSinceLastBailoutAddr = (intptr_t)Js::FunctionBody::GetJittedLoopIterationsSinceLastBailoutAddress(epInfo); } - + jitData->sharedPropertyGuards = epInfo->GetSharedPropertyGuardsWithLock(&alloc, jitData->sharedPropGuardCount); JITOutputIDL jitWriteData = {0}; @@ -917,7 +917,7 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor { CodeGenAllocators *const allocators = foreground ? 
EnsureForegroundAllocators(pageAllocator) : GetBackgroundAllocator(pageAllocator); // okay to do outside lock since the respective function is called only from one thread - NoRecoverMemoryJitArenaAllocator jitArena(L"JITArena", pageAllocator, Js::Throw::OutOfMemory); + NoRecoverMemoryJitArenaAllocator jitArena(_u("JITArena"), pageAllocator, Js::Throw::OutOfMemory); JITTimeWorkItem * jitWorkItem = Anew(&jitArena, JITTimeWorkItem, workItem->GetJITData()); @@ -953,7 +953,7 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor QueryPerformanceFrequency(&freq); Output::Print( - L"BackendMarshalOut - function: %s time:%8.6f mSec\r\n", + _u("BackendMarshalOut - function: %s time:%8.6f mSec\r\n"), workItem->GetFunctionBody()->GetDisplayName(), (((double)((end_time.QuadPart - jitWriteData.startTime)* (double)1000.0 / (double)freq.QuadPart))) / (1)); Output::Flush(); @@ -1000,7 +1000,7 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { - Output::Print(L"NativeCodeData Fixup: allocIndex:%d, len:%x, totalOffset:%x, startAddress:%p\n", + Output::Print(_u("NativeCodeData Fixup: allocIndex:%d, len:%x, totalOffset:%x, startAddress:%p\n"), record.index, record.length, record.startOffset, jitWriteData.buffer->data + record.startOffset); } @@ -1011,7 +1011,7 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { - Output::Print(L"\tEntry: +%x %p(%p) ==> %p\n", updateList->addrOffset, addrToFixup, *(void**)(addrToFixup), targetAddr); + Output::Print(_u("\tEntry: +%x %p(%p) ==> %p\n"), updateList->addrOffset, addrToFixup, *(void**)(addrToFixup), targetAddr); } *(void**)(addrToFixup) = targetAddr; @@ -1029,7 +1029,7 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor #if DBG if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { - Output::Print(L"NativeCodeData Client Buffer: %p, len: %x\n", jitWriteData.buffer->data, jitWriteData.buffer->len); + Output::Print(_u("NativeCodeData Client Buffer: %p, len: %x\n"), jitWriteData.buffer->data, jitWriteData.buffer->len); } #endif } @@ -1208,17 +1208,17 @@ void NativeCodeGenerator::LogCodeGenStart(CodeGenWorkItem * workItem, LARGE_INTE { if (workItem->GetEntryPoint()->IsLoopBody()) { - Output::Print(L"---BeginBackEnd: function: %s, loop:%d---\r\n", body->GetDisplayName(), ((JsLoopBodyCodeGen*)workItem)->GetLoopNumber()); + Output::Print(_u("---BeginBackEnd: function: %s, loop:%d---\r\n"), body->GetDisplayName(), ((JsLoopBodyCodeGen*)workItem)->GetLoopNumber()); } else { - Output::Print(L"---BeginBackEnd: function: %s---\r\n", body->GetDisplayName()); + Output::Print(_u("---BeginBackEnd: function: %s---\r\n"), body->GetDisplayName()); } Output::Flush(); } #endif - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; if (PHASE_TRACE(Js::BackEndPhase, body)) { @@ -1226,7 +1226,7 @@ void NativeCodeGenerator::LogCodeGenStart(CodeGenWorkItem * workItem, LARGE_INTE if (workItem->GetEntryPoint()->IsLoopBody()) { Output::Print( - L"BeginBackEnd - function: %s (%s, line %u), loop: %u, mode: %S", + _u("BeginBackEnd - function: %s (%s, line %u), loop: %u, mode: %S"), body->GetDisplayName(), body->GetDebugNumberSet(debugStringBuffer), body->GetLineNumber(), @@ -1234,17 +1234,17 @@ void NativeCodeGenerator::LogCodeGenStart(CodeGenWorkItem * workItem, LARGE_INTE ExecutionModeName(workItem->GetJitMode())); if 
(body->GetIsAsmjsMode()) { - Output::Print(L" (Asmjs)\n"); + Output::Print(_u(" (Asmjs)\n")); } else { - Output::Print(L"\n"); + Output::Print(_u("\n")); } } else { Output::Print( - L"BeginBackEnd - function: %s (%s, line %u), mode: %S", + _u("BeginBackEnd - function: %s (%s, line %u), mode: %S"), body->GetDisplayName(), body->GetDebugNumberSet(debugStringBuffer), body->GetLineNumber(), @@ -1252,11 +1252,11 @@ void NativeCodeGenerator::LogCodeGenStart(CodeGenWorkItem * workItem, LARGE_INTE if (body->GetIsAsmjsMode()) { - Output::Print(L" (Asmjs)\n"); + Output::Print(_u(" (Asmjs)\n")); } else { - Output::Print(L"\n"); + Output::Print(_u("\n")); } } Output::Flush(); @@ -1268,18 +1268,18 @@ void NativeCodeGenerator::LogCodeGenStart(CodeGenWorkItem * workItem, LARGE_INTE if (workItem->RecyclableData()->JitTimeData()->inlineCacheStats) { auto stats = workItem->RecyclableData()->JitTimeData()->inlineCacheStats; - Output::Print(L"ObjTypeSpec: jitting function %s (#%s): inline cache stats:\n", body->GetDisplayName(), body->GetDebugNumberSet(debugStringBuffer)); - Output::Print(L" overall: total %u, no profile info %u\n", stats->totalInlineCacheCount, stats->noInfoInlineCacheCount); - Output::Print(L" mono: total %u, empty %u, cloned %u\n", + Output::Print(_u("ObjTypeSpec: jitting function %s (#%s): inline cache stats:\n"), body->GetDisplayName(), body->GetDebugNumberSet(debugStringBuffer)); + Output::Print(_u(" overall: total %u, no profile info %u\n"), stats->totalInlineCacheCount, stats->noInfoInlineCacheCount); + Output::Print(_u(" mono: total %u, empty %u, cloned %u\n"), stats->monoInlineCacheCount, stats->emptyMonoInlineCacheCount, stats->clonedMonoInlineCacheCount); - Output::Print(L" poly: total %u (high %u, low %u), null %u, empty %u, ignored %u, disabled %u, equivalent %u, non-equivalent %u, cloned %u\n", + Output::Print(_u(" poly: total %u (high %u, low %u), null %u, empty %u, ignored %u, disabled %u, equivalent %u, non-equivalent %u, cloned %u\n"), stats->polyInlineCacheCount, stats->highUtilPolyInlineCacheCount, stats->lowUtilPolyInlineCacheCount, stats->nullPolyInlineCacheCount, stats->emptyPolyInlineCacheCount, stats->ignoredPolyInlineCacheCount, stats->disabledPolyInlineCacheCount, stats->equivPolyInlineCacheCount, stats->nonEquivPolyInlineCacheCount, stats->clonedPolyInlineCacheCount); } else { - Output::Print(L"EquivObjTypeSpec: function %s (%s): inline cache stats unavailable\n", body->GetDisplayName(), body->GetDebugNumberSet(debugStringBuffer)); + Output::Print(_u("EquivObjTypeSpec: function %s (%s): inline cache stats unavailable\n"), body->GetDisplayName(), body->GetDebugNumberSet(debugStringBuffer)); } Output::Flush(); } @@ -1290,7 +1290,7 @@ void NativeCodeGenerator::LogCodeGenStart(CodeGenWorkItem * workItem, LARGE_INTE void NativeCodeGenerator::LogCodeGenDone(CodeGenWorkItem * workItem, LARGE_INTEGER * start_time) { Js::FunctionBody * body = workItem->GetFunctionBody(); - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; { if (IS_JS_ETW(EventEnabledJSCRIPT_FUNCTION_JIT_STOP())) @@ -1324,7 +1324,7 @@ void NativeCodeGenerator::LogCodeGenDone(CodeGenWorkItem * workItem, LARGE_INTEG #if DBG_DUMP if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::BackEndPhase)) { - Output::Print(L"---EndBackEnd---\r\n"); + Output::Print(_u("---EndBackEnd---\r\n")); Output::Flush(); } #endif @@ -1338,7 +1338,7 @@ void NativeCodeGenerator::LogCodeGenDone(CodeGenWorkItem * workItem, LARGE_INTEG if 
(workItem->GetEntryPoint()->IsLoopBody()) { Output::Print( - L"EndBackEnd - function: %s (%s, line %u), loop: %u, mode: %S, time:%8.6f mSec", + _u("EndBackEnd - function: %s (%s, line %u), loop: %u, mode: %S, time:%8.6f mSec"), body->GetDisplayName(), body->GetDebugNumberSet(debugStringBuffer), body->GetLineNumber(), @@ -1348,17 +1348,17 @@ void NativeCodeGenerator::LogCodeGenDone(CodeGenWorkItem * workItem, LARGE_INTEG if (body->GetIsAsmjsMode()) { - Output::Print(L" (Asmjs)\n"); + Output::Print(_u(" (Asmjs)\n")); } else { - Output::Print(L"\n"); + Output::Print(_u("\n")); } } else { Output::Print( - L"EndBackEnd - function: %s (%s, line %u), mode: %S time:%8.6f mSec", + _u("EndBackEnd - function: %s (%s, line %u), mode: %S time:%8.6f mSec"), body->GetDisplayName(), body->GetDebugNumberSet(debugStringBuffer), body->GetLineNumber(), @@ -1367,11 +1367,11 @@ void NativeCodeGenerator::LogCodeGenDone(CodeGenWorkItem * workItem, LARGE_INTEG if (body->GetIsAsmjsMode()) { - Output::Print(L" (Asmjs)\n"); + Output::Print(_u(" (Asmjs)\n")); } else { - Output::Print(L"\n"); + Output::Print(_u("\n")); } } Output::Flush(); @@ -1496,15 +1496,16 @@ NativeCodeGenerator::CheckAsmJsCodeGen(Js::ScriptFunction * function) { Output::Print(_u("Codegen not done yet for function: %s, Entrypoint is CheckAsmJsCodeGenThunk\n"), function->GetFunctionBody()->GetDisplayName()); } - return reinterpret_cast(entryPoint->GetNativeAddress()); + return reinterpret_cast(entryPoint->GetNativeAddress()); } if (PHASE_TRACE1(Js::AsmjsEntryPointInfoPhase)) { Output::Print(_u("CodeGen Done for function: %s, Changing Entrypoint to Full JIT\n"), function->GetFunctionBody()->GetDisplayName()); } // we will need to set the functionbody external and asmjs entrypoint to the fulljit entrypoint - return CheckCodeGenDone(functionBody, entryPoint, function); + return reinterpret_cast(CheckCodeGenDone(functionBody, entryPoint, function)); } + Js::JavascriptMethod NativeCodeGenerator::CheckCodeGen(Js::ScriptFunction * function) { @@ -1548,6 +1549,13 @@ NativeCodeGenerator::CheckCodeGen(Js::ScriptFunction * function) if(!nativeCodeGen->Processor()->PrioritizeJob(nativeCodeGen, entryPoint, function)) { +#ifdef ENABLE_SCRIPT_PROFILING +#define originalEntryPoint_IS_ProfileDeferredParsingThunk \ + (originalEntryPoint == ProfileDeferredParsingThunk) +#else +#define originalEntryPoint_IS_ProfileDeferredParsingThunk \ + false +#endif // Job was not yet processed // originalEntryPoint is the last known good entry point for the function body. 
Here we verify that // it either corresponds with this codegen episode (identified by function->entryPointIndex) of the function body @@ -1557,7 +1565,7 @@ NativeCodeGenerator::CheckCodeGen(Js::ScriptFunction * function) ( originalEntryPoint == DefaultEntryThunk || scriptContext->IsDynamicInterpreterThunk(originalEntryPoint) - || originalEntryPoint == ProfileDeferredParsingThunk + || originalEntryPoint_IS_ProfileDeferredParsingThunk || originalEntryPoint == DefaultDeferredParsingThunk || ( functionBody->GetSimpleJitEntryPointInfo() && @@ -3143,14 +3151,14 @@ NativeCodeGenerator::EnterScriptStart() } void -FreeNativeCodeGenAllocation(Js::ScriptContext *scriptContext, void * address) +FreeNativeCodeGenAllocation(Js::ScriptContext *scriptContext, Js::JavascriptMethod address) { if (!scriptContext->GetNativeCodeGenerator()) { return; } - scriptContext->GetNativeCodeGenerator()->QueueFreeNativeCodeGenAllocation(address); + scriptContext->GetNativeCodeGenerator()->QueueFreeNativeCodeGenAllocation((void*)address); } bool TryReleaseNonHiPriWorkItem(Js::ScriptContext* scriptContext, CodeGenWorkItem* workItem) diff --git a/lib/Backend/NativeCodeGenerator.h b/lib/Backend/NativeCodeGenerator.h index 1a5192f5682..42bc3bc021a 100644 --- a/lib/Backend/NativeCodeGenerator.h +++ b/lib/Backend/NativeCodeGenerator.h @@ -12,6 +12,7 @@ namespace Js { class ObjTypeSpecFldInfo; class FunctionCodeGenJitTimeData; + class RemoteScriptContext; }; class NativeCodeGenerator sealed : public JsUtil::WaitableJobManager diff --git a/lib/Backend/Opnd.cpp b/lib/Backend/Opnd.cpp index b899799262b..ab68c8cd497 100644 --- a/lib/Backend/Opnd.cpp +++ b/lib/Backend/Opnd.cpp @@ -91,7 +91,7 @@ Opnd::IsWriteBarrierTriggerableValue() // If this operand is known address, then it doesn't need a write barrier, the address is either not a GC address or is pinned // If its null/boolean/undefined, we don't need a write barrier since the javascript library will keep those guys alive return this->IsNotTaggedValue() && - !((this->IsAddrOpnd() && this->AsAddrOpnd()->GetKind() == AddrOpndKindDynamicVar) || + !((this->IsAddrOpnd() && static_cast(this->AsAddrOpnd()->GetKind()) == AddrOpndKindDynamicVar) || (this->GetValueType().IsBoolean() || this->GetValueType().IsNull() || this->GetValueType().IsUndefined())); } @@ -3141,7 +3141,7 @@ Opnd::DumpOpndKindMemRef(bool AsmDumpMode, Func *func) void Opnd::WriteToBuffer(_Outptr_result_buffer_(*count) char16 **buffer, size_t *count, const char16 *fmt, ...) 
{ - va_list argptr = nullptr; + va_list argptr; va_start(argptr, fmt); int len = _vsnwprintf_s(*buffer, *count, _TRUNCATE, fmt, argptr); diff --git a/lib/Backend/PDataManager.cpp b/lib/Backend/PDataManager.cpp index c19ac15dbf9..1cb71dc2ce5 100644 --- a/lib/Backend/PDataManager.cpp +++ b/lib/Backend/PDataManager.cpp @@ -7,6 +7,11 @@ // Conditionally-compiled on x64 and arm #if PDATA_ENABLED +#ifdef _WIN32 +// ---------------------------------------------------------------------------- +// _WIN32 x64 unwind uses PDATA +// ---------------------------------------------------------------------------- + void PDataManager::RegisterPdata(RUNTIME_FUNCTION* pdataStart, _In_ const ULONG_PTR functionStart, _In_ const ULONG_PTR functionEnd, _Out_ PVOID* pdataTable, ULONG entryCount, ULONG maxEntryCount) { BOOLEAN success = FALSE; @@ -48,4 +53,22 @@ void PDataManager::UnregisterPdata(RUNTIME_FUNCTION* pdata) Assert(success); } } -#endif + +#else // !_WIN32 +// ---------------------------------------------------------------------------- +// !_WIN32 x64 unwind uses .eh_frame +// ---------------------------------------------------------------------------- + +void PDataManager::RegisterPdata(RUNTIME_FUNCTION* pdataStart, _In_ const ULONG_PTR functionStart, _In_ const ULONG_PTR functionEnd, _Out_ PVOID* pdataTable, ULONG entryCount, ULONG maxEntryCount) +{ + __register_frame(pdataStart); + *pdataTable = pdataStart; +} + +void PDataManager::UnregisterPdata(RUNTIME_FUNCTION* pdata) +{ + __deregister_frame(pdata); +} + +#endif // !_WIN32 +#endif // PDATA_ENABLED diff --git a/lib/Backend/PrologEncoder.cpp b/lib/Backend/PrologEncoder.cpp index d6f05f74196..5b41041e50a 100644 --- a/lib/Backend/PrologEncoder.cpp +++ b/lib/Backend/PrologEncoder.cpp @@ -5,6 +5,11 @@ #include "Backend.h" #include "PrologEncoderMD.h" +#ifdef _WIN32 +// ---------------------------------------------------------------------------- +// _WIN32 x64 unwind uses PDATA +// ---------------------------------------------------------------------------- + void PrologEncoder::RecordNonVolRegSave() { requiredUnwindCodeNodeCount++; @@ -175,12 +180,12 @@ BYTE *PrologEncoder::Finalize(BYTE *functionStart, pdata->runtimeFunction.EndAddress = codeSize; pdata->runtimeFunction.UnwindData = (DWORD)((pdataBuffer + sizeof(RUNTIME_FUNCTION)) - functionStart); - FinalizeUnwindInfo(); + FinalizeUnwindInfo(functionStart, codeSize); return (BYTE *)&pdata->runtimeFunction; } -void PrologEncoder::FinalizeUnwindInfo() +void PrologEncoder::FinalizeUnwindInfo(BYTE *functionStart, DWORD codeSize) { pdata->unwindInfo.Version = 1; pdata->unwindInfo.Flags = 0; @@ -213,3 +218,127 @@ BYTE *PrologEncoder::GetUnwindInfo() { return (BYTE *)&pdata->unwindInfo; } + +#else // !_WIN32 +// ---------------------------------------------------------------------------- +// !_WIN32 x64 unwind uses .eh_frame +// ---------------------------------------------------------------------------- + +static const int SMALL_EHFRAME_SIZE = 0x40; + +void PrologEncoder::EncodeSmallProlog(uint8 prologSize, size_t size) +{ + Assert(ehFrame == nullptr); + + BYTE* buffer = AnewArray(alloc, BYTE, SMALL_EHFRAME_SIZE); + ehFrame = Anew(alloc, EhFrame, buffer, SMALL_EHFRAME_SIZE); + + auto fde = ehFrame->GetFDE(); + + // prolog: push rbp + fde->cfi_advance_loc(1); // DW_CFA_advance_loc: 1 + fde->cfi_def_cfa_offset(MachPtr * 2); // DW_CFA_def_cfa_offset: 16 + fde->cfi_offset(GetDwarfRegNum(LowererMDArch::GetRegFramePointer()), 2); // DW_CFA_offset: r6 (rbp) at cfa-16 + + ehFrame->End(); +} + +DWORD 
PrologEncoder::SizeOfPData() +{ + return ehFrame->Count(); +} + +BYTE* PrologEncoder::Finalize(BYTE *functionStart, DWORD codeSize, BYTE *pdataBuffer) +{ + auto fde = ehFrame->GetFDE(); + fde->UpdateAddressRange(functionStart, codeSize); + return ehFrame->Buffer(); +} + +// TODO: We can also pre-calculate size needed based on #push/xmm/saves/stack allocs +static const int JIT_EHFRAME_SIZE = 0x80; + +void PrologEncoder::Begin(size_t prologStartOffset) +{ + Assert(ehFrame == nullptr); + Assert(currentInstrOffset == 0); + + BYTE* buffer = AnewArray(alloc, BYTE, JIT_EHFRAME_SIZE); + ehFrame = Anew(alloc, EhFrame, buffer, JIT_EHFRAME_SIZE); + + currentInstrOffset = prologStartOffset; +} + +void PrologEncoder::End() +{ + ehFrame->End(); +} + +void PrologEncoder::FinalizeUnwindInfo(BYTE *functionStart, DWORD codeSize) +{ + auto fde = ehFrame->GetFDE(); + fde->UpdateAddressRange(functionStart, codeSize); +} + +void PrologEncoder::EncodeInstr(IR::Instr *instr, unsigned __int8 size) +{ + auto fde = ehFrame->GetFDE(); + + uint8 unwindCodeOp = PrologEncoderMD::GetOp(instr); + + Assert((currentInstrOffset + size) > currentInstrOffset); + currentInstrOffset += size; + + switch (unwindCodeOp) + { + case UWOP_PUSH_NONVOL: + { + const uword advance = currentInstrOffset - cfiInstrOffset; + cfiInstrOffset = currentInstrOffset; + cfaWordOffset++; + + fde->cfi_advance(advance); // DW_CFA_advance_loc: ? + fde->cfi_def_cfa_offset(cfaWordOffset * MachPtr); // DW_CFA_def_cfa_offset: ?? + + const ubyte reg = PrologEncoderMD::GetNonVolRegToSave(instr) + 1; + fde->cfi_offset(GetDwarfRegNum(reg), cfaWordOffset); // DW_CFA_offset: r? at cfa-?? + break; + } + + case UWOP_SAVE_XMM128: + { + // TODO + break; + } + + case UWOP_ALLOC_SMALL: + case UWOP_ALLOC_LARGE: + { + size_t allocaSize = PrologEncoderMD::GetAllocaSize(instr); + Assert(allocaSize % MachPtr == 0); + + size_t slots = allocaSize / MachPtr; + Assert(cfaWordOffset + slots > cfaWordOffset); + + const uword advance = currentInstrOffset - cfiInstrOffset; + cfiInstrOffset = currentInstrOffset; + cfaWordOffset += slots; + + fde->cfi_advance(advance); // DW_CFA_advance_loc: ? + fde->cfi_def_cfa_offset(cfaWordOffset * MachPtr); // DW_CFA_def_cfa_offset: ?? + break; + } + + case UWOP_IGNORE: + { + return; + } + + default: + { + AssertMsg(false, "PrologEncoderMD returned unsupported UnwindCodeOp."); + } + } +} + +#endif // !_WIN32 diff --git a/lib/Backend/PrologEncoder.h b/lib/Backend/PrologEncoder.h index 17e3a771067..b6badde4bf2 100644 --- a/lib/Backend/PrologEncoder.h +++ b/lib/Backend/PrologEncoder.h @@ -19,6 +19,11 @@ enum UnwindOp : unsigned __int8 { UWOP_SAVE_XMM128 = 8 }; +#ifdef _WIN32 +// ---------------------------------------------------------------------------- +// _WIN32 x64 unwind uses PDATA +// ---------------------------------------------------------------------------- + class PrologEncoder { private: @@ -147,11 +152,53 @@ class PrologEncoder // // Win8 PDATA registration. 
// + void Begin(size_t prologStartOffset) {} // No op on _WIN32 + void End() {} // No op on _WIN32 DWORD SizeOfUnwindInfo(); BYTE *GetUnwindInfo(); - void FinalizeUnwindInfo(); - + void FinalizeUnwindInfo(BYTE *functionStart, DWORD codeSize); private: UnwindCode *GetUnwindCode(unsigned __int8 nodeCount); }; + +#else // !_WIN32 +// ---------------------------------------------------------------------------- +// !_WIN32 x64 unwind uses .eh_frame +// ---------------------------------------------------------------------------- +#include "EhFrame.h" + +class PrologEncoder +{ +private: + ArenaAllocator* alloc; + EhFrame* ehFrame; + + size_t cfiInstrOffset; // last cfi emit instr offset + size_t currentInstrOffset; // current instr offset + // currentInstrOffset - cfiInstrOffset == advance + unsigned cfaWordOffset; + +public: + PrologEncoder(ArenaAllocator *alloc) + : alloc(alloc), ehFrame(nullptr), + cfiInstrOffset(0), currentInstrOffset(0), cfaWordOffset(1) + {} + + void RecordNonVolRegSave() {} + void RecordXmmRegSave() {} + void RecordAlloca(size_t size) {} + void EncodeInstr(IR::Instr *instr, uint8 size); + + void EncodeSmallProlog(uint8 prologSize, size_t size); + DWORD SizeOfPData(); + BYTE *Finalize(BYTE *functionStart, DWORD codeSize, BYTE *pdataBuffer); + + void Begin(size_t prologStartOffset); + void End(); + DWORD SizeOfUnwindInfo() { return SizeOfPData(); } + BYTE *GetUnwindInfo() { return ehFrame->Buffer(); } + void FinalizeUnwindInfo(BYTE *functionStart, DWORD codeSize); +}; + +#endif // !_WIN32 diff --git a/lib/Backend/SccLiveness.cpp b/lib/Backend/SccLiveness.cpp index 2337b080b9b..3be11c3bde4 100644 --- a/lib/Backend/SccLiveness.cpp +++ b/lib/Backend/SccLiveness.cpp @@ -376,36 +376,36 @@ SCCLiveness::ProcessSrc(IR::Opnd *src, IR::Instr *instr) } else if (!this->lastCall && src->IsSymOpnd() && src->AsSymOpnd()->m_sym->AsStackSym()->IsParamSlotSym()) { - IR::SymOpnd *symOpnd = src->AsSymOpnd(); - RegNum reg = LinearScanMD::GetParamReg(symOpnd, this->func); - - if (reg != RegNOREG && PHASE_ON(Js::RegParamsPhase, this->func)) - { - StackSym *stackSym = symOpnd->m_sym->AsStackSym(); - Lifetime *lifetime = stackSym->scratch.linearScan.lifetime; - - if (lifetime == nullptr) - { - lifetime = this->InsertLifetime(stackSym, reg, this->func->m_headInstr->m_next); - lifetime->region = this->curRegion; - lifetime->isFloat = symOpnd->IsFloat(); - lifetime->isSimd128F4 = symOpnd->IsSimd128F4(); - lifetime->isSimd128I4 = symOpnd->IsSimd128I4(); - lifetime->isSimd128I8 = symOpnd->IsSimd128I8(); - lifetime->isSimd128I16 = symOpnd->IsSimd128I16(); - lifetime->isSimd128U4 = symOpnd->IsSimd128U4(); - lifetime->isSimd128U8 = symOpnd->IsSimd128U8(); - lifetime->isSimd128U16 = symOpnd->IsSimd128U16(); - lifetime->isSimd128B4 = symOpnd->IsSimd128B4(); - lifetime->isSimd128B8 = symOpnd->IsSimd128B8(); - lifetime->isSimd128B16 = symOpnd->IsSimd128B16(); - lifetime->isSimd128D2 = symOpnd->IsSimd128D2(); - } - - IR::RegOpnd * newRegOpnd = IR::RegOpnd::New(stackSym, reg, symOpnd->GetType(), this->func); - instr->ReplaceSrc(symOpnd, newRegOpnd); - this->ProcessRegUse(newRegOpnd, instr); - } + IR::SymOpnd *symOpnd = src->AsSymOpnd(); + RegNum reg = LinearScanMD::GetParamReg(symOpnd, this->func); + + if (reg != RegNOREG && PHASE_ON(Js::RegParamsPhase, this->func)) + { + StackSym *stackSym = symOpnd->m_sym->AsStackSym(); + Lifetime *lifetime = stackSym->scratch.linearScan.lifetime; + + if (lifetime == nullptr) + { + lifetime = this->InsertLifetime(stackSym, reg, this->func->m_headInstr->m_next); + 
lifetime->region = this->curRegion; + lifetime->isFloat = symOpnd->IsFloat(); + lifetime->isSimd128F4 = symOpnd->IsSimd128F4(); + lifetime->isSimd128I4 = symOpnd->IsSimd128I4(); + lifetime->isSimd128I8 = symOpnd->IsSimd128I8(); + lifetime->isSimd128I16 = symOpnd->IsSimd128I16(); + lifetime->isSimd128U4 = symOpnd->IsSimd128U4(); + lifetime->isSimd128U8 = symOpnd->IsSimd128U8(); + lifetime->isSimd128U16 = symOpnd->IsSimd128U16(); + lifetime->isSimd128B4 = symOpnd->IsSimd128B4(); + lifetime->isSimd128B8 = symOpnd->IsSimd128B8(); + lifetime->isSimd128B16 = symOpnd->IsSimd128B16(); + lifetime->isSimd128D2 = symOpnd->IsSimd128D2(); + } + + IR::RegOpnd * newRegOpnd = IR::RegOpnd::New(stackSym, reg, symOpnd->GetType(), this->func); + instr->ReplaceSrc(symOpnd, newRegOpnd); + this->ProcessRegUse(newRegOpnd, instr); + } } } @@ -769,8 +769,8 @@ SCCLiveness::FoldIndir(IR::Instr *instr, IR::Opnd *opnd) // offset = indir.offset + (index << scale) int32 offset = index->m_sym->GetIntConstValue(); - if(indir->GetScale() != 0 && Int32Math::Shl(offset, indir->GetScale(), &offset) || - indir->GetOffset() != 0 && Int32Math::Add(indir->GetOffset(), offset, &offset)) + if((indir->GetScale() != 0 && Int32Math::Shl(offset, indir->GetScale(), &offset)) || + (indir->GetOffset() != 0 && Int32Math::Add(indir->GetOffset(), offset, &offset))) { return false; } diff --git a/lib/Backend/ServerScriptContext.cpp b/lib/Backend/ServerScriptContext.cpp index ea2b6faab16..76bfb975e79 100644 --- a/lib/Backend/ServerScriptContext.cpp +++ b/lib/Backend/ServerScriptContext.cpp @@ -281,13 +281,13 @@ ServerScriptContext::Close() void ServerScriptContext::BeginJIT() { - InterlockedExchangeAdd(&m_activeJITCount, 1); + InterlockedExchangeAdd(&m_activeJITCount, 1u); } void ServerScriptContext::EndJIT() { - InterlockedExchangeSubtract(&m_activeJITCount, 1); + InterlockedExchangeSubtract(&m_activeJITCount, 1u); } bool @@ -296,8 +296,8 @@ ServerScriptContext::IsJITActive() return m_activeJITCount != 0; } -Js::Var* -ServerScriptContext::GetModuleExportSlotArrayAddress(uint moduleIndex, uint slotIndex) +Js::Var* +ServerScriptContext::GetModuleExportSlotArrayAddress(uint moduleIndex, uint slotIndex) { Assert(m_moduleRecords.ContainsKey(moduleIndex)); auto record = m_moduleRecords.Item(moduleIndex); @@ -310,7 +310,7 @@ ServerScriptContext::SetIsPRNGSeeded(bool value) m_isPRNGSeeded = value; } -void +void ServerScriptContext::AddModuleRecordInfo(unsigned int moduleId, __int64 localExportSlotsAddr) { Js::ServerSourceTextModuleRecord* record = HeapNewStructZ(Js::ServerSourceTextModuleRecord); diff --git a/lib/Backend/ServerThreadContext.cpp b/lib/Backend/ServerThreadContext.cpp index 5c93c861b7f..867d1db5295 100644 --- a/lib/Backend/ServerThreadContext.cpp +++ b/lib/Backend/ServerThreadContext.cpp @@ -15,9 +15,9 @@ ServerThreadContext::ServerThreadContext(ThreadContextDataIDL * data) : m_codeGenAlloc(&m_policyManager, nullptr, &m_codePageAllocators, (HANDLE)data->processHandle), // TODO: OOP JIT, don't hardcode name #ifdef NTBUILD - m_jitChakraBaseAddress((intptr_t)GetModuleHandle(L"Chakra.dll")), + m_jitChakraBaseAddress((intptr_t)GetModuleHandle(_u("Chakra.dll"))), #else - m_jitChakraBaseAddress((intptr_t)GetModuleHandle(L"ChakraCore.dll")), + m_jitChakraBaseAddress((intptr_t)GetModuleHandle(_u("ChakraCore.dll"))), #endif m_jitCRTBaseAddress((intptr_t)GetModuleHandle(UCrtC99MathApis::LibraryName)) { @@ -29,10 +29,10 @@ ServerThreadContext::ServerThreadContext(ThreadContextDataIDL * data) : ServerThreadContext::~ServerThreadContext() { - // TODO: 
OOP JIT, clear out elements of map. maybe should arena alloc? + // TODO: OOP JIT, clear out elements of map. maybe should arena alloc? if (this->m_propertyMap != nullptr) { - this->m_propertyMap->Map([](const Js::PropertyRecord* record) + this->m_propertyMap->Map([](const Js::PropertyRecord* record) { size_t allocLength = record->byteCount + sizeof(char16) + (record->isNumeric ? sizeof(uint32) : 0); HeapDeletePlus(allocLength, const_cast(record)); @@ -126,7 +126,7 @@ ServerThreadContext::GetImplicitCallFlagsAddr() const return static_cast(m_threadContextData.implicitCallFlagsAddr); } -#if defined(_M_IX86) || defined(_M_X64) +#if defined(ENABLE_SIMDJS) && (defined(_M_IX86) || defined(_M_X64)) intptr_t ServerThreadContext::GetSimdTempAreaAddr(uint8 tempIndex) const { diff --git a/lib/Backend/ServerThreadContext.h b/lib/Backend/ServerThreadContext.h index ece25b1370f..7fb69d60855 100644 --- a/lib/Backend/ServerThreadContext.h +++ b/lib/Backend/ServerThreadContext.h @@ -19,7 +19,7 @@ class ServerThreadContext : public ThreadContextInfo virtual intptr_t GetThreadStackLimitAddr() const override; -#if defined(_M_IX86) || defined(_M_X64) +#if defined(ENABLE_SIMDJS) && (defined(_M_IX86) || defined(_M_X64)) virtual intptr_t GetSimdTempAreaAddr(uint8 tempIndex) const override; #endif diff --git a/lib/Backend/TempTracker.cpp b/lib/Backend/TempTracker.cpp index 16e2855a37c..0edf8416564 100644 --- a/lib/Backend/TempTracker.cpp +++ b/lib/Backend/TempTracker.cpp @@ -240,7 +240,7 @@ TempTracker::ProcessUse(StackSym * sym, BackwardPass * backwardPass) { // Record that the usedSymID may propagate to dstSymID and all the symbols // that it may propagate to as well - AddTransferDependencies(usedSymID, dstSymID, this->tempTransferDependencies); + this->AddTransferDependencies(usedSymID, dstSymID, this->tempTransferDependencies); #if DBG_DUMP if (T::DoTrace(backwardPass)) { @@ -258,7 +258,7 @@ TempTracker::ProcessUse(StackSym * sym, BackwardPass * backwardPass) { this->tempTransferredSyms.Set(usedSymID); PropertySym * propertySym = instr->GetDst()->AsSymOpnd()->m_sym->AsPropertySym(); - PropagateTempPropertyTransferStoreDependencies(usedSymID, propertySym, backwardPass); + this->PropagateTempPropertyTransferStoreDependencies(usedSymID, propertySym, backwardPass); #if DBG_DUMP if (T::DoTrace(backwardPass) && this->tempTransferDependencies) @@ -1031,7 +1031,7 @@ ObjectTemp::IsTempUseOpCodeSym(IR::Instr * instr, Js::OpCode opcode, Sym * sym) case Js::OpCode::StElemI_A_Strict: return instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->m_sym == sym; case Js::OpCode::Memset: - return instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->m_sym == sym || instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym == sym; + return instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->m_sym == sym || (instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym == sym); case Js::OpCode::Memcopy: return instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->m_sym == sym || instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->m_sym == sym; diff --git a/lib/Backend/amd64/EncoderMD.cpp b/lib/Backend/amd64/EncoderMD.cpp index 68405e0815e..caef24c7147 100644 --- a/lib/Backend/amd64/EncoderMD.cpp +++ b/lib/Backend/amd64/EncoderMD.cpp @@ -1759,8 +1759,8 @@ bool EncoderMD::TryConstFold(IR::Instr *instr, IR::RegOpnd *regOpnd) // offset = indir.offset + (index << scale) int32 offset = regOpnd->m_sym->GetIntConstValue(); - if (indir->GetScale() != 0 && Int32Math::Shl(offset, indir->GetScale(), &offset) || - indir->GetOffset() != 0 && 
Int32Math::Add(indir->GetOffset(), offset, &offset)) + if ((indir->GetScale() != 0 && Int32Math::Shl(offset, indir->GetScale(), &offset)) || + (indir->GetOffset() != 0 && Int32Math::Add(indir->GetOffset(), offset, &offset))) { foldedAllUses = false; continue; diff --git a/lib/Backend/amd64/LinearScanMD.cpp b/lib/Backend/amd64/LinearScanMD.cpp index 97ef11d86ee..1b4dc21f193 100644 --- a/lib/Backend/amd64/LinearScanMD.cpp +++ b/lib/Backend/amd64/LinearScanMD.cpp @@ -238,13 +238,13 @@ LinearScanMD::GenerateBailOut(IR::Instr * instr, __in_ecount(registerSaveSymsCou Assert(static_cast(registerSaveSymsCount) == static_cast(RegNumCount-1)); // Save registers used for parameters, and rax, if necessary, into the shadow space allocated for register parameters: - // mov [rsp + 16], rdx - // mov [rsp + 8], rcx + // mov [rsp + 16], RegArg1 (if branchConditionOpnd) + // mov [rsp + 8], RegArg0 // mov [rsp], rax - for(RegNum reg = bailOutInfo->branchConditionOpnd ? RegRDX : RegRCX; - reg != RegNOREG; - reg = static_cast(reg - 1)) + const RegNum regs[3] = { RegRAX, RegArg0, RegArg1 }; + for (int i = (bailOutInfo->branchConditionOpnd ? 2 : 1); i >= 0; i--) { + RegNum reg = regs[i]; StackSym *const stackSym = registerSaveSyms[reg - 1]; if(!stackSym) { @@ -253,7 +253,7 @@ LinearScanMD::GenerateBailOut(IR::Instr * instr, __in_ecount(registerSaveSymsCou const IRType regType = RegTypes[reg]; Lowerer::InsertMove( - IR::SymOpnd::New(func->m_symTable->GetArgSlotSym(static_cast(reg)), regType, func), + IR::SymOpnd::New(func->m_symTable->GetArgSlotSym(static_cast(i + 1)), regType, func), IR::RegOpnd::New(stackSym, reg, regType, func), instr); } @@ -261,44 +261,42 @@ LinearScanMD::GenerateBailOut(IR::Instr * instr, __in_ecount(registerSaveSymsCou if(bailOutInfo->branchConditionOpnd) { // Pass in the branch condition - // mov rdx, condition + // mov RegArg1, condition IR::Instr *const newInstr = Lowerer::InsertMove( - IR::RegOpnd::New(nullptr, RegRDX, bailOutInfo->branchConditionOpnd->GetType(), func), + IR::RegOpnd::New(nullptr, RegArg1, bailOutInfo->branchConditionOpnd->GetType(), func), bailOutInfo->branchConditionOpnd, instr); linearScan->SetSrcRegs(newInstr); } - if (!func->IsOOPJIT()) { // Pass in the bailout record - // mov rcx, bailOutRecord + // mov RegArg0, bailOutRecord Lowerer::InsertMove( - IR::RegOpnd::New(nullptr, RegRCX, TyMachPtr, func), + IR::RegOpnd::New(nullptr, RegArg0, TyMachPtr, func), IR::AddrOpnd::New(bailOutInfo->bailOutRecord, IR::AddrOpndKindDynamicBailOutRecord, func, true), instr); - } else { - // move rcx, dataAddr + // move RegArg0, dataAddr Lowerer::InsertMove( - IR::RegOpnd::New(nullptr, RegRCX, TyMachPtr, func), + IR::RegOpnd::New(nullptr, RegArg0, TyMachPtr, func), IR::AddrOpnd::New(func->GetWorkItem()->GetWorkItemData()->nativeDataAddr, IR::AddrOpndKindDynamicNativeCodeDataRef, func), instr); - // mov rcx, [rcx] + // mov RegArg0, [RegArg0] Lowerer::InsertMove( - IR::RegOpnd::New(nullptr, RegRCX, TyMachPtr, func), - IR::IndirOpnd::New(IR::RegOpnd::New(nullptr, RegRCX, TyVar, this->func), 0, TyMachPtr, func), + IR::RegOpnd::New(nullptr, RegArg0, TyMachPtr, func), + IR::IndirOpnd::New(IR::RegOpnd::New(nullptr, RegArg0, TyVar, this->func), 0, TyMachPtr, func), instr); - // lea rcx, [rcx + bailoutRecord_offset] + // lea RegArg0, [RegArg0 + bailoutRecord_offset] int bailoutRecordOffset = NativeCodeData::GetDataTotalOffset(bailOutInfo->bailOutRecord); - Lowerer::InsertLea(IR::RegOpnd::New(nullptr, RegRCX, TyVar, this->func), - IR::IndirOpnd::New(IR::RegOpnd::New(nullptr, RegRCX, TyVar, 
this->func), bailoutRecordOffset, TyMachPtr, + Lowerer::InsertLea(IR::RegOpnd::New(nullptr, RegArg0, TyVar, this->func), + IR::IndirOpnd::New(IR::RegOpnd::New(nullptr, RegArg0, TyVar, this->func), bailoutRecordOffset, TyMachPtr, #if DBG NativeCodeData::GetDataDescription(bailOutInfo->bailOutRecord, func->m_alloc), #endif @@ -511,7 +509,7 @@ RegNum LinearScanMD::GetParamReg(IR::SymOpnd *symOpnd, Func *func) switch (paramSym->GetParamSlotNum()) { case 1: - reg = RegRCX; + reg = RegArg0; break; default: Assert(UNREACHED); @@ -522,13 +520,13 @@ RegNum LinearScanMD::GetParamReg(IR::SymOpnd *symOpnd, Func *func) switch (paramSym->GetParamSlotNum()) { case 1: - reg = RegRDX; + reg = RegArg1; break; case 2: - reg = RegR8; + reg = RegArg2; break; case 3: - reg = RegR9; + reg = RegArg3; break; } } @@ -543,10 +541,10 @@ RegNum LinearScanMD::GetParamReg(IR::SymOpnd *symOpnd, Func *func) switch (paramSym->GetParamSlotNum()) { case 1: - reg = RegRDX; + reg = RegArg0; break; case 2: - reg = RegRCX; + reg = RegArg1; break; } } @@ -555,14 +553,14 @@ RegNum LinearScanMD::GetParamReg(IR::SymOpnd *symOpnd, Func *func) switch (paramSym->GetParamSlotNum()) { case 1: - reg = RegR8; + reg = RegArg2; break; case 2: - reg = RegR9; + reg = RegArg3; break; } } } return reg; -} \ No newline at end of file +} diff --git a/lib/Backend/amd64/LinearScanMdA.S b/lib/Backend/amd64/LinearScanMdA.S new file mode 100644 index 00000000000..cb3e833ecd1 --- /dev/null +++ b/lib/Backend/amd64/LinearScanMdA.S @@ -0,0 +1,137 @@ +//------------------------------------------------------------------------------------------------------- +// Copyright (C) Microsoft. All rights reserved. +// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. +//------------------------------------------------------------------------------------------------------- + +.intel_syntax noprefix +#include "unixasmmacros.inc" + + +// BailOutRecord::BailOut(BailOutRecord const * bailOutRecord) +// .extern _ZN13BailOutRecord7BailOutEPKS_ + +// BranchBailOutRecord::BailOut(BranchBailOutRecord const * bailOutRecord, BOOL cond) +// .extern _ZN19BranchBailOutRecord7BailOutEPKS_i + + +//------------------------------------------------------------------------------ +// LinearScanMD::SaveAllRegisters(BailOutRecord *const bailOutRecord) + +.balign 16 +LEAF_ENTRY _ZN12LinearScanMD26SaveAllRegistersEP13BailOutRecord, _TEXT + + // [rsp + 7 * 8] == saved rax + // [rsp + 8 * 8] == saved rdi + // [rsp + 9 * 8] == saved rsi + // rdi == bailOutRecord + // rsi == condition + + mov rax, [rdi] // bailOutRecord->globalBailOutRecordDataTable + mov rax, [rax] // bailOutRecord->globalBailOutRecordDataTable->registerSaveSpace + + // Save r8 first to free up a register + mov [rax + 8 * 8], r8 + + // Save the original values of rax, rdi, and rsi into the actual register save space + mov r8, [rsp + 7 * 8] // saved rax + mov [rax + 0 * 8], r8 + mov r8, [rsp + 8 * 8] // saved rdi + mov [rax + 7 * 8], r8 + mov r8, [rsp + 9 * 8] // saved rsi + mov [rax + 6 * 8], r8 + + // Save remaining registers + mov [rax + 1 * 8], rcx + mov [rax + 2 * 8], rdx + mov [rax + 3 * 8], rbx + // [rax + 4 * 8] == save space for rsp, which doesn't need to be saved since bailout uses rbp for stack access + mov [rax + 5 * 8], rbp + // mov [rax + 6 * 8], rsi // rsi saved above + // mov [rax + 7 * 8], rdi // rdi saved above + // mov [rax + 8 * 8], r8 // r8 was saved earlier + mov [rax + 9 * 8], r9 + mov [rax + 10 * 8], r10 + mov [rax + 11 * 8], r11 + mov [rax + 12 * 8], 
r12 + mov [rax + 13 * 8], r13 + mov [rax + 14 * 8], r14 + mov [rax + 15 * 8], r15 + + // Save all XMM regs (full width) + movups xmmword ptr [rax + 80h], xmm0 // [rax + 16 * 8 + 0 * 16] = xmm0 + movups xmmword ptr [rax + 90h], xmm1 // [rax + 16 * 8 + 1 * 16] = xmm1 + movups xmmword ptr [rax + 0a0h], xmm2 // ... + // movups xmmword ptr [rax + 0b0h], xmm3 // xplat: WHY this one fails to compile... + movups xmmword ptr [rax + 11 * 16], xmm3 + movups xmmword ptr [rax + 0c0h], xmm4 + movups xmmword ptr [rax + 0d0h], xmm5 + movups xmmword ptr [rax + 0e0h], xmm6 + movups xmmword ptr [rax + 0f0h], xmm7 + movups xmmword ptr [rax + 100h], xmm8 + movups xmmword ptr [rax + 110h], xmm9 + movups xmmword ptr [rax + 120h], xmm10 + movups xmmword ptr [rax + 130h], xmm11 + movups xmmword ptr [rax + 140h], xmm12 + movups xmmword ptr [rax + 150h], xmm13 + movups xmmword ptr [rax + 160h], xmm14 + movups xmmword ptr [rax + 170h], xmm15 // [rax + 16 * 8 + 15 * 16] = xmm15 + + ret + +LEAF_END _ZN12LinearScanMD26SaveAllRegistersEP13BailOutRecord, _TEXT + + +//------------------------------------------------------------------------------ +// LinearScanMD::SaveAllRegistersAndBailOut(BailOutRecord *const bailOutRecord) + +.balign 16 +NESTED_ENTRY _ZN12LinearScanMD26SaveAllRegistersAndBailOutEP13BailOutRecord, _TEXT, NoHandler + + // We follow Custom calling convention + // [rsp + 1 * 8] == saved rax + // [rsp + 2 * 8] == saved rdi + // rdi == bailOutRecord + + // Relative to this function, SaveAllRegisters expects: + // [rsp + 3 * 8] == saved rsi + // Since rsi is not a parameter to this function, it won't be saved on the stack by jitted code, so copy it there now + + mov [rsp + 3 * 8], rsi + + sub rsp, 28h // use the same as Windows x64 so register locations are the same + .cfi_adjust_cfa_offset 0x28 + + call C_FUNC(_ZN12LinearScanMD26SaveAllRegistersEP13BailOutRecord) + + add rsp, 28h // deallocate stack space + .cfi_adjust_cfa_offset -0x28 + + jmp _ZN13BailOutRecord7BailOutEPKS_ + +NESTED_END _ZN12LinearScanMD26SaveAllRegistersAndBailOutEP13BailOutRecord, _TEXT + + +//------------------------------------------------------------------------------ +// LinearScanMD::SaveAllRegistersAndBranchBailOut(BranchBailOutRecord *const bailOutRecord, const BOOL condition) + +.balign 16 +NESTED_ENTRY _ZN12LinearScanMD32SaveAllRegistersAndBranchBailOutEP19BranchBailOutRecordi, _TEXT, NoHandler + + // We follow custom calling convention + // [rsp + 1 * 8] == saved rax + // [rsp + 2 * 8] == saved rdi + // [rsp + 3 * 8] == saved rsi + // rdi == bailOutRecord + // rsi == condition + + sub rsp, 28h // use the same as Windows x64 so register locations are the same + .cfi_adjust_cfa_offset 0x28 + + call C_FUNC(_ZN12LinearScanMD26SaveAllRegistersEP13BailOutRecord) + + add rsp, 28h // deallocate stack space + .cfi_adjust_cfa_offset -0x28 + + jmp _ZN19BranchBailOutRecord7BailOutEPKS_i + +NESTED_END _ZN12LinearScanMD32SaveAllRegistersAndBranchBailOutEP19BranchBailOutRecordi, _TEXT diff --git a/lib/Backend/amd64/LowererMDArch.cpp b/lib/Backend/amd64/LowererMDArch.cpp index f55c311b875..cd8bbe9a521 100644 --- a/lib/Backend/amd64/LowererMDArch.cpp +++ b/lib/Backend/amd64/LowererMDArch.cpp @@ -186,7 +186,7 @@ LowererMDArch::LoadHeapArgsCached(IR::Instr *instrArgs) { instrArgs->m_opcode = Js::OpCode::MOV; instrArgs->ReplaceSrc1(IR::AddrOpnd::NewNull(func)); - + if (PHASE_TRACE1(Js::StackArgFormalsOptPhase) && func->GetJITFunctionBody()->GetInParamsCount() > 1) { Output::Print(_u("StackArgFormals : %s (%d) :Removing Heap Arguments object creation 
in Lowerer. \n"), instrArgs->m_func->GetJITFunctionBody()->GetDisplayName(), instrArgs->m_func->GetFunctionNumber());
@@ -203,7 +203,7 @@ LowererMDArch::LoadHeapArgsCached(IR::Instr *instrArgs)
     // s2 = actual argument count
     // s1 = current function
     // dst = JavascriptOperators::LoadArguments(s1, s2, s3, s4, s5, s6, s7)
-
+
     // s7 = formals are let decls
     IR::Opnd * formalsAreLetDecls = IR::IntConstOpnd::New((IntConstType)(instrArgs->m_opcode == Js::OpCode::LdLetHeapArgsCached), TyUint8, func);
     this->LoadHelperArgument(instrArgs, formalsAreLetDecls);
@@ -317,7 +317,7 @@ LowererMDArch::LoadHeapArguments(IR::Instr *instrArgs, bool force /* = false */,
     // s2 = actual argument count
     // s1 = current function
     // dst = JavascriptOperators::LoadHeapArguments(s1, s2, s3, s4, s5, s6, s7)
-
+
     // s7 = formals are let decls
     this->LoadHelperArgument(instrArgs, IR::IntConstOpnd::New(instrArgs->m_opcode == Js::OpCode::LdLetHeapArguments ? TRUE : FALSE, TyUint8, func));
@@ -612,7 +612,14 @@ LowererMDArch::LowerCallIDynamic(IR::Instr *callInstr, IR::Instr*saveThisArgOutI
     IR::Opnd *funcObjOpnd = callInstr->UnlinkSrc1();
     GeneratePreCall(callInstr, funcObjOpnd, insertBeforeInstrForCFG);
-    LowerCall(callInstr, 0);
+
+    // Normally for dynamic calls we move 4 args to registers and push the remaining
+    // args onto the stack (Windows convention, unchanged on xplat), so we need to
+    // manually home 4 args. Inlinees lower differently and follow the platform ABI,
+    // so for them we need to home actualArgsCount + 2 args (function, callInfo).
+    const uint32 homeArgs = callInstr->m_func->IsInlinee() ?
+        callInstr->m_func->actualCount + 2 : 4;
+    LowerCall(callInstr, homeArgs);
     return callInstr;
 }
@@ -752,7 +759,7 @@ LowererMDArch::LowerCallI(IR::Instr * callInstr, ushort callFlags, bool isHelper
     else if (insertBeforeInstrForCFG != nullptr)
     {
         RegNum dstReg = insertBeforeInstrForCFG->GetDst()->AsRegOpnd()->GetReg();
-        AssertMsg(dstReg == RegR8 || dstReg == RegR9, "NewScObject should insert the first Argument in R8/R9 only based on Spread call or not.");
+        AssertMsg(dstReg == RegArg2 || dstReg == RegArg3, "NewScObject should insert the first Argument in RegArg2/RegArg3 only based on Spread call or not.");
         insertBeforeInstrForCFGCheck = insertBeforeInstrForCFG;
     }
@@ -810,6 +817,23 @@ LowererMDArch::LowerCallPut(IR::Instr *callInstr)
     return nullptr;
 }
+static inline IRType ExtendHelperArg(IRType type)
+{
+#ifdef __clang__
+    // clang expects the caller to extend small integer args to int
+    switch (type)
+    {
+    case TyInt8:
+    case TyInt16:
+        return TyInt32;
+    case TyUint8:
+    case TyUint16:
+        return TyUint32;
+    }
+#endif
+    return type;
+}
+
 IR::Instr *
 LowererMDArch::LowerCall(IR::Instr * callInstr, uint32 argCount)
 {
@@ -857,22 +881,92 @@ LowererMDArch::LowerCall(IR::Instr * callInstr, uint32 argCount)
     //
     AssertMsg(this->helperCallArgsCount >= 0, "Fatal. helper call arguments ought to be positive");
-    AssertMsg(this->helperCallArgsCount < 255, "Too many helper call arguments");
+    AssertMsg(this->helperCallArgsCount < MaxArgumentsToHelper && MaxArgumentsToHelper < 255, "Too many helper call arguments");
     uint16 argsLeft = static_cast(this->helperCallArgsCount);
+    // Sys V x64 ABI assigns int and xmm arg registers separately.
+    // e.g. 
args: int, double, int, double, int, double + // Windows: int0, xmm1, int2, xmm3, stack, stack + // Sys V: int0, xmm0, int1, xmm1, int2, xmm2 +#ifdef _WIN32 +#define _V_ARG_INDEX(index) index +#else + uint16 _vindex[MaxArgumentsToHelper]; + { + uint16 intIndex = 1, doubleIndex = 1, stackIndex = IntArgRegsCount + 1; + for (int i = 0; i < this->helperCallArgsCount; i++) + { + IR::Opnd * helperSrc = this->helperCallArgs[this->helperCallArgsCount - 1 - i]; + IRType type = helperSrc->GetType(); + if (IRType_IsFloat(type) || IRType_IsSimd128(type)) + { + if (doubleIndex <= XmmArgRegsCount) + { + _vindex[i] = doubleIndex++; + } + else + { + _vindex[i] = stackIndex++; + } + } + else + { + if (intIndex <= IntArgRegsCount) + { + _vindex[i] = intIndex++; + } + else + { + _vindex[i] = stackIndex++; + } + } + } + } +#define _V_ARG_INDEX(index) _vindex[(index) - 1] +#endif + + // xplat NOTE: Lower often loads "known args" with LoadHelperArgument() and + // variadic JS runtime args with LowerCallArgs(). So the full args length is + // this->helperCallArgsCount + argCount + // "argCount > 0" indicates we have variadic JS runtime args and needs to + // manually home registers on xplat. + const bool shouldHomeParams = argCount > 0; + while (argsLeft > 0) { IR::Opnd * helperSrc = this->helperCallArgs[this->helperCallArgsCount - argsLeft]; - StackSym * helperSym = m_func->m_symTable->GetArgSlotSym(static_cast(argsLeft)); - helperSym->m_type = helperSrc->GetType(); + uint16 index = _V_ARG_INDEX(argsLeft); + StackSym * helperSym = m_func->m_symTable->GetArgSlotSym(index); + helperSym->m_type = ExtendHelperArg(helperSrc->GetType()); Lowerer::InsertMove( - this->GetArgSlotOpnd(argsLeft, helperSym), + this->GetArgSlotOpnd(index, helperSym, /*isHelper*/!shouldHomeParams), helperSrc, callInstr); --argsLeft; } +#ifndef _WIN32 + // Manually home args + if (shouldHomeParams) + { + static const RegNum s_argRegs[IntArgRegsCount] = { + #define REG_INT_ARG(Index, Name) Reg ## Name, + #include "RegList.h" + }; + + const int callArgCount = this->helperCallArgsCount + static_cast(argCount); + const int argRegs = min(callArgCount, static_cast(IntArgRegsCount)); + for (int i = argRegs - 1; i >= 0; i--) + { + StackSym * sym = this->m_func->m_symTable->GetArgSlotSym(static_cast(i + 1)); + Lowerer::InsertMove( + IR::SymOpnd::New(sym, TyMachReg, this->m_func), + IR::RegOpnd::New(nullptr, s_argRegs[i], TyMachReg, this->m_func), + callInstr); + } + } +#endif // // load the address into a register because we cannot directly access 64 bit constants @@ -909,7 +1003,7 @@ LowererMDArch::LowerCall(IR::Instr * callInstr, uint32 argCount) // the first 4 arguments go in registers and the rest are on stack. // IR::Opnd * -LowererMDArch::GetArgSlotOpnd(uint16 index, StackSym * argSym) +LowererMDArch::GetArgSlotOpnd(uint16 index, StackSym * argSym, bool isHelper /*= false*/) { Assert(index != 0); @@ -934,51 +1028,38 @@ LowererMDArch::GetArgSlotOpnd(uint16 index, StackSym * argSym) } IRType type = argSym ? 
argSym->GetType() : TyMachReg; - if (argPosition <= 4) - { - RegNum reg = RegNOREG; + const bool isFloatArg = IRType_IsFloat(type) || IRType_IsSimd128(type); + RegNum reg = RegNOREG; - if (IRType_IsFloat(type) || IRType_IsSimd128(type)) + if (!isFloatArg && argPosition <= IntArgRegsCount) + { + switch (argPosition) { - switch (argPosition) - { - case 4: - reg = RegXMM3; - break; - case 3: - reg = RegXMM2; - break; - case 2: - reg = RegXMM1; - break; - case 1: - reg = RegXMM0; - break; - default: - Assume(UNREACHED); - } +#define REG_INT_ARG(Index, Name) \ + case ((Index) + 1): \ + reg = Reg ## Name; \ + break; +#include "RegList.h" + default: + Assume(UNREACHED); } - else + } + else if (isFloatArg && argPosition <= XmmArgRegsCount) + { + switch (argPosition) { - switch (argPosition) - { - case 4: - reg = RegR9; - break; - case 3: - reg = RegR8; - break; - case 2: - reg = RegRDX; - break; - case 1: - reg = RegRCX; - break; - default: - Assume(UNREACHED); - } +#define REG_XMM_ARG(Index, Name) \ + case ((Index) + 1): \ + reg = Reg ## Name; \ + break; +#include "RegList.h" + default: + Assume(UNREACHED); } + } + if (reg != RegNOREG) + { IR::RegOpnd *regOpnd = IR::RegOpnd::New(argSym, reg, type, m_func); regOpnd->m_isCallArg = true; @@ -988,12 +1069,17 @@ LowererMDArch::GetArgSlotOpnd(uint16 index, StackSym * argSym) { if (argSym == nullptr) { - argSym = this->m_func->m_symTable->GetArgSlotSym(static_cast(index)); + argSym = this->m_func->m_symTable->GetArgSlotSym(index); } - // - // More than 4 arguments. Assign them to appropriate slots - // +#ifndef _WIN32 + // helper does not home args, adjust stack offset + if (isHelper) + { + const uint16 argIndex = index - IntArgRegsCount; + argSym->m_offset = (argIndex - 1) * MachPtr; + } +#endif argSlotOpnd = IR::SymOpnd::New(argSym, type, this->m_func); } @@ -1403,8 +1489,8 @@ LowererMDArch::LowerEntryInstr(IR::EntryInstr * entryInstr) if (Lowerer::IsArgSaveRequired(this->m_func)) { - if (argSlotsForFunctionsCalled < 4) - argSlotsForFunctionsCalled = 4; + if (argSlotsForFunctionsCalled < IntArgRegsCount) + argSlotsForFunctionsCalled = IntArgRegsCount; } else { @@ -1489,10 +1575,11 @@ LowererMDArch::LowerEntryInstr(IR::EntryInstr * entryInstr) IR::Instr *movRax0 = nullptr; IR::Opnd *raxOpnd = nullptr; - if (this->m_func->HasArgumentSlot() && (this->m_func->IsStackArgsEnabled() || - this->m_func->IsJitInDebugMode() || - // disabling apply inlining leads to explicit load from the zero-inited slot - this->m_func->GetJITFunctionBody()->IsInlineApplyDisabled()) + if ((this->m_func->HasArgumentSlot() && + (this->m_func->IsStackArgsEnabled() || + this->m_func->IsJitInDebugMode() || + // disabling apply inlining leads to explicit load from the zero-inited slot + this->m_func->GetJITFunctionBody()->IsInlineApplyDisabled())) #ifdef BAILOUT_INJECTION || Js::Configuration::Global.flags.IsEnabled(Js::BailOutFlag) || Js::Configuration::Global.flags.IsEnabled(Js::BailOutAtEveryLineFlag) @@ -1551,6 +1638,7 @@ LowererMDArch::LowerEntryInstr(IR::EntryInstr * entryInstr) firstPrologInstr->InsertBefore(IR::PragmaInstr::New(Js::OpCode::PrologStart, 0, m_func)); lastPrologInstr->InsertAfter(IR::PragmaInstr::New(Js::OpCode::PrologEnd, 0, m_func)); +#ifdef _WIN32 // home registers // // Now store all the arguments in the register in the stack slots // @@ -1631,6 +1719,7 @@ LowererMDArch::LowerEntryInstr(IR::EntryInstr * entryInstr) this->MovArgFromReg2Stack(entryInstr, RegR8, 3); this->MovArgFromReg2Stack(entryInstr, RegR9, 4); } +#endif // _WIN32 IntConstType frameSize = 
Js::Constants::MinStackJIT + stackArgsSize + stackLocalsSize + savedRegSize; this->GeneratePrologueStackProbe(entryInstr, frameSize); @@ -1761,14 +1850,14 @@ LowererMDArch::GeneratePrologueStackProbe(IR::Instr *entryInstr, IntConstType fr IR::RegOpnd *target; { - // MOV rdx, scriptContext + // MOV RegArg1, scriptContext this->lowererMD->CreateAssign( - IR::RegOpnd::New(nullptr, RegRDX, TyMachReg, m_func), + IR::RegOpnd::New(nullptr, RegArg1, TyMachReg, m_func), this->lowererMD->m_lowerer->LoadScriptContextOpnd(insertInstr), insertInstr); - // MOV rcx, frameSize + // MOV RegArg0, frameSize this->lowererMD->CreateAssign( - IR::RegOpnd::New(nullptr, RegRCX, TyMachReg, this->m_func), + IR::RegOpnd::New(nullptr, RegArg0, TyMachReg, this->m_func), IR::AddrOpnd::New((void*)frameSize, IR::AddrOpndKindConstant, this->m_func), insertInstr); // MOV rax, ThreadContext::ProbeCurrentStack @@ -2543,7 +2632,7 @@ LowererMDArch::EmitLoadInt32(IR::Instr *instrLoad, bool conversionFromObjectAllo // Need to bail out instead of calling a helper return true; } - + if (conversionFromObjectAllowed) { lowererMD->m_lowerer->LowerUnaryHelperMem(instrLoad, IR::HelperConv_ToInt32); @@ -2907,23 +2996,24 @@ LowererMDArch::LowerEHRegionReturn(IR::Instr * insertBeforeInstr, IR::Opnd * tar // Load the continuation address into the return register. insertBeforeInstr->InsertBefore(IR::Instr::New(Js::OpCode::MOV, retReg, targetOpnd, this->m_func)); - // MOV r8, spillSize - IR::Instr *movR8 = IR::Instr::New(Js::OpCode::LdSpillSize, - IR::RegOpnd::New(nullptr, RegR8, TyMachReg, m_func), + // MOV REG_EH_SPILL_SIZE, spillSize + IR::Instr *movSpillSize = IR::Instr::New(Js::OpCode::LdSpillSize, + IR::RegOpnd::New(nullptr, REG_EH_SPILL_SIZE, TyMachReg, m_func), m_func); - insertBeforeInstr->InsertBefore(movR8); + insertBeforeInstr->InsertBefore(movSpillSize); - // MOV r9, argsSize - IR::Instr *movR9 = IR::Instr::New(Js::OpCode::LdArgSize, - IR::RegOpnd::New(nullptr, RegR9, TyMachReg, m_func), + // MOV REG_EH_ARGS_SIZE, argsSize + IR::Instr *movArgsSize = IR::Instr::New(Js::OpCode::LdArgSize, + IR::RegOpnd::New(nullptr, REG_EH_ARGS_SIZE, TyMachReg, m_func), m_func); - insertBeforeInstr->InsertBefore(movR9); + insertBeforeInstr->InsertBefore(movArgsSize); - // MOV rcx, amd64_ReturnFromCallWithFakeFrame - // PUSH rcx + // MOV REG_EH_TARGET, amd64_ReturnFromCallWithFakeFrame + // PUSH REG_EH_TARGET // RET - IR::Opnd *endCallWithFakeFrame = endCallWithFakeFrame = IR::RegOpnd::New(nullptr, RegRCX, TyMachReg, m_func); + IR::Opnd *endCallWithFakeFrame = endCallWithFakeFrame = + IR::RegOpnd::New(nullptr, REG_EH_TARGET, TyMachReg, m_func); IR::Instr *movTarget = IR::Instr::New(Js::OpCode::MOV, endCallWithFakeFrame, IR::HelperCallOpnd::New(IR::HelperOp_ReturnFromCallWithFakeFrame, m_func), diff --git a/lib/Backend/amd64/LowererMDArch.h b/lib/Backend/amd64/LowererMDArch.h index 07c5d572528..862e6f5b32a 100644 --- a/lib/Backend/amd64/LowererMDArch.h +++ b/lib/Backend/amd64/LowererMDArch.h @@ -58,7 +58,7 @@ class LowererMDArch return Math::FitsInDWord((size_t)opnd->GetMemLoc()); } - IR::Opnd * GetArgSlotOpnd(Js::ArgSlot slotIndex, StackSym * argSym = nullptr); + IR::Opnd * GetArgSlotOpnd(Js::ArgSlot slotIndex, StackSym * argSym = nullptr, bool isHelper = false); IR::Instr * LoadNewScObjFirstArg(IR::Instr * instr, IR::Opnd * dst, ushort extraArgs = 0); IR::Instr * LoadInputParamPtr(IR::Instr *instrInsert, IR::RegOpnd *optionalDstOpnd = nullptr); int32 LowerCallArgs(IR::Instr *callInstr, ushort callFlags, Js::ArgSlot extraParams = 1 /* for function 
object */, IR::IntConstOpnd **callInfoOpndRef = nullptr); @@ -134,3 +134,6 @@ class LowererMDArch void SetMaxArgSlots(Js::ArgSlot actualCount /*including this*/); }; +#define REG_EH_TARGET RegArg0 +#define REG_EH_SPILL_SIZE RegArg2 +#define REG_EH_ARGS_SIZE RegArg3 diff --git a/lib/Backend/amd64/PeepsMD.cpp b/lib/Backend/amd64/PeepsMD.cpp index dc1e85cf1e6..da12b57f5eb 100644 --- a/lib/Backend/amd64/PeepsMD.cpp +++ b/lib/Backend/amd64/PeepsMD.cpp @@ -18,19 +18,12 @@ PeepsMD::ProcessImplicitRegs(IR::Instr *instr) { if (LowererMD::IsCall(instr)) { - this->peeps->ClearReg(RegRAX); - this->peeps->ClearReg(RegRCX); - this->peeps->ClearReg(RegRDX); - this->peeps->ClearReg(RegR8); - this->peeps->ClearReg(RegR9); - this->peeps->ClearReg(RegR10); - this->peeps->ClearReg(RegR11); - this->peeps->ClearReg(RegXMM0); - this->peeps->ClearReg(RegXMM1); - this->peeps->ClearReg(RegXMM2); - this->peeps->ClearReg(RegXMM3); - this->peeps->ClearReg(RegXMM4); - this->peeps->ClearReg(RegXMM5); +#define REGDAT(Name, Listing, Encode, Type, BitVec) \ + if (!((BitVec) & (RA_CALLEESAVE | RA_DONTALLOCATE))) \ + { \ + this->peeps->ClearReg(Reg ## Name); \ + } +#include "RegList.h" } else if (instr->m_opcode == Js::OpCode::IMUL) { diff --git a/lib/Backend/amd64/Reg.h b/lib/Backend/amd64/Reg.h index 0565b0566b4..924c8c66f07 100644 --- a/lib/Backend/amd64/Reg.h +++ b/lib/Backend/amd64/Reg.h @@ -14,9 +14,29 @@ enum RegNum { #define REGDAT(Name, Listing, Encode, Type, BitVec) Reg ## Name, #include "RegList.h" -#undef REGDAT + RegNumCount, - RegNumCount, // Number of operations +// alias RegArg0, RegArg1, ... +#define REG_INT_ARG(Index, Name) RegArg ## Index = Reg ## Name, +#include "RegList.h" + +// alias RegXmmArg0, RegXmmArg1, ... +#define REG_XMM_ARG(Index, Name) RegXmmArg ## Index = Reg ## Name, +#include "RegList.h" +}; + +// IntArgRegsCount +enum _IntArgRegs { +#define REG_INT_ARG(Index, Name) _RegArg ## Index, +#include "RegList.h" + IntArgRegsCount +}; + +// XmmArgRegsCount +enum _XmmArgRegs { +#define REG_XMM_ARG(Index, Name) _RegXmmArg ## Index, +#include "RegList.h" + XmmArgRegsCount }; #define REGNUM_ISXMMXREG(r) ((r) >= RegXMM0 && (r) <= RegXMM15) diff --git a/lib/Backend/amd64/RegList.h b/lib/Backend/amd64/RegList.h index 22bfb6a4520..7c2cdd3fcf8 100644 --- a/lib/Backend/amd64/RegList.h +++ b/lib/Backend/amd64/RegList.h @@ -6,6 +6,10 @@ // Name Name Encode Type BitVec //------------------------------------------------------------------------ +#ifndef REGDAT +#define REGDAT(Name, Listing, Encode, Type, BitVec) +#endif + // Illegal registers - must be first and have a value of 0 // Internal Name @@ -15,6 +19,7 @@ // / / / Type // / / / / BitVec // / / / / / +#ifdef _WIN32 REGDAT(NOREG, noreg, 0xf, TyIllegal, RA_DONTALLOCATE) REGDAT(RAX, rax, 0, TyInt64, RA_CALLERSAVE | RA_BYTEABLE) REGDAT(RCX, rcx, 1, TyInt64, RA_CALLERSAVE | RA_BYTEABLE) @@ -49,3 +54,81 @@ REGDAT(XMM12, xmm12, 4, TyFloat64, RA_CALLEESAVE) REGDAT(XMM13, xmm13, 5, TyFloat64, RA_CALLEESAVE) REGDAT(XMM14, xmm14, 6, TyFloat64, RA_CALLEESAVE) REGDAT(XMM15, xmm15, 7, TyFloat64, RA_CALLEESAVE) + +#else // System V x64 +REGDAT(NOREG, noreg, 0xf, TyIllegal, RA_DONTALLOCATE) +REGDAT(RAX, rax, 0, TyInt64, RA_CALLERSAVE | RA_BYTEABLE) +REGDAT(RCX, rcx, 1, TyInt64, RA_CALLERSAVE | RA_BYTEABLE) +REGDAT(RDX, rdx, 2, TyInt64, RA_CALLERSAVE | RA_BYTEABLE) +REGDAT(RBX, rbx, 3, TyInt64, RA_CALLEESAVE | RA_BYTEABLE) +REGDAT(RSP, rsp, 4, TyInt64, RA_DONTALLOCATE) +REGDAT(RBP, rbp, 5, TyInt64, RA_DONTALLOCATE) +REGDAT(RSI, rsi, 6, TyInt64, RA_CALLERSAVE) +REGDAT(RDI, 
rdi, 7, TyInt64, RA_CALLERSAVE) +REGDAT(R8, r8, 0, TyInt64, RA_CALLERSAVE | RA_BYTEABLE) +REGDAT(R9, r9, 1, TyInt64, RA_CALLERSAVE | RA_BYTEABLE) +REGDAT(R10, r10, 2, TyInt64, RA_CALLERSAVE | RA_BYTEABLE) +REGDAT(R11, r11, 3, TyInt64, RA_CALLERSAVE | RA_BYTEABLE) +REGDAT(R12, r12, 4, TyInt64, RA_CALLEESAVE | RA_BYTEABLE) +REGDAT(R13, r13, 5, TyInt64, RA_CALLEESAVE | RA_BYTEABLE) +REGDAT(R14, r14, 6, TyInt64, RA_CALLEESAVE | RA_BYTEABLE) +REGDAT(R15, r15, 7, TyInt64, RA_CALLEESAVE | RA_BYTEABLE) + +REGDAT(XMM0, xmm0, 0, TyFloat64, 0) +REGDAT(XMM1, xmm1, 1, TyFloat64, 0) +REGDAT(XMM2, xmm2, 2, TyFloat64, 0) +REGDAT(XMM3, xmm3, 3, TyFloat64, 0) +REGDAT(XMM4, xmm4, 4, TyFloat64, 0) +REGDAT(XMM5, xmm5, 5, TyFloat64, 0) +REGDAT(XMM6, xmm6, 6, TyFloat64, 0) +REGDAT(XMM7, xmm7, 7, TyFloat64, 0) +REGDAT(XMM8, xmm8, 0, TyFloat64, 0) +REGDAT(XMM9, xmm9, 1, TyFloat64, 0) +REGDAT(XMM10, xmm10, 2, TyFloat64, 0) +REGDAT(XMM11, xmm11, 3, TyFloat64, 0) +REGDAT(XMM12, xmm12, 4, TyFloat64, 0) +REGDAT(XMM13, xmm13, 5, TyFloat64, 0) +REGDAT(XMM14, xmm14, 6, TyFloat64, 0) +REGDAT(XMM15, xmm15, 7, TyFloat64, 0) +#endif // !_WIN32 + +#ifndef REG_INT_ARG +#define REG_INT_ARG(Index, Name) +#endif + +#ifndef REG_XMM_ARG +#define REG_XMM_ARG(Index, Name) +#endif + +#ifdef _WIN32 +REG_INT_ARG(0, RCX) +REG_INT_ARG(1, RDX) +REG_INT_ARG(2, R8) +REG_INT_ARG(3, R9) + +REG_XMM_ARG(0, XMM0) +REG_XMM_ARG(1, XMM1) +REG_XMM_ARG(2, XMM2) +REG_XMM_ARG(3, XMM3) + +#else // System V x64 +REG_INT_ARG(0, RDI) +REG_INT_ARG(1, RSI) +REG_INT_ARG(2, RDX) +REG_INT_ARG(3, RCX) +REG_INT_ARG(4, R8) +REG_INT_ARG(5, R9) + +REG_XMM_ARG(0, XMM0) +REG_XMM_ARG(1, XMM1) +REG_XMM_ARG(2, XMM2) +REG_XMM_ARG(3, XMM3) +REG_XMM_ARG(4, XMM4) +REG_XMM_ARG(5, XMM5) +REG_XMM_ARG(6, XMM6) +REG_XMM_ARG(7, XMM7) +#endif // !_WIN32 + +#undef REGDAT +#undef REG_INT_ARG +#undef REG_XMM_ARG diff --git a/lib/Backend/amd64/Thunks.S b/lib/Backend/amd64/Thunks.S new file mode 100644 index 00000000000..c03c6d72689 --- /dev/null +++ b/lib/Backend/amd64/Thunks.S @@ -0,0 +1,94 @@ +//------------------------------------------------------------------------------------------------------- +// Copyright (C) Microsoft. All rights reserved. +// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. 
+//------------------------------------------------------------------------------------------------------- + +.intel_syntax noprefix +#include "unixasmmacros.inc" + + +//============================================================================================================ +// Fake __chkstk +//============================================================================================================ +.balign 16 +LEAF_ENTRY __chkstk, _TEXT + ret +LEAF_END __chkstk, _TEXT + +//============================================================================================================ +// NativeCodeGenerator::CheckCodeGenThunk +//============================================================================================================ + +//.extern _ZN19NativeCodeGenerator12CheckCodeGenEPN2Js14ScriptFunctionE +.balign 16 +NESTED_ENTRY _ZN19NativeCodeGenerator17CheckCodeGenThunkEPN2Js16RecyclableObjectENS0_8CallInfoEz, _TEXT, NoHandler + push_nonvol_reg rbp + lea rbp, [rsp] + + // save argument registers used by custom calling convention + push_register rdi + push_register rsi + + // Js::JavascriptMethod NativeCodeGenerator::CheckCodeGen( + // Js::ScriptFunction * function) + // + // RDI == function, setup by custom calling convention + call C_FUNC(_ZN19NativeCodeGenerator12CheckCodeGenEPN2Js14ScriptFunctionE) + + pop_register rsi + pop_register rdi + pop_nonvol_reg rbp + + jmp rax +NESTED_END _ZN19NativeCodeGenerator17CheckCodeGenThunkEPN2Js16RecyclableObjectENS0_8CallInfoEz, _TEXT + + +//============================================================================================================ +// NativeCodeGenerator::CheckAsmJsCodeGenThunk +//============================================================================================================ + +//.extern _ZN19NativeCodeGenerator17CheckAsmJsCodeGenEPN2Js14ScriptFunctionE +.balign 16 +NESTED_ENTRY _ZN19NativeCodeGenerator22CheckAsmJsCodeGenThunkEPN2Js16RecyclableObjectENS0_8CallInfoEz, _TEXT, NoHandler + push_nonvol_reg rbp // push rbp and adjust CFA offset + lea rbp, [rsp] + + set_cfa_register rbp, (2*8) // Set to compute CFA as: rbp + 16 (sizeof: [rbp] [ReturnAddress]) + + // save argument registers used by custom calling convention + push rdi + push rsi + push rdx + push rcx + push r8 + push r9 + + sub rsp, 40h + + // ----- TODO: potentially xmm0-xmm7 args + // spill potential floating point arguments to stack + movaps xmmword ptr [rsp + 00h], xmm0 + movaps xmmword ptr [rsp + 10h], xmm1 + movaps xmmword ptr [rsp + 20h], xmm2 + movaps xmmword ptr [rsp + 30h], xmm3 + + call _ZN19NativeCodeGenerator17CheckAsmJsCodeGenEPN2Js14ScriptFunctionE + + // restore potential floating point arguments from stack + movaps xmm0, xmmword ptr [rsp + 00h] + movaps xmm1, xmmword ptr [rsp + 10h] + movaps xmm2, xmmword ptr [rsp + 20h] + movaps xmm3, xmmword ptr [rsp + 30h] + + add rsp, 40h + + pop r9 + pop r8 + pop rcx + pop rdx + pop rsi + pop rdi + + pop_nonvol_reg rbp + jmp rax +NESTED_END _ZN19NativeCodeGenerator22CheckAsmJsCodeGenThunkEPN2Js16RecyclableObjectENS0_8CallInfoEz, _TEXT diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index 5f34badbd4d..f0bb0d7e944 100644 --- a/lib/CMakeLists.txt +++ b/lib/CMakeLists.txt @@ -1,5 +1,9 @@ add_compile_options(-fPIC) +if(BuildJIT) + add_subdirectory (Backend) +endif() + add_subdirectory (Common) add_subdirectory (Parser) add_subdirectory (Runtime) diff --git a/lib/Common/BackendApi.h b/lib/Common/BackendApi.h index d0888d59197..5e4c815f54e 100644 --- a/lib/Common/BackendApi.h +++ 
b/lib/Common/BackendApi.h @@ -37,7 +37,7 @@ struct InlinedFrameLayout; typedef intptr_t IntConstType; typedef uintptr_t UIntConstType; -typedef IntMath::Type IntConstMath; +typedef IntMath::Type IntConstMath; typedef double FloatConstType; #include "EmitBuffer.h" @@ -60,7 +60,7 @@ void UpdateNativeCodeGeneratorForDebugMode(NativeCodeGenerator* nativeCodeGen); CriticalSection *GetNativeCodeGenCriticalSection(NativeCodeGenerator *pNativeCodeGen); bool TryReleaseNonHiPriWorkItem(Js::ScriptContext* scriptContext, CodeGenWorkItem* workItem); void NativeCodeGenEnterScriptStart(NativeCodeGenerator * nativeCodeGen); -void FreeNativeCodeGenAllocation(Js::ScriptContext* scriptContext, void* address); +void FreeNativeCodeGenAllocation(Js::ScriptContext* scriptContext, Js::JavascriptMethod address); CodeGenAllocators* GetForegroundAllocator(NativeCodeGenerator * nativeCodeGen, PageAllocator* pageallocator); void GenerateFunction(NativeCodeGenerator * nativeCodeGen, Js::FunctionBody * functionBody, Js::ScriptFunction * function = NULL); void GenerateLoopBody(NativeCodeGenerator * nativeCodeGen, Js::FunctionBody * functionBody, Js::LoopHeader * loopHeader, Js::EntryPointInfo* entryPointInfo, uint localCount, Js::Var localSlots[]); diff --git a/lib/Common/Common.h b/lib/Common/Common.h index 4aca47455a7..ac783999dc0 100644 --- a/lib/Common/Common.h +++ b/lib/Common/Common.h @@ -47,6 +47,7 @@ namespace Js #include "EnumClassHelp.h" #include "Common/Tick.h" +#include "Common/IntMathCommon.h" #include "Common/Int16Math.h" #include "Common/Int32Math.h" #include "Common/UInt16Math.h" diff --git a/lib/Common/Common/CommonCommonPch.h b/lib/Common/Common/CommonCommonPch.h index 1e54de91b1b..b3fd7406951 100644 --- a/lib/Common/Common/CommonCommonPch.h +++ b/lib/Common/Common/CommonCommonPch.h @@ -19,6 +19,10 @@ // === Common Header Files === #include "Common/NumberUtilitiesBase.h" #include "Common/NumberUtilities.h" +#include "Common/IntMathCommon.h" +#include "Common/Int16Math.h" +#include "Common/Int32Math.h" +#include "Common/Int64Math.h" #ifdef _MSC_VER #pragma warning(push) diff --git a/lib/Common/Common/Event.cpp b/lib/Common/Common/Event.cpp index e25845ce1e6..02295331ddf 100644 --- a/lib/Common/Common/Event.cpp +++ b/lib/Common/Common/Event.cpp @@ -5,7 +5,6 @@ #include "CommonCommonPch.h" #include "Common/Event.h" -#ifdef _WIN32 Event::Event(const bool autoReset, const bool signaled) : handle(CreateEvent(0, !autoReset, signaled, 0)) { if(!handle) @@ -19,4 +18,3 @@ bool Event::Wait(const unsigned int milliseconds) const Js::Throw::FatalInternalError(); return result == WAIT_OBJECT_0; } -#endif diff --git a/lib/Common/Common/Event.h b/lib/Common/Common/Event.h index b4d2835e541..9bb42edeb5c 100644 --- a/lib/Common/Common/Event.h +++ b/lib/Common/Common/Event.h @@ -4,8 +4,6 @@ //------------------------------------------------------------------------------------------------------- #pragma once -// xplat-todo: Support this on Linux too, currently tied to CreateEvent API -#ifdef _WIN32 class Event { private: @@ -48,4 +46,3 @@ class Event bool Wait(const unsigned int milliseconds = INFINITE) const; }; -#endif diff --git a/lib/Common/Common/Int16Math.h b/lib/Common/Common/Int16Math.h index ce853189c6c..88389c03d42 100644 --- a/lib/Common/Common/Int16Math.h +++ b/lib/Common/Common/Int16Math.h @@ -2,52 +2,8 @@ // Copyright (C) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. 
//------------------------------------------------------------------------------------------------------- -class Int16Math -{ -public: - template< class Func > - static int16 Add(int16 lhs, int16 rhs, __inout Func& overflowFn) - { - int16 result = lhs + rhs; - - // If the result is smaller than the LHS, then we overflowed - if( result < lhs ) - { - overflowFn(); - } - - return result; - } - - template< class Func > - static void Inc(int16& lhs, __inout Func& overflowFn) - { - ++lhs; +#pragma once - // If lhs becomes 0, then we overflowed - if(!lhs) - { - overflowFn(); - } - } - - // Convenience function which uses DefaultOverflowPolicy (throws OOM when overflow) - static int16 Add(int16 lhs, uint16 rhs) - { - return Add(lhs, rhs, ::Math::DefaultOverflowPolicy); - } - - // Convenience functions which return a bool indicating overflow - static bool Add(int16 lhs, int16 rhs, __out int16* result) - { - ::Math::RecordOverflowPolicy overflowGuard; - *result = Add(lhs, rhs, overflowGuard); - return overflowGuard.HasOverflowed(); - } - - // Convenience function which uses DefaultOverflowPolicy (throws OOM when overflow) - static void Inc(int16& lhs) - { - Inc(lhs, ::Math::DefaultOverflowPolicy); - } +class Int16Math: public IntMathCommon +{ }; diff --git a/lib/Common/Common/Int32Math.cpp b/lib/Common/Common/Int32Math.cpp index 6a46212c05b..c7f77167fc8 100644 --- a/lib/Common/Common/Int32Math.cpp +++ b/lib/Common/Common/Int32Math.cpp @@ -3,27 +3,29 @@ // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. //------------------------------------------------------------------------------------------------------- #include "CommonCommonPch.h" -#include "Common/Int32Math.h" bool Int32Math::Add(int32 left, int32 right, int32 *pResult) { - if (sizeof(void *) == 4) - { - // Overflow occurs when the result has a different sign from both the left and right operands - *pResult = left + right; - return ((left ^ *pResult) & (right ^ *pResult)) < 0; - } +#if __has_builtin(__builtin_add_overflow) || TARGET_32 + return IntMathCommon::Add(left, right, pResult); +#else Assert(sizeof(void *) == 8); int64 result64 = (int64)left + (int64)right; *pResult = (int32)result64; return result64 != (int64)(*pResult); + +#endif } bool Int32Math::Mul(int32 left, int32 right, int32 *pResult) { +#if __has_builtin(__builtin_mul_overflow) + return IntMathCommon::Mul(left, right, pResult); +#else + bool fOverflow; #if _M_IX86 __asm @@ -43,6 +45,8 @@ Int32Math::Mul(int32 left, int32 right, int32 *pResult) #endif return fOverflow; + +#endif // !__has_builtin(__builtin_mul_overflow) } bool @@ -79,50 +83,6 @@ Int32Math::Shl(int32 left, int32 right, int32 *pResult) return (left != (int32)((uint32)*pResult >> right)); } -bool -Int32Math::Sub(int32 left, int32 right, int32 *pResult) -{ - if(sizeof(void *) == 4) - { - // Overflow occurs when the result has a different sign from the left operand, and the result has the same sign as the - // right operand - *pResult = left - right; - return ((left ^ *pResult) & ~(right ^ *pResult)) < 0; - } - - Assert(sizeof(void *) == 8); - int64 result64 = (int64)left - (int64)right; - *pResult = (int32)result64; - return result64 != (int64)(*pResult); -} - -bool -Int32Math::Div(int32 left, int32 right, int32 *pResult) -{ - AssertMsg(right != 0, "Divide by zero..."); - - if (right == -1 && left == INT_MIN) - { - //Special check for INT_MIN/-1 - return true; - } - *pResult = left / right; - return false; -} - -bool -Int32Math::Mod(int32 left, int32 right, 
int32 *pResult) -{ - AssertMsg(right != 0, "Mod by zero..."); - if (right == -1 && left == INT_MIN) - { - //Special check for INT_MIN/-1 - return true; - } - *pResult = left % right; - return false; -} - bool Int32Math::Shr(int32 left, int32 right, int32 *pResult) { @@ -137,61 +97,3 @@ Int32Math::ShrU(int32 left, int32 right, int32 *pResult) *pResult = uResult; return false; } - -bool -Int32Math::And(int32 left, int32 right, int32 *pResult) -{ - *pResult = left & right; - return false; -} - -bool -Int32Math::Or(int32 left, int32 right, int32 *pResult) -{ - *pResult = left | right; - return false; -} - -bool -Int32Math::Xor(int32 left, int32 right, int32 *pResult) -{ - *pResult = left ^ right; - return false; -} - -bool -Int32Math::Neg(int32 val, int32 *pResult) -{ - *pResult = -val; - return *pResult == INT_MIN; -} - -bool -Int32Math::Not(int32 val, int32 *pResult) -{ - *pResult = ~val; - return false; -} - -bool -Int32Math::Inc(int32 val, int32 *pResult) -{ - *pResult = val + 1; - // Overflow if result ends up less than input - return *pResult <= val; -} - -bool -Int32Math::Dec(int32 val, int32 *pResult) -{ - *pResult = val - 1; - // Overflow if result ends up greater than input - return *pResult >= val; -} - -int32 -Int32Math::NearestInRangeTo(const int32 value, const int32 minimum, const int32 maximum) // inclusive -{ - Assert(minimum <= maximum); - return minimum >= value ? minimum : maximum <= value ? maximum : value; -} diff --git a/lib/Common/Common/Int32Math.h b/lib/Common/Common/Int32Math.h index 8c14db175e6..b83f10a821c 100644 --- a/lib/Common/Common/Int32Math.h +++ b/lib/Common/Common/Int32Math.h @@ -2,26 +2,17 @@ // Copyright (C) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. 
//------------------------------------------------------------------------------------------------------- -class Int32Math +#pragma once +#include "IntMathCommon.h" + +class Int32Math: public IntMathCommon { public: static bool Add(int32 left, int32 right, int32 *pResult); - static bool Sub(int32 left, int32 right, int32 *pResult); static bool Mul(int32 left, int32 right, int32 *pResult); static bool Mul(int32 left, int32 right, int32 *pResult, int32* pOverflowValue); - static bool Div(int32 left, int32 right, int32 *pResult); - static bool Mod(int32 left, int32 right, int32 *pResult); + static bool Shl(int32 left, int32 right, int32 *pResult); static bool Shr(int32 left, int32 right, int32 *pResult); static bool ShrU(int32 left, int32 right, int32 *pResult); - static bool And(int32 left, int32 right, int32 *pResult); - static bool Or(int32 left, int32 right, int32 *pResult); - static bool Xor(int32 left, int32 right, int32 *pResult); - - static bool Neg(int32 val, int32 *pResult); - static bool Not(int32 val, int32 *pResult); - static bool Inc(int32 val, int32 *pResult); - static bool Dec(int32 val, int32 *pResult); - - static int32 NearestInRangeTo(const int32 value, const int32 minimum, const int32 maximum); // inclusive }; diff --git a/lib/Common/Common/Int64Math.cpp b/lib/Common/Common/Int64Math.cpp index cc218eab26d..e57072c1ce0 100644 --- a/lib/Common/Common/Int64Math.cpp +++ b/lib/Common/Common/Int64Math.cpp @@ -25,18 +25,14 @@ #endif #endif -bool -Int64Math::Add(int64 left, int64 right, int64 *pResult) -{ - // Overflow occurs when the result has a different sign from both the left and right operands - *pResult = left + right; - return ((left ^ *pResult) & (right ^ *pResult)) < 0; -} - // Returns true if we overflowed, false if we didn't bool Int64Math::Mul(int64 left, int64 right, int64 *pResult) { +#if __has_builtin(__builtin_mul_overflow) + return IntMathCommon::Mul(left, right, pResult); +#else + #if defined(_M_X64) int64 high; *pResult = _mul128(left, right, &high); @@ -45,6 +41,8 @@ Int64Math::Mul(int64 left, int64 right, int64 *pResult) *pResult = left * right; return (left != 0 && right != 0 && (*pResult / left) != right); #endif + +#endif // !__has_builtin(__builtin_mul_overflow) } bool @@ -54,43 +52,6 @@ Int64Math::Shl(int64 left, int64 right, int64 *pResult) return (left != (int64)((uint64)*pResult >> right)); } -bool -Int64Math::Sub(int64 left, int64 right, int64 *pResult) -{ - // Overflow occurs when the result has a different sign from the left operand, and the result has the same sign as the - // right operand - *pResult = left - right; - return ((left ^ *pResult) & ~(right ^ *pResult)) < 0; -} - -bool -Int64Math::Div(int64 left, int64 right, int64 *pResult) -{ - AssertMsg(right != 0, "Divide by zero..."); - - if (right == -1 && left == INT64_MIN) - { - //Special check for INT64_MIN/-1 - return true; - } - - *pResult = left / right; - return false; -} - -bool -Int64Math::Mod(int64 left, int64 right, int64 *pResult) -{ - AssertMsg(right != 0, "Mod by zero..."); - if (right == -1 && left == INT64_MIN) - { - //Special check for INT64_MIN/-1 - return true; - } - *pResult = left % right; - return false; -} - bool Int64Math::Shr(int64 left, int64 right, int64 *pResult) { @@ -105,70 +66,3 @@ Int64Math::ShrU(int64 left, int64 right, int64 *pResult) *pResult = uResult; return false; } - -bool -Int64Math::And(int64 left, int64 right, int64 *pResult) -{ - *pResult = left & right; - return false; -} - -bool -Int64Math::Or(int64 left, int64 right, int64 *pResult) -{ - *pResult = 
left | right; - return false; -} - -bool -Int64Math::Xor(int64 left, int64 right, int64 *pResult) -{ - *pResult = left ^ right; - return false; -} - -bool -Int64Math::Neg(int64 val, int64 *pResult) -{ - *pResult = -val; - return *pResult == INT64_MIN; -} - -bool -Int64Math::Not(int64 val, int64 *pResult) -{ - *pResult = ~val; - return false; -} - -bool -Int64Math::Inc(int64 val, int64 *pResult) -{ - *pResult = val + 1; - // Overflow if result ends up less than input - return *pResult <= val; -} - -bool -Int64Math::Dec(int64 val, int64 *pResult) -{ - *pResult = val - 1; - // Overflow if result ends up greater than input - return *pResult >= val; -} - -uint64 -Int64Math::Log2(int64 val) -{ - uint64 uval = (uint64)val; - uint64 ret; - for (ret = 0; uval >>= 1; ret++); - return ret; -} - -int64 -Int64Math::NearestInRangeTo(const int64 value, const int64 minimum, const int64 maximum) // inclusive -{ - Assert(minimum <= maximum); - return minimum >= value ? minimum : maximum <= value ? maximum : value; -} diff --git a/lib/Common/Common/Int64Math.h b/lib/Common/Common/Int64Math.h index 11072b0c814..7e93ce93891 100644 --- a/lib/Common/Common/Int64Math.h +++ b/lib/Common/Common/Int64Math.h @@ -2,27 +2,13 @@ // Copyright (C) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. //------------------------------------------------------------------------------------------------------- -class Int64Math +#pragma once + +class Int64Math: public IntMathCommon { public: - static bool Add(int64 left, int64 right, int64 *pResult); - static bool Sub(int64 left, int64 right, int64 *pResult); static bool Mul(int64 left, int64 right, int64 *pResult); - static bool Div(int64 left, int64 right, int64 *pResult); - static bool Mod(int64 left, int64 right, int64 *pResult); static bool Shl(int64 left, int64 right, int64 *pResult); static bool Shr(int64 left, int64 right, int64 *pResult); static bool ShrU(int64 left, int64 right, int64 *pResult); - static bool And(int64 left, int64 right, int64 *pResult); - static bool Or(int64 left, int64 right, int64 *pResult); - static bool Xor(int64 left, int64 right, int64 *pResult); - - static bool Neg(int64 val, int64 *pResult); - static bool Not(int64 val, int64 *pResult); - static bool Inc(int64 val, int64 *pResult); - static bool Dec(int64 val, int64 *pResult); - - static uint64 Log2(int64 val); - - static int64 NearestInRangeTo(const int64 value, const int64 minimum, const int64 maximum); // inclusive }; diff --git a/lib/Common/Common/IntMathCommon.h b/lib/Common/Common/IntMathCommon.h new file mode 100644 index 00000000000..a4ab1f2db2f --- /dev/null +++ b/lib/Common/Common/IntMathCommon.h @@ -0,0 +1,192 @@ +//------------------------------------------------------------------------------------------------------- +// Copyright (C) Microsoft. All rights reserved. +// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. 
+//------------------------------------------------------------------------------------------------------- +#pragma once + +template +struct SignedTypeTraits +{ + typedef void UnsignedType; +}; + +template <> +struct SignedTypeTraits +{ + typedef uint16 UnsignedType; + static const int16 MinValue = INT16_MIN; +}; +template <> +struct SignedTypeTraits +{ + typedef uint32 UnsignedType; + static const int32 MinValue = INT32_MIN; +}; +template <> +struct SignedTypeTraits +{ + typedef uint64 UnsignedType; + static const int64 MinValue = INT64_MIN; +}; + +template +class IntMathCommon +{ +public: + typedef typename SignedTypeTraits::UnsignedType UnsignedType; + static const T MinValue = SignedTypeTraits::MinValue; + + static bool Add(T left, T right, T *pResult); + static bool Sub(T left, T right, T *pResult); + static bool Mul(T left, T right, T *pResult); + static bool Div(T left, T right, T *pResult); + static bool Mod(T left, T right, T *pResult); + static bool And(T left, T right, T *pResult); + static bool Or(T left, T right, T *pResult); + static bool Xor(T left, T right, T *pResult); + + static bool Neg(T val, T *pResult); + static bool Not(T val, T *pResult); + static bool Inc(T val, T *pResult); + static bool Dec(T val, T *pResult); + + static T NearestInRangeTo(const T value, const T minimum, const T maximum); // inclusive +}; + + +template +bool IntMathCommon::Add(T left, T right, T *pResult) +{ +#if __has_builtin(__builtin_add_overflow) + return __builtin_add_overflow(left, right, pResult); +#else + // Overflow occurs when the result has a different sign from both the + // left and right operands + *pResult = static_cast( + static_cast(left) + static_cast(right)); + return ((left ^ *pResult) & (right ^ *pResult)) < 0; +#endif +} + +template +bool IntMathCommon::Sub(T left, T right, T *pResult) +{ +#if __has_builtin(__builtin_sub_overflow) + return __builtin_sub_overflow(left, right, pResult); +#else + // Overflow occurs when the result has a different sign from the left + // operand, and the result has the same sign as the right operand + *pResult = static_cast( + static_cast(left) - static_cast(right)); + return ((left ^ *pResult) & ~(right ^ *pResult)) < 0; +#endif +} + +#if __has_builtin(__builtin_mul_overflow) +template +bool IntMathCommon::Mul(T left, T right, T *pResult) +{ + return __builtin_mul_overflow(left, right, pResult); +} +#endif + +template +bool IntMathCommon::Div(T left, T right, T *pResult) +{ + AssertMsg(right != 0, "Divide by zero..."); + + if (right == -1 && left == MinValue) + { + // Special check for MinValue/-1 + return true; + } + + *pResult = left / right; + return false; +} + +template +bool IntMathCommon::Mod(T left, T right, T *pResult) +{ + AssertMsg(right != 0, "Mod by zero..."); + if (right == -1 && left == MinValue) + { + //Special check for MinValue/-1 + return true; + } + *pResult = left % right; + return false; +} + +template +bool IntMathCommon::And(T left, T right, T *pResult) +{ + *pResult = left & right; + return false; +} + +template +bool IntMathCommon::Or(T left, T right, T *pResult) +{ + *pResult = left | right; + return false; +} + +template +bool IntMathCommon::Xor(T left, T right, T *pResult) +{ + *pResult = left ^ right; + return false; +} + +template +bool IntMathCommon::Neg(T val, T *pResult) +{ + if (val == MinValue) + { + *pResult = val; + return true; + } + *pResult = - val; + return false; +} + +template +bool IntMathCommon::Not(T val, T *pResult) +{ + *pResult = ~val; + return false; +} + +template +bool IntMathCommon::Inc(T 
val, T *pResult) +{ +#if __has_builtin(__builtin_add_overflow) + return __builtin_add_overflow(val, 1, pResult); +#else + *pResult = static_cast( + static_cast(val) + static_cast(1)); + // Overflow if result ends up less than input + return *pResult <= val; +#endif +} + +template +bool IntMathCommon::Dec(T val, T *pResult) +{ +#if __has_builtin(__builtin_sub_overflow) + return __builtin_sub_overflow(val, 1, pResult); +#else + *pResult = static_cast( + static_cast(val) - static_cast(1)); + // Overflow if result ends up greater than input + return *pResult >= val; +#endif +} + +template +T IntMathCommon::NearestInRangeTo(const T value, const T minimum, const T maximum) // inclusive +{ + Assert(minimum <= maximum); + return minimum >= value ? minimum : maximum <= value ? maximum : value; +} diff --git a/lib/Common/Common/Jobs.cpp b/lib/Common/Common/Jobs.cpp index 5627bb54ca9..e3000cf9d0e 100644 --- a/lib/Common/Common/Jobs.cpp +++ b/lib/Common/Common/Jobs.cpp @@ -833,7 +833,7 @@ namespace JsUtil { Assert(manager); - ParallelThreadData *threadDataProcessingCurrentJob = nullptr; + ParallelThreadData *threadDataProcessingCurrentJob = nullptr; { AutoCriticalSection lock(&criticalSection); // Managers must remove themselves. Hence, Close does not remove managers. So, not asserting on !IsClosed(). diff --git a/lib/Common/Common/Jobs.inl b/lib/Common/Common/Jobs.inl index 4bee3041048..406bc3fb735 100644 --- a/lib/Common/Common/Jobs.inl +++ b/lib/Common/Common/Jobs.inl @@ -13,7 +13,7 @@ namespace JsUtil template void JobProcessor::PrioritizeManagerAndWait(TJobManager *const manager, const unsigned int milliseconds) { - TemplateParameter::SameOrDerivedFrom; + TemplateParameter::SameOrDerivedFrom unused; Assert(manager); Assert(!isClosed); @@ -31,7 +31,7 @@ namespace JsUtil template void JobProcessor::AddJobAndProcessProactively(TJobManager *const manager, const TJobHolder holder) { - TemplateParameter::SameOrDerivedFrom; + TemplateParameter::SameOrDerivedFrom unused; Assert(manager); Assert(!isClosed); @@ -49,7 +49,7 @@ namespace JsUtil template bool JobProcessor::PrioritizeJob(TJobManager *const manager, const TJobHolder holder, void* function) { - TemplateParameter::SameOrDerivedFrom; + TemplateParameter::SameOrDerivedFrom unused; Assert(manager); Assert(!isClosed); @@ -89,7 +89,7 @@ namespace JsUtil template void ForegroundJobProcessor::AddJobAndProcessProactively(TJobManager *const manager, const TJobHolder holder) { - TemplateParameter::SameOrDerivedFrom; + TemplateParameter::SameOrDerivedFrom unused; Assert(manager); Assert(!IsClosed()); @@ -106,7 +106,7 @@ namespace JsUtil template void ForegroundJobProcessor::PrioritizeManagerAndWait(TJobManager *const manager, const unsigned int milliseconds) { - TemplateParameter::SameOrDerivedFrom; + TemplateParameter::SameOrDerivedFrom unused; Assert(manager); Assert(manager->isWaitable); Assert(!IsClosed()); @@ -154,7 +154,7 @@ namespace JsUtil template bool ForegroundJobProcessor::PrioritizeJob(TJobManager *const manager, const TJobHolder holder, void* function) { - TemplateParameter::SameOrDerivedFrom; + TemplateParameter::SameOrDerivedFrom unused; Assert(manager); Assert(!IsClosed()); @@ -219,7 +219,7 @@ namespace JsUtil template void BackgroundJobProcessor::AddJobAndProcessProactively(TJobManager *const manager, const TJobHolder holder) { - TemplateParameter::SameOrDerivedFrom; + TemplateParameter::SameOrDerivedFrom unused; Assert(manager); Assert(!IsClosed()); @@ -231,7 +231,7 @@ namespace JsUtil template void 
BackgroundJobProcessor::PrioritizeManagerAndWait(TJobManager *const manager, const unsigned int milliseconds) { - TemplateParameter::SameOrDerivedFrom; + TemplateParameter::SameOrDerivedFrom unused; Assert(manager); Assert(manager->isWaitable); @@ -316,7 +316,7 @@ namespace JsUtil template bool BackgroundJobProcessor::PrioritizeJob(TJobManager *const manager, const TJobHolder holder, void* function) { - TemplateParameter::SameOrDerivedFrom; + TemplateParameter::SameOrDerivedFrom unused; Assert(manager); Assert(!IsClosed()); diff --git a/lib/Common/CommonDefines.h b/lib/Common/CommonDefines.h index a03786df33d..18c05097016 100644 --- a/lib/Common/CommonDefines.h +++ b/lib/Common/CommonDefines.h @@ -183,6 +183,12 @@ #endif #endif +#if ENABLE_NATIVE_CODEGEN +#ifdef _WIN32 +#define ENABLE_OOP_NATIVE_CODEGEN 1 // Out of process JIT +#endif +#endif + // Other features // #define CHAKRA_CORE_DOWN_COMPAT 1 @@ -576,7 +582,6 @@ #endif #endif -#if _WIN32 || _WIN64 #if _M_IX86 #define I386_ASM 1 #endif //_M_IX86 @@ -590,7 +595,6 @@ #define ALLOC_XDATA (false) #endif #endif -#endif // _WIN32 || _WIN64 #ifndef _WIN32 #define DISABLE_SEH 1 diff --git a/lib/Common/CommonPal.h b/lib/Common/CommonPal.h index 84b47ac3640..83523ed0665 100644 --- a/lib/Common/CommonPal.h +++ b/lib/Common/CommonPal.h @@ -47,6 +47,10 @@ #define CLANG_WNO_END #endif +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + #ifdef _WIN32 #pragma warning(push) #pragma warning(disable: 4995) /* 'function': name was marked as #pragma deprecated */ @@ -83,6 +87,7 @@ __forceinline void __int2c() #define STRSAFE_INLINE 1 #ifdef PAL_STDCPP_COMPAT +#include #include #include #include @@ -96,6 +101,8 @@ __forceinline void __int2c() typedef char16_t char16; #define _u(s) u##s + +typedef GUID UUID; #define INIT_PRIORITY(x) __attribute__((init_priority(x))) #ifdef PAL_STDCPP_COMPAT @@ -134,12 +141,11 @@ inline void DebugBreak() // These are not available in pal #define fwprintf_s fwprintf -// sprintf_s overloaded in safecrt.h. Not sure why palrt.h redefines sprintf_s. -#undef sprintf_s -// #define sprintf_s PAL_sprintf_s // PAL LoadLibraryExW not supported #define LOAD_LIBRARY_SEARCH_SYSTEM32 0 +// winnt.h +#define FAST_FAIL_INVALID_ARG 5 // winerror.h #define FACILITY_JSCRIPT 2306 #define JSCRIPT_E_CANTEXECUTE _HRESULT_TYPEDEF_(0x89020001L) @@ -391,13 +397,6 @@ inline __int64 _abs64(__int64 n) return n < 0 ? -n : n; } -// xplat-todo: implement this for JIT and Concurrent/Partial GC -BOOL WINAPI GetModuleHandleEx( - _In_ DWORD dwFlags, - _In_opt_ LPCTSTR lpModuleName, - _Out_ HMODULE *phModule -); - int GetCurrentThreadStackLimits(ULONG_PTR* lowLimit, ULONG_PTR* highLimit); bool IsAddressOnStack(ULONG_PTR address); @@ -412,6 +411,30 @@ inline char16* wmemset(char16* wcs, char16 wc, size_t n) return wcs; } +inline errno_t wmemcpy_s(char16* dest, size_t destSize, const char16* src, size_t count) +{ + return memcpy_s(dest, sizeof(char16) * destSize, src, sizeof(char16) * count); +} + +inline int _wunlink(const char16* filename) +{ + // WARN: does not set errno when fail + return DeleteFile(filename) ? 0 : -1; +} + +template +inline errno_t _wcserror_s(char16 (&buffer)[size], int errnum) +{ + const char* str = strerror(errnum); + // WARN: does not return detail errno when fail + return MultiByteToWideChar(CP_ACP, 0, str, -1, buffer, size) ? 
0 : -1; +} + +#define midl_user_allocate(size) \ + HeapAlloc(GetProcessHeap(), 0, (size)) +#define midl_user_free(ptr) \ + if (ptr != NULL) { HeapFree(GetProcessHeap(), NULL, ptr); } + DWORD __cdecl CharLowerBuffW(const char16* lpsz, DWORD cchLength); DWORD __cdecl CharUpperBuffW(const char16* lpsz, DWORD cchLength); diff --git a/lib/Common/Core/DelayLoadLibrary.cpp b/lib/Common/Core/DelayLoadLibrary.cpp index 6e4148d267b..ca812c28d49 100644 --- a/lib/Common/Core/DelayLoadLibrary.cpp +++ b/lib/Common/Core/DelayLoadLibrary.cpp @@ -50,7 +50,7 @@ bool DelayLoadLibrary::IsAvailable() return m_hModule != nullptr; } -#if PDATA_ENABLED +#if PDATA_ENABLED && _WIN32 static NtdllLibrary NtdllLibraryObject; NtdllLibrary* NtdllLibrary::Instance = &NtdllLibraryObject; diff --git a/lib/Common/Core/DelayLoadLibrary.h b/lib/Common/Core/DelayLoadLibrary.h index b454624faa7..7256c426e6d 100644 --- a/lib/Common/Core/DelayLoadLibrary.h +++ b/lib/Common/Core/DelayLoadLibrary.h @@ -25,7 +25,7 @@ class DelayLoadLibrary }; -#if PDATA_ENABLED +#if PDATA_ENABLED && _WIN32 // This needs to be delay loaded because it is available on // Win8 only diff --git a/lib/Common/DataStructures/BaseDictionary.h b/lib/Common/DataStructures/BaseDictionary.h index 27b3d692645..194e8a6613a 100644 --- a/lib/Common/DataStructures/BaseDictionary.h +++ b/lib/Common/DataStructures/BaseDictionary.h @@ -1306,15 +1306,15 @@ namespace JsUtil { Assert(dictionary.buckets == buckets); Assert(dictionary.bucketCount == bucketCount); - Assert(entryIndex >= -1); - Assert(entryIndex < dictionary.count); + Assert(this->entryIndex >= -1); + Assert(this->entryIndex < dictionary.count); Assert(bucketIndex == 0u - 1 || bucketIndex <= bucketCount); Assert(previousEntryIndexInBucket >= -2); Assert(previousEntryIndexInBucket < dictionary.count); Assert(indexOfEntryAfterRemovedEntry >= -2); Assert(indexOfEntryAfterRemovedEntry < dictionary.count); - return Base::IsValid() && entryIndex >= 0; + return Base::IsValid() && this->entryIndex >= 0; } public: @@ -1322,13 +1322,13 @@ namespace JsUtil { if(IsValid()) { - previousEntryIndexInBucket = entryIndex; - entryIndex = Current().next; + previousEntryIndexInBucket = this->entryIndex; + this->entryIndex = this->Current().next; } else { Assert(indexOfEntryAfterRemovedEntry >= -1); - entryIndex = indexOfEntryAfterRemovedEntry; + this->entryIndex = indexOfEntryAfterRemovedEntry; } if(!IsValid()) @@ -1344,7 +1344,7 @@ namespace JsUtil while(++bucketIndex < bucketCount) { - entryIndex = buckets[bucketIndex]; + this->entryIndex = buckets[bucketIndex]; if(IsValid()) { previousEntryIndexInBucket = -1; @@ -1358,10 +1358,10 @@ namespace JsUtil { Assert(previousEntryIndexInBucket >= -1); - indexOfEntryAfterRemovedEntry = Current().next; - dictionary.RemoveAt(entryIndex, previousEntryIndexInBucket, bucketIndex); - OnEntryRemoved(); - entryIndex = -1; + indexOfEntryAfterRemovedEntry = this->Current().next; + dictionary.RemoveAt(this->entryIndex, previousEntryIndexInBucket, bucketIndex); + this->OnEntryRemoved(); + this->entryIndex = -1; } }; @@ -1525,12 +1525,12 @@ namespace JsUtil BaseHashSet *Clone() { - return AllocatorNew(AllocatorType, alloc, BaseHashSet, *this); + return AllocatorNew(AllocatorType, this->alloc, BaseHashSet, *this); } void Copy(const BaseHashSet *const other) { - DoCopy(other); + this->DoCopy(other); } void LockResize() diff --git a/lib/Common/DataStructures/List.h b/lib/Common/DataStructures/List.h index d70f6e28878..99ac2a79e50 100644 --- a/lib/Common/DataStructures/List.h +++ 
b/lib/Common/DataStructures/List.h @@ -200,6 +200,7 @@ namespace JsUtil { public: typedef ReadOnlyList ParentType; + typedef typename ParentType::TComparerType TComparerType; typedef T TElementType; // For TRemovePolicy static const int DefaultIncrement = 4; @@ -532,7 +533,7 @@ namespace JsUtil template void MapAddress(TMapFunction map) const { - for (int i = 0; i < count; i++) + for (int i = 0; i < this->count; i++) { if (TRemovePolicyType::IsItemValid(this->buffer[i])) { @@ -556,7 +557,7 @@ namespace JsUtil template void ReverseMap(TMapFunction map) { - for (int i = count - 1; i >= 0; i--) + for (int i = this->count - 1; i >= 0; i--) { if (TRemovePolicyType::IsItemValid(this->buffer[i])) { diff --git a/lib/Common/Memory/CMakeLists.txt b/lib/Common/Memory/CMakeLists.txt index 4826bb6ca26..e67d0bf1473 100644 --- a/lib/Common/Memory/CMakeLists.txt +++ b/lib/Common/Memory/CMakeLists.txt @@ -39,12 +39,10 @@ set (CCM_SOURCE_FILES ${CCM_SOURCE_FILES} ) if(CC_TARGETS_AMD64) - # xplat-todo: Include platform\XDataAllocator.cpp - # Needed on windows, need a replacement for linux to do - # amd64 stack walking set (CCM_SOURCE_FILES ${CCM_SOURCE_FILES} + amd64/XDataAllocator.cpp amd64/amd64_SAVE_REGISTERS.S - ) + ) endif() add_library (Chakra.Common.Memory OBJECT diff --git a/lib/Common/Memory/CustomHeap.h b/lib/Common/Memory/CustomHeap.h index 45643749fa5..0812fdd5645 100644 --- a/lib/Common/Memory/CustomHeap.h +++ b/lib/Common/Memory/CustomHeap.h @@ -115,9 +115,7 @@ struct Allocation } return allocator; } - #endif - }; // Wrapper for the two HeapPageAllocator with and without the prereserved segment. @@ -400,7 +398,7 @@ class Heap void DecommitAll(); void FreeAll(); bool IsInHeap(__in void* address); - + // A page should be in full list if: // 1. It does not have any space // 2. Parent segment cannot allocate any more XDATA diff --git a/lib/Common/Memory/PageAllocator.cpp b/lib/Common/Memory/PageAllocator.cpp index 490ce9b5790..d2117ed35d6 100644 --- a/lib/Common/Memory/PageAllocator.cpp +++ b/lib/Common/Memory/PageAllocator.cpp @@ -30,6 +30,14 @@ SegmentBase::~SegmentBase() { Assert(this->allocator != nullptr); + // Cleanup secondaryAllocator before releasing pages so the destructor + // still has access to segment memory. + if(this->secondaryAllocator) + { + this->secondaryAllocator->Delete(); + this->secondaryAllocator = nullptr; + } + if (this->address) { char* originalAddress = this->address - (leadingGuardPageCount * AutoSystemInfo::PageSize); @@ -39,12 +47,6 @@ SegmentBase::~SegmentBase() RecyclerWriteBarrierManager::OnSegmentFree(this->address, this->segmentPageCount); #endif } - - if(this->secondaryAllocator) - { - this->secondaryAllocator->Delete(); - this->secondaryAllocator = nullptr; - } } template diff --git a/lib/Common/Memory/PageAllocator.h b/lib/Common/Memory/PageAllocator.h index a27d1cfbee1..0f22d0492f0 100644 --- a/lib/Common/Memory/PageAllocator.h +++ b/lib/Common/Memory/PageAllocator.h @@ -376,8 +376,8 @@ class PageAllocation template class PageAllocatorBase { - friend class CodeGenNumberThreadAllocator; - friend struct XProcNumberPageSegmentManager; + friend class ::CodeGenNumberThreadAllocator; + friend struct ::XProcNumberPageSegmentManager; // Allowing recycler to report external memory allocation. 
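
The this-> qualifications added to BaseDictionary and List above are required by the two-phase name lookup that clang and gcc enforce: a member inherited from a dependent base class is not found by unqualified lookup inside a class template, while MSVC has traditionally been permissive here. A minimal sketch of the rule, with purely illustrative names:

    // Members of a dependent base are invisible to unqualified lookup in a
    // class template; this-> defers the lookup to instantiation time.
    template <typename T>
    struct Base
    {
        int count = 0;
    };

    template <typename T>
    struct Derived : Base<T>
    {
        int DoubleCount()
        {
            // return count * 2;     // clang/gcc: use of undeclared identifier 'count'
            return this->count * 2;  // OK: looked up in the dependent base at instantiation
        }
    };

    int main()
    {
        Derived<int> d;
        d.count = 21;
        return d.DoubleCount() == 42 ? 0 : 1;
    }

The added typedef of ParentType::TComparerType in List.h addresses the same issue for dependent type names, which likewise are not visible without explicit qualification.
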
friend class Recycler; public: diff --git a/lib/Common/Memory/Recycler.h b/lib/Common/Memory/Recycler.h index 2bea0fe72b1..9a6501bf500 100644 --- a/lib/Common/Memory/Recycler.h +++ b/lib/Common/Memory/Recycler.h @@ -624,8 +624,8 @@ class Recycler friend class ActiveScriptProfilerHeapEnum; #endif friend class ScriptEngineBase; // This is for disabling GC for certain Host operations. - friend class CodeGenNumberThreadAllocator; - friend struct XProcNumberPageSegmentManager; + friend class ::CodeGenNumberThreadAllocator; + friend struct ::XProcNumberPageSegmentManager; public: static const uint ConcurrentThreadStackSize = 300000; static const bool FakeZeroLengthArray = true; @@ -2048,7 +2048,7 @@ class RecyclerHeapObjectInfo void* GetObjectAddress() const { return m_address; } #ifdef RECYCLER_PAGE_HEAP - bool IsPageHeapAlloc() + bool IsPageHeapAlloc() { return isUsingLargeHeapBlock && ((LargeHeapBlock*)m_heapBlock)->InPageHeapMode(); } diff --git a/lib/Common/Memory/amd64/XDataAllocator.cpp b/lib/Common/Memory/amd64/XDataAllocator.cpp index b8780de3a18..3cfa413bcee 100644 --- a/lib/Common/Memory/amd64/XDataAllocator.cpp +++ b/lib/Common/Memory/amd64/XDataAllocator.cpp @@ -66,9 +66,17 @@ bool XDataAllocator::Alloc(ULONG_PTR functionStart, DWORD functionSize, ushort p } else { + xdata->address = nullptr; OUTPUT_TRACE(Js::XDataAllocatorPhase, _u("No space for XDATA.\n")); } +#ifndef _WIN32 + if (xdata->address) + { + ClearHead(xdata->address); // mark empty .eh_frame + } +#endif + return xdata->address != nullptr; } @@ -111,6 +119,7 @@ void XDataAllocator::ClearFreeList() /* static */ void XDataAllocator::Register(XDataAllocation * xdataInfo, ULONG_PTR functionStart, DWORD functionSize) { +#ifdef _WIN32 ULONG_PTR baseAddress = functionStart; xdataInfo->pdata.BeginAddress = (DWORD)(functionStart - baseAddress); xdataInfo->pdata.EndAddress = (DWORD)(xdataInfo->pdata.BeginAddress + functionSize); @@ -143,11 +152,17 @@ void XDataAllocator::Register(XDataAllocation * xdataInfo, ULONG_PTR functionSta RUNTIME_FUNCTION *runtimeFunction = RtlLookupFunctionEntry((DWORD64)functionStart, &imageBase, nullptr); Assert(runtimeFunction != NULL); #endif + +#else // !_WIN32 + Assert(ReadHead(xdataInfo->address)); // should be non-empty .eh_frame + __register_frame(xdataInfo->address); +#endif } /* static */ void XDataAllocator::Unregister(XDataAllocation * xdataInfo) { +#ifdef _WIN32 // Delete the table if (AutoSystemInfo::Data.IsWin8OrLater()) { @@ -159,4 +174,8 @@ void XDataAllocator::Unregister(XDataAllocation * xdataInfo) Assert(success); } +#else // !_WIN32 + Assert(ReadHead(xdataInfo->address)); // should be non-empty .eh_frame + __deregister_frame(xdataInfo->address); +#endif } diff --git a/lib/Common/Memory/amd64/XDataAllocator.h b/lib/Common/Memory/amd64/XDataAllocator.h index 9497b94e0c8..c47c10ebf5d 100644 --- a/lib/Common/Memory/amd64/XDataAllocator.h +++ b/lib/Common/Memory/amd64/XDataAllocator.h @@ -7,9 +7,18 @@ CompileAssert(false) #endif #pragma once +#ifndef _WIN32 +extern "C" void __register_frame(const void* ehframe); +extern "C" void __deregister_frame(const void* ehframe); +#endif + namespace Memory { +#ifdef _WIN32 #define XDATA_SIZE (72) +#else +#define XDATA_SIZE (0x80) +#endif struct XDataAllocation : public SecondaryAllocation { @@ -24,8 +33,11 @@ struct XDataAllocation : public SecondaryAllocation { address = nullptr; } + +#ifdef _WIN32 RUNTIME_FUNCTION pdata; FunctionTableHandle functionTable; +#endif }; // @@ -69,15 +81,28 @@ class XDataAllocator sealed : public SecondaryAllocator 
void Release(const SecondaryAllocation& address); bool CanAllocate(); - static void XDataAllocator::Register(XDataAllocation * xdataInfo, ULONG_PTR functionStart, DWORD functionSize); + static void Register(XDataAllocation * xdataInfo, ULONG_PTR functionStart, DWORD functionSize); static void Unregister(XDataAllocation * xdataInfo); // -------- Private helpers ---------/ private: BYTE* End() { return start + size; } - void ClearFreeList(); - void PreparePdata(XDataAllocation* const xdata, ULONG_PTR functionStart, DWORD functionSize); +#ifndef _WIN32 + // Read .eh_frame data head (length record). 0 means empty. + static uint32 ReadHead(const void* p) + { + return *reinterpret_cast(p); + } + // Clear .eh_frame data head (length record). Set to 0 to mark empty. + static void ClearHead(void* p) + { + *reinterpret_cast(p) = 0; + } +#endif + + void ClearFreeList(); }; + } diff --git a/lib/JITClient/JITManager.h b/lib/JITClient/JITManager.h index 8d0b64c81d8..1f924393a84 100644 --- a/lib/JITClient/JITManager.h +++ b/lib/JITClient/JITManager.h @@ -5,6 +5,7 @@ #pragma once +#if ENABLE_OOP_NATIVE_CODEGEN class JITManager { public: @@ -96,3 +97,101 @@ class JITManager static JITManager s_jitManager; }; + +#else // !ENABLE_OOP_NATIVE_CODEGEN +class JITManager +{ +public: + HRESULT ConnectRpcServer(__in HANDLE jitProcessHandle, __in_opt void* serverSecurityDescriptor, __in UUID connectionUuid) + { Assert(false); return E_FAIL; } + + bool IsConnected() const { return false; } + bool IsJITServer() const { return false; } + void SetIsJITServer() { Assert(false); } + bool IsOOPJITEnabled() const { return false; } + void EnableOOPJIT() { Assert(false); } + + HANDLE GetJITTargetHandle() const + { Assert(false); return HANDLE(); } + + HRESULT InitializeThreadContext( + __in ThreadContextDataIDL * data, + __out intptr_t *threadContextInfoAddress, + __out intptr_t *prereservedRegionAddr) + { Assert(false); return E_FAIL; } + + HRESULT CleanupThreadContext( + __in intptr_t threadContextInfoAddress) + { Assert(false); return E_FAIL; } + + HRESULT UpdatePropertyRecordMap( + __in intptr_t threadContextInfoAddress, + __in UpdatedPropertysIDL * updatedProps) + { Assert(false); return E_FAIL; } + + HRESULT AddDOMFastPathHelper( + __in intptr_t scriptContextInfoAddress, + __in intptr_t funcInfoAddr, + __in int helper) + { Assert(false); return E_FAIL; } + + HRESULT AddModuleRecordInfo( + /* [in] */ intptr_t scriptContextInfoAddress, + /* [in] */ unsigned int moduleId, + /* [in] */ intptr_t localExportSlotsAddr) + { Assert(false); return E_FAIL; } + + HRESULT SetWellKnownHostTypeId( + __in intptr_t threadContextRoot, + __in int typeId) + { Assert(false); return E_FAIL; } + + HRESULT InitializeScriptContext( + __in ScriptContextDataIDL * data, + __out intptr_t *scriptContextInfoAddress) + { Assert(false); return E_FAIL; } + + HRESULT CleanupProcess() + { Assert(false); return E_FAIL; } + + HRESULT CleanupScriptContext( + __in intptr_t scriptContextInfoAddress) + { Assert(false); return E_FAIL; } + + HRESULT CloseScriptContext( + __in intptr_t scriptContextInfoAddress) + { Assert(false); return E_FAIL; } + + HRESULT FreeAllocation( + __in intptr_t threadContextInfoAddress, + __in intptr_t address) + { Assert(false); return E_FAIL; } + + HRESULT SetIsPRNGSeeded( + __in intptr_t scriptContextInfoAddress, + __in boolean value) + { Assert(false); return E_FAIL; } + + HRESULT IsNativeAddr( + __in intptr_t threadContextInfoAddress, + __in intptr_t address, + __out boolean * result) + { Assert(false); return E_FAIL; } + + 
HRESULT RemoteCodeGenCall( + __in CodeGenWorkItemIDL *workItemData, + __in intptr_t threadContextInfoAddress, + __in intptr_t scriptContextInfoAddress, + __out JITOutputIDL *jitData) + { Assert(false); return E_FAIL; } + + HRESULT Shutdown() + { Assert(false); return E_FAIL; } + + static JITManager * GetJITManager() + { return &s_jitManager; } + +private: + static JITManager s_jitManager; +}; +#endif // !ENABLE_OOP_NATIVE_CODEGEN diff --git a/lib/Jsrt/CMakeLists.txt b/lib/Jsrt/CMakeLists.txt index 28268cd0e68..ac3dc0d28c7 100644 --- a/lib/Jsrt/CMakeLists.txt +++ b/lib/Jsrt/CMakeLists.txt @@ -1,3 +1,7 @@ +if(BuildJIT) + set(chakra_backend_objects $) +endif() + add_library (Chakra.Jsrt STATIC Jsrt.cpp JsrtDebugUtils.cpp @@ -14,7 +18,8 @@ add_library (Chakra.Jsrt STATIC JsrtSourceHolder.cpp JsrtThreadService.cpp $ -# Do not take this in. We need to control the + ${chakra_backend_objects} +# Do not take this in. We need to control the # linker order because of global constructors # and cross dependencies among them # $ @@ -36,12 +41,14 @@ add_library (Chakra.Jsrt STATIC ) add_subdirectory(Core) - + target_include_directories ( Chakra.Jsrt PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} + ../Backend ../JITIDL ../Runtime ../Runtime/Base + ../Runtime/ByteCode ../Runtime/Debug ../Parser ) diff --git a/lib/Jsrt/Core/CMakeLists.txt b/lib/Jsrt/Core/CMakeLists.txt index 9cc30d2e349..ab2cd0f4bc0 100644 --- a/lib/Jsrt/Core/CMakeLists.txt +++ b/lib/Jsrt/Core/CMakeLists.txt @@ -4,8 +4,10 @@ add_library (Chakra.Jsrt.Core OBJECT target_include_directories ( Chakra.Jsrt.Core PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} + ../../Backend ../../JITIDL ../../Runtime + ../../Runtime/ByteCode ../../Parser ../ ) diff --git a/lib/Jsrt/JsrtHelper.cpp b/lib/Jsrt/JsrtHelper.cpp index 0de990bd2c5..986025aa147 100644 --- a/lib/Jsrt/JsrtHelper.cpp +++ b/lib/Jsrt/JsrtHelper.cpp @@ -6,6 +6,10 @@ #include "jsrtHelper.h" #include "Base/ThreadContextTlsEntry.h" +#ifdef DYNAMIC_PROFILE_STORAGE +#include "Language/DynamicProfileStorage.h" +#endif + #ifdef CHAKRA_STATIC_LIBRARY #include "Core/ConfigParser.h" diff --git a/lib/Runtime/Base/CMakeLists.txt b/lib/Runtime/Base/CMakeLists.txt index 27c9a60dd1f..de0af1925eb 100644 --- a/lib/Runtime/Base/CMakeLists.txt +++ b/lib/Runtime/Base/CMakeLists.txt @@ -29,9 +29,10 @@ add_library (Chakra.Runtime.Base OBJECT ThreadContextTlsEntry.cpp ThreadServiceWrapperBase.cpp Utf8SourceInfo.cpp - VTuneChakraProfile.cpp + VTuneChakraProfile.cpp # WindowsFoundationAdapter.cpp # WindowsGlobalizationAdapter.cpp + jitprofiling.cpp ) target_include_directories ( diff --git a/lib/Runtime/Base/CallInfo.h b/lib/Runtime/Base/CallInfo.h index 2508bfb91eb..e1c96c21b65 100644 --- a/lib/Runtime/Base/CallInfo.h +++ b/lib/Runtime/Base/CallInfo.h @@ -32,7 +32,7 @@ namespace Js explicit CallInfo(ushort count) : Flags(CallFlags_None) , Count(count) -#ifdef _WIN64 +#ifdef TARGET_64 , unused(0) #endif { @@ -41,7 +41,7 @@ namespace Js CallInfo(CallFlags flags, ushort count) : Flags(flags) , Count(count) -#ifdef _WIN64 +#ifdef TARGET_64 , unused(0) #endif { @@ -58,7 +58,7 @@ namespace Js // unsigned Count : 24; CallFlags Flags : 8; -#ifdef _WIN64 +#ifdef TARGET_64 unsigned unused : 32; #endif diff --git a/lib/Runtime/Base/Constants.cpp b/lib/Runtime/Base/Constants.cpp index 8b65ce14a51..c923563d086 100644 --- a/lib/Runtime/Base/Constants.cpp +++ b/lib/Runtime/Base/Constants.cpp @@ -25,6 +25,9 @@ const char16 Constants::UnknownScriptCode[] = _u("Unknown script code"); const char16 Constants::StringReplace[] = _u("String.prototype.replace"); 
const char16 Constants::StringMatch[] = _u("String.prototype.match"); +const uint64 Constants::ExponentMask = 0x3FF0000000000000; +const uint64 Constants::MantissaMask = 0x000FFFFFFFFFFFFF; + #ifdef _M_AMD64 const size_t Constants::StackLimitForScriptInterrupt = 0x7fffffffffffffff; #else diff --git a/lib/Runtime/Base/Constants.h b/lib/Runtime/Base/Constants.h index 80798afceae..910c5db82bc 100644 --- a/lib/Runtime/Base/Constants.h +++ b/lib/Runtime/Base/Constants.h @@ -47,8 +47,8 @@ namespace Js static const ArgSlot InvalidArgSlot = (ArgSlot)-1; static const uint32 InvalidSymID = (uint32)-1; - static const uint64 ExponentMask = 0x3FF0000000000000; - static const uint64 MantissaMask = 0x000FFFFFFFFFFFFF; + static const uint64 ExponentMask; + static const uint64 MantissaMask; static const int ReservedTypeIds = 2048; diff --git a/lib/Runtime/Base/Debug.h b/lib/Runtime/Base/Debug.h index 7cbb401e062..aa509c2107e 100644 --- a/lib/Runtime/Base/Debug.h +++ b/lib/Runtime/Base/Debug.h @@ -40,7 +40,7 @@ WCHAR* DumpCallStack(uint frameCount = -1); if(PHASE_TRACE((Phase), (Func))) \ { \ WCHAR prefixValue[512]; \ - swprintf_s(prefixValue, L"%s (#%d.%u, #%u)", (Func)->GetJITFunctionBody()->GetDisplayName(), \ + swprintf_s(prefixValue, _u("%s (#%d.%u, #%u)"), (Func)->GetJITFunctionBody()->GetDisplayName(), \ (int)(Func)->GetJITFunctionBody()->GetSourceContextId(), (Func)->GetWorkItem()->GetJITTimeInfo()->GetLocalFunctionId(), (Func)->GetJITFunctionBody()->GetFunctionNumber()); \ Output::TraceWithPrefix((Phase), prefixValue, __VA_ARGS__); \ } diff --git a/lib/Runtime/Base/FunctionBody.cpp b/lib/Runtime/Base/FunctionBody.cpp index 92a3f192a65..0dc2bf4df2a 100644 --- a/lib/Runtime/Base/FunctionBody.cpp +++ b/lib/Runtime/Base/FunctionBody.cpp @@ -596,7 +596,7 @@ namespace Js return static_cast(this->GetAuxPtrWithLock(AuxPointerType::FormalsPropIdArray)); } - void + void FunctionBody::SetFormalsPropIdArray(PropertyIdArray * propIdArray) { AssertMsg(propIdArray == nullptr || this->GetAuxPtrWithLock(AuxPointerType::FormalsPropIdArray) == nullptr, "Already set?"); @@ -978,7 +978,7 @@ namespace Js this->GetBoundPropertyRecords()->Item(pid, propRecord); return pid; - } + } SmallSpanSequence::SmallSpanSequence() : pStatementBuffer(nullptr), @@ -2893,7 +2893,7 @@ namespace Js BOOL FunctionBody::IsNativeOriginalEntryPoint() const { #if ENABLE_NATIVE_CODEGEN - return this->GetScriptContext()->IsNativeAddress(this->originalEntryPoint); + return this->GetScriptContext()->IsNativeAddress((void*)this->originalEntryPoint); #else return false; #endif @@ -2962,12 +2962,11 @@ namespace Js return IsIntermediateCodeGenThunk(directEntryPoint) || originalEntryPoint == directEntryPoint #if ENABLE_PROFILE_INFO || (directEntryPoint == DynamicProfileInfo::EnsureDynamicProfileInfoThunk && - this->IsFunctionBody() && this->GetFunctionBody()->IsNativeOriginalEntryPoint() + this->IsFunctionBody() && this->GetFunctionBody()->IsNativeOriginalEntryPoint()) #ifdef ASMJS_PLAT || (GetFunctionBody()->GetIsAsmJsFunction() && directEntryPoint == AsmJsDefaultEntryThunk) - || (IsAsmJsCodeGenThunk(directEntryPoint)) + || IsAsmJsCodeGenThunk(directEntryPoint) #endif - ); #endif ; } @@ -3151,7 +3150,7 @@ namespace Js #endif #if ENABLE_NATIVE_CODEGEN - void FunctionBody::SetNativeEntryPoint(FunctionEntryPointInfo* entryPointInfo, JavascriptMethod originalEntryPoint, Var directEntryPoint) + void FunctionBody::SetNativeEntryPoint(FunctionEntryPointInfo* entryPointInfo, JavascriptMethod originalEntryPoint, JavascriptMethod directEntryPoint) { 
if(entryPointInfo->nativeEntryPointProcessed) { @@ -3173,7 +3172,7 @@ namespace Js } else { - entryPointInfo->jsMethod = reinterpret_cast(directEntryPoint); + entryPointInfo->jsMethod = directEntryPoint; } if (isAsmJs) { @@ -3255,7 +3254,7 @@ namespace Js Assert(reinterpret_cast(entryPointInfo->jsMethod) == nullptr); entryPointInfo->jsMethod = entryPoint; - ((Js::LoopEntryPointInfo*)entryPointInfo)->totalJittedLoopIterations = + ((Js::LoopEntryPointInfo*)entryPointInfo)->totalJittedLoopIterations = static_cast( min( static_cast(static_cast(CONFIG_FLAG(MinBailOutsBeforeRejitForLoops))) * @@ -5892,7 +5891,7 @@ namespace Js // move back to the interpreter, the original entry point is going to be the dynamic interpreter thunk originalEntryPoint = m_dynamicInterpreterThunk - ? static_cast(InterpreterThunkEmitter::ConvertToEntryPoint(m_dynamicInterpreterThunk)) + ? reinterpret_cast(InterpreterThunkEmitter::ConvertToEntryPoint(m_dynamicInterpreterThunk)) : DefaultEntryThunk; #else originalEntryPoint = DefaultEntryThunk; @@ -7787,7 +7786,7 @@ namespace Js void EntryPointInfo::EnsureIsReadyToCall() { ProcessJitTransferData(); - + #if !FLOATVAR if (this->numberPageSegments) { @@ -8338,7 +8337,7 @@ namespace Js int index = this->inlineeFrameMap->BinarySearch([=](const NativeOffsetInlineeFramePair& pair, int index) { if (pair.offset >= offset) { - if (index == 0 || index > 0 && this->inlineeFrameMap->Item(index - 1).offset < offset) + if (index == 0 || (index > 0 && this->inlineeFrameMap->Item(index - 1).offset < offset)) { return 0; } @@ -8375,7 +8374,7 @@ namespace Js if (item.offset >= offset) { - if (midIndex == 0 || midIndex > 0 && offsets[midIndex - 1].offset < offset) + if (midIndex == 0 || (midIndex > 0 && offsets[midIndex - 1].offset < offset)) { if (offsets[midIndex].recordOffset == NativeOffsetInlineeFrameRecordOffset::InvalidRecordOffset) { @@ -8410,7 +8409,7 @@ namespace Js // find the closest entry which is greater than the current offset. if (record.offset >= offset) { - if (index == 0 || index > 0 && this->bailoutRecordMap->Item(index - 1).offset < offset) + if (index == 0 || (index > 0 && this->bailoutRecordMap->Item(index - 1).offset < offset)) { return 0; } @@ -8466,7 +8465,7 @@ namespace Js } else { - HeapDeletePlus(offsetof(PinnedTypeRefsIDL, typeRefs) + sizeof(void*)*jitTransferData->runtimeTypeRefs->count - sizeof(PinnedTypeRefsIDL), + HeapDeletePlus(offsetof(PinnedTypeRefsIDL, typeRefs) + sizeof(void*)*jitTransferData->runtimeTypeRefs->count - sizeof(PinnedTypeRefsIDL), jitTransferData->runtimeTypeRefs); } jitTransferData->runtimeTypeRefs = nullptr; @@ -8680,6 +8679,28 @@ namespace Js { if (this->GetState() != CleanedUp) { + // Unregister xdataInfo before OnCleanup() which may release xdataInfo->address +#if ENABLE_NATIVE_CODEGEN +#if defined(_M_X64) + if (this->xdataInfo != nullptr) + { + XDataAllocator::Unregister(this->xdataInfo); + HeapDelete(this->xdataInfo); + this->xdataInfo = nullptr; + } +#elif defined(_M_ARM32_OR_ARM64) + if (this->xdataInfo != nullptr) + { + XDataAllocator::Unregister(this->xdataInfo); + if (JITManager::GetJITManager()->IsOOPJITEnabled()) + { + HeapDelete(this->xdataInfo); + } + this->xdataInfo = nullptr; + } +#endif +#endif + this->OnCleanup(isShutdown); #if ENABLE_NATIVE_CODEGEN @@ -8710,7 +8731,6 @@ namespace Js { this->constructorCaches->Clear(); } - #endif // This is how we set the CleanedUp state @@ -9174,7 +9194,7 @@ namespace Js // that are using the simple JIT code, and update the original entry point as necessary as well. 
const JavascriptMethod newOriginalEntryPoint = functionBody->GetDynamicInterpreterEntryPoint() - ? static_cast( + ? reinterpret_cast( InterpreterThunkEmitter::ConvertToEntryPoint(functionBody->GetDynamicInterpreterEntryPoint())) : DefaultEntryThunk; const JavascriptMethod currentThunk = functionBody->GetScriptContext()->CurrentThunk; diff --git a/lib/Runtime/Base/FunctionBody.h b/lib/Runtime/Base/FunctionBody.h index 4bdff419cfc..ff5754586b9 100644 --- a/lib/Runtime/Base/FunctionBody.h +++ b/lib/Runtime/Base/FunctionBody.h @@ -88,11 +88,11 @@ namespace Js public: static PropertyGuard* New(Recycler* recycler) { return RecyclerNewLeaf(recycler, Js::PropertyGuard); } PropertyGuard() : value(GuardValue::Uninitialized) {} - PropertyGuard(intptr_t value) : value(value) - { + PropertyGuard(intptr_t value) : value(value) + { // GuardValue::Invalidated and GuardValue::Invalidated_DuringSweeping can only be set using // Invalidate() and InvalidatedDuringSweep() methods respectively. - Assert(this->value != GuardValue::Invalidated && this->value != GuardValue::Invalidated_DuringSweep); + Assert(this->value != GuardValue::Invalidated && this->value != GuardValue::Invalidated_DuringSweep); } inline static size_t const GetSizeOfValue() { return sizeof(((PropertyGuard*)0)->value); } @@ -105,7 +105,7 @@ namespace Js } bool IsInvalidatedDuringSweep() { return this->value == GuardValue::Invalidated_DuringSweep; } void SetValue(intptr_t value) - { + { // GuardValue::Invalidated and GuardValue::Invalidated_DuringSweeping can only be set using // Invalidate() and InvalidatedDuringSweep() methods respectively. Assert(value != GuardValue::Invalidated && value != GuardValue::Invalidated_DuringSweep); @@ -114,7 +114,7 @@ namespace Js intptr_t const* GetAddressOfValue() { return &this->value; } void Invalidate() { this->value = GuardValue::Invalidated; } void InvalidateDuringSweep() - { + { #if DBG wasReincarnated = true; #endif @@ -513,7 +513,7 @@ namespace Js #if ENABLE_NATIVE_CODEGEN NativeCodeData * inProcJITNaticeCodedata; char* nativeDataBuffer; - union + union { Js::JavascriptNumber** numberArray; CodeGenNumberChunk* numberChunks; @@ -588,7 +588,7 @@ namespace Js void SetNumberChunks(CodeGenNumberChunk* chunks) { Assert(numberPageSegments == nullptr); - numberChunks = chunks; + numberChunks = chunks; } void SetNumberArray(Js::JavascriptNumber** array) { @@ -598,9 +598,9 @@ namespace Js void SetNumberPageSegment(XProcNumberPageSegment * segments) { Assert(numberPageSegments == nullptr); - numberPageSegments = segments; + numberPageSegments = segments; } - + #endif private: @@ -1106,7 +1106,7 @@ namespace Js { public: LoopHeader* loopHeader; - uint jittedLoopIterationsSinceLastBailout; // number of times the loop iterated in the jitted code before bailing out + uint jittedLoopIterationsSinceLastBailout; // number of times the loop iterated in the jitted code before bailing out uint totalJittedLoopIterations; // total number of times the loop has iterated in the jitted code for this entry point for a particular invocation of the loop LoopEntryPointInfo(LoopHeader* loopHeader, Js::JavascriptLibrary* library, void* validationCookie) : EntryPointInfo(nullptr, library, validationCookie, /*threadContext*/ nullptr, /*isLoopBody*/ true), @@ -1824,6 +1824,12 @@ namespace Js friend class ByteCodeBufferBuilder; friend class ByteCodeBufferReader; +#ifdef DYNAMIC_PROFILE_MUTATOR + friend class ::DynamicProfileMutator; + friend class ::DynamicProfileMutatorImpl; +#endif + friend class RemoteFunctionBody; + public: // same 
as MachDouble, used in the Func.h static const uint DIAGLOCALSLOTSIZE = 8; @@ -2165,7 +2171,7 @@ namespace Js #endif WriteBarrierPtr defaultFunctionEntryPointInfo; -#if ENABLE_PROFILE_INFO +#if ENABLE_PROFILE_INFO WriteBarrierPtr dynamicProfileInfo; #endif @@ -2184,7 +2190,7 @@ namespace Js #endif ); - void SetNativeEntryPoint(FunctionEntryPointInfo* entryPointInfo, JavascriptMethod originalEntryPoint, Var directEntryPoint); + void SetNativeEntryPoint(FunctionEntryPointInfo* entryPointInfo, JavascriptMethod originalEntryPoint, JavascriptMethod directEntryPoint); #if DYNAMIC_INTERPRETER_THUNK void GenerateDynamicInterpreterThunk(); #endif @@ -2855,8 +2861,8 @@ namespace Js (GetIsStrictMode() || hasNonSimpleParams) // Neither of the scopes are objects && !HasScopeObject(); - - return + + return // Regardless of the conditions above, we won't need a scope object if there aren't any formals. (GetInParamsCount() > 1 || GetHasRestParameter()) && !dontNeedScopeObject; @@ -3277,12 +3283,6 @@ namespace Js void EnsureAuxStatementData(); StatementAdjustmentRecordList* GetStatementAdjustmentRecords(); - -#ifdef DYNAMIC_PROFILE_MUTATOR - friend class DynamicProfileMutator; - friend class DynamicProfileMutatorImpl; -#endif - friend class RemoteFunctionBody; }; typedef SynchronizableList > FunctionBodyList; diff --git a/lib/Runtime/Base/ScriptContext.cpp b/lib/Runtime/Base/ScriptContext.cpp index 8d98c634da5..296e4e6e9e1 100644 --- a/lib/Runtime/Base/ScriptContext.cpp +++ b/lib/Runtime/Base/ScriptContext.cpp @@ -4824,9 +4824,9 @@ void ScriptContext::RegisterPrototypeChainEnsuredToHaveOnlyWritableDataPropertie return (JavascriptMethod)this->interpreterThunkEmitter->GetNextThunk(ppDynamicInterpreterThunk); } - BOOL ScriptContext::IsDynamicInterpreterThunk(void* address) + BOOL ScriptContext::IsDynamicInterpreterThunk(JavascriptMethod address) { - return this->interpreterThunkEmitter->IsInHeap(address); + return this->interpreterThunkEmitter->IsInHeap((void*)address); } void ScriptContext::ReleaseDynamicInterpreterThunk(BYTE* address, bool addtoFreeList) diff --git a/lib/Runtime/Base/ScriptContext.h b/lib/Runtime/Base/ScriptContext.h index c78b6f44e7a..ed32573e021 100644 --- a/lib/Runtime/Base/ScriptContext.h +++ b/lib/Runtime/Base/ScriptContext.h @@ -1571,7 +1571,7 @@ namespace Js static void SetEntryPointToProfileThunk(JavascriptFunction* function); static void RestoreEntryPointFromProfileThunk(JavascriptFunction* function); #endif - + static void RecyclerEnumClassEnumeratorCallback(void *address, size_t size); static void RecyclerFunctionCallbackForDebugger(void *address, size_t size); @@ -1647,7 +1647,7 @@ namespace Js #if DYNAMIC_INTERPRETER_THUNK JavascriptMethod GetNextDynamicAsmJsInterpreterThunk(PVOID* ppDynamicInterpreterThunk); JavascriptMethod GetNextDynamicInterpreterThunk(PVOID* ppDynamicInterpreterThunk); - BOOL IsDynamicInterpreterThunk(void* address); + BOOL IsDynamicInterpreterThunk(JavascriptMethod address); void ReleaseDynamicInterpreterThunk(BYTE* address, bool addtoFreeList); void ReleaseDynamicAsmJsInterpreterThunk(BYTE* address, bool addtoFreeList); #endif diff --git a/lib/Runtime/Base/ThreadContext.cpp b/lib/Runtime/Base/ThreadContext.cpp index 40b07eea40b..1989d7acb8f 100644 --- a/lib/Runtime/Base/ThreadContext.cpp +++ b/lib/Runtime/Base/ThreadContext.cpp @@ -215,7 +215,7 @@ ThreadContext::ThreadContext(AllocationPolicyManager * allocationPolicyManager, #ifdef ENABLE_CUSTOM_ENTROPY entropy.Initialize(); #endif - + #if ENABLE_NATIVE_CODEGEN this->bailOutRegisterSaveSpace = 
AnewArrayZ(this->GetThreadAlloc(), Js::Var, GetBailOutRegisterSaveSlotCount()); #endif @@ -321,7 +321,7 @@ ThreadContext::GetThreadStackLimitAddr() const return (intptr_t)GetAddressOfStackLimitForCurrentThread(); } -#if ENABLE_NATIVE_CODEGEN && (defined(_M_IX86) || defined(_M_X64)) +#if ENABLE_NATIVE_CODEGEN && defined(ENABLE_SIMDJS) && (defined(_M_IX86) || defined(_M_X64)) intptr_t ThreadContext::GetSimdTempAreaAddr(uint8 tempIndex) const { @@ -329,7 +329,7 @@ ThreadContext::GetSimdTempAreaAddr(uint8 tempIndex) const } #endif -intptr_t +intptr_t ThreadContext::GetDisableImplicitFlagsAddr() const { return (intptr_t)&disableImplicitFlags; @@ -1017,7 +1017,7 @@ ThreadContext::UncheckedAddPropertyId(JsUtil::CharacterBuffer const& prop if(this->TTDLog != nullptr && this->TTDLog->ShouldPerformDebugAction_SymbolCreation()) { //We reload all properties that occour in the trace so they only way we get here in TTD mode is: - //(1) if the program is creating a new symbol (which always gets a fresh id) and we should recreate it or + //(1) if the program is creating a new symbol (which always gets a fresh id) and we should recreate it or //(2) if it is forcing arguments in debug parse mode (instead of regular which we recorded in) if(isSymbol) { @@ -1984,9 +1984,9 @@ ThreadContext::EnsureJITThreadContext(bool allowPrereserveAlloc) // TODO: OOP JIT, use more generic method for getting name, e.g. in case of ChakraTest.dll #ifdef NTBUILD - contextData.chakraBaseAddress = (intptr_t)GetModuleHandle(L"Chakra.dll"); + contextData.chakraBaseAddress = (intptr_t)GetModuleHandle(_u("Chakra.dll")); #else - contextData.chakraBaseAddress = (intptr_t)GetModuleHandle(L"ChakraCore.dll"); + contextData.chakraBaseAddress = (intptr_t)GetModuleHandle(_u("ChakraCore.dll")); #endif contextData.crtBaseAddress = (intptr_t)GetModuleHandle(UCrtC99MathApis::LibraryName); contextData.threadStackLimitAddr = reinterpret_cast(GetAddressOfStackLimitForCurrentThread()); @@ -2000,7 +2000,7 @@ ThreadContext::EnsureJITThreadContext(bool allowPrereserveAlloc) contextData.scriptStackLimit = GetScriptStackLimit(); contextData.isThreadBound = IsThreadBound(); contextData.allowPrereserveAlloc = allowPrereserveAlloc; -#if _M_IX86 || _M_AMD64 +#if defined(ENABLE_SIMDJS) && (_M_IX86 || _M_AMD64) contextData.simdTempAreaBaseAddr = (intptr_t)GetSimdTempArea(); #endif @@ -2102,7 +2102,7 @@ ThreadContext::ExecuteRecyclerCollectionFunction(Recycler * recycler, Collection #if ENABLE_TTD // - //TODO: We leak any references that are JsReleased by the host in collection callbacks. Later we should defer these events to the end of the + //TODO: We leak any references that are JsReleased by the host in collection callbacks. Later we should defer these events to the end of the // top-level call or the next external call and then append them to the log. // @@ -3277,7 +3277,7 @@ ThreadContext::RegisterUniquePropertyGuard(Js::PropertyId propertyId, RecyclerWe bool foundExistingGuard; - + PropertyGuardEntry* entry = EnsurePropertyGuardEntry(propertyRecord, foundExistingGuard); entry->uniqueGuards.Item(guardWeakRef); @@ -3357,7 +3357,7 @@ ThreadContext::InvalidatePropertyGuardEntry(const Js::PropertyRecord* propertyRe entry->uniqueGuards.Clear(); - + // Count no. of invalidations done so far. Exclude if this is all property guards invalidation in which case // the unique Guards will be cleared anyway. 
if (!isAllPropertyGuardsInvalidation) diff --git a/lib/Runtime/Base/ThreadContext.h b/lib/Runtime/Base/ThreadContext.h index 8358f364173..fdde02a132e 100644 --- a/lib/Runtime/Base/ThreadContext.h +++ b/lib/Runtime/Base/ThreadContext.h @@ -475,7 +475,7 @@ class ThreadContext sealed : void GetSimdFuncSignatureFromOpcode(Js::OpCode op, SimdFuncSignature &funcSignature); #if _M_IX86 || _M_AMD64 - // auxiliary SIMD values in memory to help JIT'ed code. E.g. used for Int8x16 shuffle. + // auxiliary SIMD values in memory to help JIT'ed code. E.g. used for Int8x16 shuffle. _x86_SIMDValue X86_TEMP_SIMD[SIMD_TEMP_SIZE]; _x86_SIMDValue * GetSimdTempArea() { return X86_TEMP_SIMD; } #endif @@ -927,7 +927,7 @@ class ThreadContext sealed : JITTelemetry.Reset(); } #endif - + ParserStats GetParserStats() { return ParserTelemetry.GetStats(); @@ -1302,7 +1302,7 @@ class ThreadContext sealed : virtual intptr_t GetThreadStackLimitAddr() const override; -#if ENABLE_NATIVE_CODEGEN && (defined(_M_IX86) || defined(_M_X64)) +#if ENABLE_NATIVE_CODEGEN && defined(ENABLE_SIMDJS) && (defined(_M_IX86) || defined(_M_X64)) virtual intptr_t GetSimdTempAreaAddr(uint8 tempIndex) const override; #endif @@ -1508,7 +1508,7 @@ class ThreadContext sealed : return entropy; } #endif - + Js::ImplicitCallFlags * GetAddressOfImplicitCallFlags() { return &implicitCallFlags; diff --git a/lib/Runtime/Base/ThreadContextInfo.cpp b/lib/Runtime/Base/ThreadContextInfo.cpp index cdd215d7783..0b6d4247c88 100644 --- a/lib/Runtime/Base/ThreadContextInfo.cpp +++ b/lib/Runtime/Base/ThreadContextInfo.cpp @@ -367,7 +367,7 @@ ThreadContextInfo::IsJITActive() intptr_t SHIFT_ADDR(const ThreadContextInfo*const context, intptr_t address) { -#if ENABLE_NATIVE_CODEGEN +#if ENABLE_OOP_NATIVE_CODEGEN Assert(AutoSystemInfo::Data.IsJscriptModulePointer((void*)address)); ptrdiff_t diff = 0; if (JITManager::GetJITManager()->IsJITServer()) @@ -387,10 +387,10 @@ intptr_t SHIFT_ADDR(const ThreadContextInfo*const context, intptr_t address) intptr_t SHIFT_CRT_ADDR(const ThreadContextInfo*const context, intptr_t address) { -#if ENABLE_NATIVE_CODEGEN +#if ENABLE_OOP_NATIVE_CODEGEN if (AutoSystemInfo::Data.IsJscriptModulePointer((void*)address)) { - // the function is compiled to chakra.dll, or statically linked to crt + // the function is compiled to chakra.dll, or statically linked to crt return SHIFT_ADDR(context, address); } ptrdiff_t diff = 0; diff --git a/lib/Runtime/Base/ThreadContextInfo.h b/lib/Runtime/Base/ThreadContextInfo.h index 3857410171c..b2e20bbd73f 100644 --- a/lib/Runtime/Base/ThreadContextInfo.h +++ b/lib/Runtime/Base/ThreadContextInfo.h @@ -82,7 +82,7 @@ class ThreadContextInfo virtual intptr_t GetImplicitCallFlagsAddr() const = 0; #if ENABLE_NATIVE_CODEGEN -#if defined(_M_IX86) || defined(_M_X64) +#if defined(ENABLE_SIMDJS) && (defined(_M_IX86) || defined(_M_X64)) virtual intptr_t GetSimdTempAreaAddr(uint8 tempIndex) const = 0; #endif virtual intptr_t GetBailOutRegisterSaveSpaceAddr() const = 0; @@ -114,7 +114,7 @@ class ThreadContextInfo uint m_activeJITCount; bool m_isAllJITCodeInPreReservedRegion; - + }; diff --git a/lib/Runtime/ByteCode/AsmJsByteCodeDumper.cpp b/lib/Runtime/ByteCode/AsmJsByteCodeDumper.cpp index 29c0a645c94..8b720e0eb2b 100644 --- a/lib/Runtime/ByteCode/AsmJsByteCodeDumper.cpp +++ b/lib/Runtime/ByteCode/AsmJsByteCodeDumper.cpp @@ -259,7 +259,7 @@ namespace Js Output::Print(_u("\tI4(%d, %d, %d, %d),"), simdTable->i32[SIMD_X], simdTable->i32[SIMD_Y], simdTable->i32[SIMD_Z], simdTable->i32[SIMD_W]); Output::Print(_u("\tF4(%.4f, 
%.4f, %.4f, %.4f),"), simdTable->f32[SIMD_X], simdTable->f32[SIMD_Y], simdTable->f32[SIMD_Z], simdTable->f32[SIMD_W]); Output::Print(_u("\tD2(%.4f, %.4f)\n "), simdTable->f64[SIMD_X], simdTable->f64[SIMD_Y]); - Output::Print(_u("\tI8(%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d )\n "), + Output::Print(_u("\tI8(%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d )\n "), simdTable->i8[0], simdTable->i8[1], simdTable->i8[2], simdTable->i8[3], simdTable->i8[4], simdTable->i8[5], simdTable->i8[6], simdTable->i8[7], simdTable->i8[8], simdTable->i8[9], simdTable->i8[10], simdTable->i8[11], simdTable->i8[12], simdTable->i8[13], simdTable->i8[14], simdTable->i8[15]); ++simdTable; @@ -329,12 +329,12 @@ namespace Js void AsmJsByteCodeDumper::DumpUint32x4Reg(RegSlot reg) { - Output::Print(L"U4_%d ", (int)reg); + Output::Print(_u("U4_%d "), (int)reg); } void AsmJsByteCodeDumper::DumpInt16x8Reg(RegSlot reg) { - Output::Print(L"I8_%d ", (int)reg); + Output::Print(_u("I8_%d "), (int)reg); } // Int8x16 @@ -350,24 +350,24 @@ namespace Js void AsmJsByteCodeDumper::DumpUint8x16Reg(RegSlot reg) { - Output::Print(L"U16_%d ", (int)reg); + Output::Print(_u("U16_%d "), (int)reg); } // Bool32x4 void AsmJsByteCodeDumper::DumpBool32x4Reg(RegSlot reg) { - Output::Print(L"B4_%d ", (int)reg); + Output::Print(_u("B4_%d "), (int)reg); } // Bool16x8 void AsmJsByteCodeDumper::DumpBool16x8Reg(RegSlot reg) { - Output::Print(L"B8_%d ", (int)reg); + Output::Print(_u("B8_%d "), (int)reg); } // Bool32x4 void AsmJsByteCodeDumper::DumpBool8x16Reg(RegSlot reg) { - Output::Print(L"B16_%d ", (int)reg); + Output::Print(_u("B16_%d "), (int)reg); } // Float64x2 @@ -413,13 +413,13 @@ namespace Js Output::Print(_u(" I4_%d = R%d[%d] "), data->Value, data->Instance, data->SlotIndex); break; case OpCodeAsmJs::Simd128_LdSlot_B4: - Output::Print(L" B4_%d = R%d[%d] ", data->Value, data->Instance, data->SlotIndex); + Output::Print(_u(" B4_%d = R%d[%d] "), data->Value, data->Instance, data->SlotIndex); break; case OpCodeAsmJs::Simd128_LdSlot_B8: - Output::Print(L" B8_%d = R%d[%d] ", data->Value, data->Instance, data->SlotIndex); + Output::Print(_u(" B8_%d = R%d[%d] "), data->Value, data->Instance, data->SlotIndex); break; case OpCodeAsmJs::Simd128_LdSlot_B16: - Output::Print(L" B16_%d = R%d[%d] ", data->Value, data->Instance, data->SlotIndex); + Output::Print(_u(" B16_%d = R%d[%d] "), data->Value, data->Instance, data->SlotIndex); break; #if 0 case OpCodeAsmJs::Simd128_LdSlot_D2: @@ -435,13 +435,13 @@ namespace Js Output::Print(_u(" R%d[%d] = I4_%d"), data->Instance, data->SlotIndex, data->Value); break; case OpCodeAsmJs::Simd128_StSlot_B4: - Output::Print(L" R%d[%d] = B4_%d", data->Instance, data->SlotIndex, data->Value); + Output::Print(_u(" R%d[%d] = B4_%d"), data->Instance, data->SlotIndex, data->Value); break; case OpCodeAsmJs::Simd128_StSlot_B8: - Output::Print(L" R%d[%d] = B8_%d", data->Instance, data->SlotIndex, data->Value); + Output::Print(_u(" R%d[%d] = B8_%d"), data->Instance, data->SlotIndex, data->Value); break; case OpCodeAsmJs::Simd128_StSlot_B16: - Output::Print(L" R%d[%d] = B16_%d", data->Instance, data->SlotIndex, data->Value); + Output::Print(_u(" R%d[%d] = B16_%d"), data->Instance, data->SlotIndex, data->Value); break; #if 0 case OpCodeAsmJs::Simd128_StSlot_D2: @@ -460,7 +460,7 @@ namespace Js template void AsmJsByteCodeDumper::DumpAsmTypedArr(OpCodeAsmJs op, const unaligned T * data, FunctionBody * dumpFunction, ByteCodeReader& reader) { - char16* heapTag = nullptr; + const char16* heapTag = 
nullptr; char16 valueTag = 'I'; switch (data->ViewType) { @@ -1031,7 +1031,7 @@ namespace Js DumpInt32x4Reg(data->I4_1); DumpInt32x4Reg(data->I4_2); } - + template void AsmJsByteCodeDumper::DumpInt32x4_1Bool32x4_1Int32x4_2(OpCodeAsmJs op, const unaligned T * data, FunctionBody * dumpFunction, ByteCodeReader& reader) { @@ -1040,7 +1040,7 @@ namespace Js DumpInt32x4Reg(data->I4_2); DumpInt32x4Reg(data->I4_3); } - + template void AsmJsByteCodeDumper::DumpInt32x4_1Int4(OpCodeAsmJs op, const unaligned T * data, FunctionBody * dumpFunction, ByteCodeReader& reader) { @@ -1544,7 +1544,7 @@ namespace Js DumpInt8x16Reg(data->I16_0); DumpUint8x16Reg(data->U16_1); } - + // Disabled for now #if 0 // Float64x2 @@ -1748,7 +1748,7 @@ namespace Js DumpInt16x8Reg(data->I8_1); DumpInt16x8Reg(data->I8_2); } - + template void AsmJsByteCodeDumper::DumpBool16x8_1Int16x8_2(OpCodeAsmJs op, const unaligned T * data, FunctionBody * dumpFunction, ByteCodeReader& reader) { @@ -1895,7 +1895,7 @@ namespace Js DumpUint32x4Reg(data->U4_1); DumpUint32x4Reg(data->U4_2); } - + template void AsmJsByteCodeDumper::DumpBool32x4_1Uint32x4_2(OpCodeAsmJs op, const unaligned T * data, FunctionBody * dumpFunction, ByteCodeReader& reader) { @@ -2054,7 +2054,7 @@ namespace Js DumpUint16x8Reg(data->U8_1); DumpUint16x8Reg(data->U8_2); } - + template void AsmJsByteCodeDumper::DumpBool16x8_1Uint16x8_2(OpCodeAsmJs op, const unaligned T * data, FunctionBody * dumpFunction, ByteCodeReader& reader) { @@ -2228,7 +2228,7 @@ namespace Js DumpUint8x16Reg(data->U16_1); DumpIntReg(data->I2); DumpIntReg(data->I3); - + } template @@ -2329,11 +2329,11 @@ namespace Js DumpBool8x16Reg(data->B16_0); DumpIntReg(data->I1); } - + template void AsmJsByteCodeDumper::DumpAsmSimdTypedArr(OpCodeAsmJs op, const unaligned T * data, FunctionBody * dumpFunction, ByteCodeReader& reader) { - char16* heapTag = nullptr; + const char16* heapTag = nullptr; switch (data->ViewType) { diff --git a/lib/Runtime/ByteCode/ByteCodeReader.cpp b/lib/Runtime/ByteCode/ByteCodeReader.cpp index 7f31ddc629f..0a251a7421e 100644 --- a/lib/Runtime/ByteCode/ByteCodeReader.cpp +++ b/lib/Runtime/ByteCode/ByteCodeReader.cpp @@ -49,21 +49,6 @@ namespace Js #endif } - template - const unaligned LayoutType * ByteCodeReader::GetLayout() - { - size_t layoutSize = sizeof(LayoutType); - - AssertMsg((layoutSize > 0) && (layoutSize < 100), "Ensure valid layout size"); - - const byte * layoutData = m_currentLocation; - m_currentLocation += layoutSize; - - Assert(m_currentLocation <= m_endLocation); - - return reinterpret_cast(layoutData); - } - template const unaligned LayoutType * ByteCodeReader::GetLayout(const byte*& ip) { @@ -80,12 +65,6 @@ namespace Js return reinterpret_cast(layoutData); } - template<> - const unaligned OpLayoutEmpty * ByteCodeReader::GetLayout() - { - return nullptr; - } - template<> const unaligned OpLayoutEmpty * ByteCodeReader::GetLayout(const byte*& ip) { diff --git a/lib/Runtime/ByteCode/ByteCodeReader.h b/lib/Runtime/ByteCode/ByteCodeReader.h index d2272d13abf..9effd33b88f 100644 --- a/lib/Runtime/ByteCode/ByteCodeReader.h +++ b/lib/Runtime/ByteCode/ByteCodeReader.h @@ -86,4 +86,25 @@ namespace Js #endif }; + template + inline const unaligned LayoutType * ByteCodeReader::GetLayout() + { + size_t layoutSize = sizeof(LayoutType); + + AssertMsg((layoutSize > 0) && (layoutSize < 100), "Ensure valid layout size"); + + const byte * layoutData = m_currentLocation; + m_currentLocation += layoutSize; + + Assert(m_currentLocation <= m_endLocation); + + return 
reinterpret_cast(layoutData); + } + + template<> + inline const unaligned OpLayoutEmpty * ByteCodeReader::GetLayout() + { + return nullptr; + } + } // namespace Js diff --git a/lib/Runtime/ByteCode/ByteCodeWriter.cpp b/lib/Runtime/ByteCode/ByteCodeWriter.cpp index c9867563528..71c9517643f 100644 --- a/lib/Runtime/ByteCode/ByteCodeWriter.cpp +++ b/lib/Runtime/ByteCode/ByteCodeWriter.cpp @@ -292,7 +292,7 @@ namespace Js return this->m_functionWrite->MapRegSlot(reg); } - inline void ByteCodeWriter::CheckOpen() + void ByteCodeWriter::CheckOpen() { AssertMsg(m_functionWrite != nullptr, "Must Begin() a function to write byte-code into"); } @@ -306,7 +306,7 @@ namespace Js AssertMsg(OpCodeUtil::GetOpCodeLayout(op) == layoutType, "Ensure correct layout for OpCode"); } - inline void ByteCodeWriter::CheckLabel(ByteCodeLabel labelID) + void ByteCodeWriter::CheckLabel(ByteCodeLabel labelID) { AssertMsg(labelID < m_labelOffsets->Count(), "Label must be previously defined before being marked in the byte-code"); @@ -3276,7 +3276,7 @@ namespace Js return Write(rawData, byteSize); } - inline uint ByteCodeWriter::Data::Write(__in_bcount(byteSize) const void* data, __in uint byteSize) + uint ByteCodeWriter::Data::Write(__in_bcount(byteSize) const void* data, __in uint byteSize) { // Simple case where the current chunk has enough space. uint bytesFree = current->RemainingBytes(); diff --git a/lib/Runtime/ByteCode/ByteCodeWriter.h b/lib/Runtime/ByteCode/ByteCodeWriter.h index 8d620b3ebe7..1ada1c603a9 100644 --- a/lib/Runtime/ByteCode/ByteCodeWriter.h +++ b/lib/Runtime/ByteCode/ByteCodeWriter.h @@ -73,7 +73,7 @@ namespace Js uint currentOffset; // The global offset of last byte written to in the linked data structure bool fixedGrowthPolicy; - inline uint Write(__in_bcount(byteSize) const void* data, __in uint byteSize); + uint Write(__in_bcount(byteSize) const void* data, __in uint byteSize); _NOINLINE void SlowWrite(__in_bcount(byteSize) const void* data, __in uint byteSize); void AddChunk(uint byteSize); @@ -201,8 +201,8 @@ namespace Js RegSlot ConsumeReg(RegSlot reg); - inline void CheckOpen(); - inline void CheckLabel(ByteCodeLabel labelID); + void CheckOpen(); + void CheckLabel(ByteCodeLabel labelID); inline void CheckOp(OpCode op, OpLayoutType layoutType); inline void CheckReg(RegSlot registerID); diff --git a/lib/Runtime/Language/Arguments.h b/lib/Runtime/Language/Arguments.h index 52d034c411e..a7884f67622 100644 --- a/lib/Runtime/Language/Arguments.h +++ b/lib/Runtime/Language/Arguments.h @@ -4,17 +4,61 @@ //------------------------------------------------------------------------------------------------------- #pragma once +// To extract variadic args array after known args list: +// argx, callInfo, ... +// NOTE: The last known arg name is hard-coded to "callInfo". #ifdef _WIN32 -#define VA_LIST_TO_VARARRAY(vl, va, callInfo) Js::Var* va = (Js::Var*) vl; +#define DECLARE_ARGS_VARARRAY(va, ...) \ + va_list _vl; \ + va_start(_vl, callInfo); \ + Js::Var* va = (Js::Var*)_vl #else #if defined(_M_X64) || defined(_M_IX86) // We use a custom calling convention to invoke JavascriptMethod based on // System V AMD64 ABI. At entry of JavascriptMethod the stack layout is: // [Return Address] [function] [callInfo] [arg0] [arg1] ... 
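For context on the macro rework just below: RUNTIME_ARGUMENTS and ARGUMENTS now expand through DECLARE_ARGS_VARARRAY, which on non-Windows x64 recovers the Var array directly from the stack slots after the return address instead of going through va_list, and which hard-codes the last named parameter name to callInfo. A usage sketch of the common two-known-arg case follows; it is illustrative only, the entry point name is hypothetical, and the body is trimmed.

    Js::Var EntryExampleSketch(Js::RecyclableObject* function, Js::CallInfo callInfo, ...)
    {
        // Expands to DECLARE_ARGS_VARARRAY(_argsVarArray, callInfo) followed by
        // Js::ArgumentReader args(&callInfo, _argsVarArray). The _count_args
        // overload taking just "callInfo" reports the two known args
        // (function, callInfo), so _argsVarArray starts at the script-visible
        // arguments and args[0] is the "this" value.
        ARGUMENTS(args, callInfo);

        Js::Var thisArg = args[0];
        (void)thisArg;
        return function;   // placeholder; a real entry point returns a computed script value
    }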
// -#define VA_LIST_TO_VARARRAY(vl, va, callInfo) \ - Js::Var* va = reinterpret_cast(_AddressOfReturnAddress()) + 3; \ - Assert(*reinterpret_cast(va - 1) == callInfo); +#define DECLARE_ARGS_VARARRAY_N(va, n) \ + Js::Var* va = _get_va(_AddressOfReturnAddress(), n); \ + Assert(*reinterpret_cast(va - 1) == callInfo) + +#define DECLARE_ARGS_VARARRAY(va, ...) \ + DECLARE_ARGS_VARARRAY_N(va, _count_args(__VA_ARGS__)) + +inline Js::Var* _get_va(void* addrOfReturnAddress, int n) +{ + // All args are right after ReturnAddress by custom calling convention + Js::Var* pArgs = reinterpret_cast(addrOfReturnAddress) + 1; + return pArgs + n; +} + +inline int _count_args(Js::CallInfo callInfo) +{ + // This is to support typical runtime "ARGUMENTS(args, callInfo)" usage. + // Only "callInfo" listed, but we have 2 known args "function, callInfo". + return 2; +} +template +inline int _count_args(const T1&, Js::CallInfo callInfo) +{ + return 2; +} +template +inline int _count_args(const T1&, const T2&, Js::CallInfo callInfo) +{ + return 3; +} +template +inline int _count_args(const T1&, const T2&, const T3&, Js::CallInfo callInfo) +{ + return 4; +} +template +inline int _count_args(const T1&, const T2&, const T3&, const T4&, Js::CallInfo callInfo) +{ + return 5; +} + #else #error Not yet implemented #endif @@ -45,18 +89,20 @@ * used by JavaScript functions. It is a low level macro that does not try to * differentiate between script usable Vars and runtime data structures. * To be able to access only script usable args use the ARGUMENTS macro instead. + * + * The ... list must be + * * "callInfo", typically for JsMethod that has only 2 known args + * "function, callInfo"; + * * or the full known args list ending with "callInfo" (for some runtime + * helpers). */ -#define RUNTIME_ARGUMENTS(n, s) \ - va_list argptr; \ - va_start(argptr, s); \ - VA_LIST_TO_VARARRAY(argptr, _argsVarArray, s) \ - Js::Arguments n(s, _argsVarArray); - -#define ARGUMENTS(n, s) \ - va_list argptr; \ - va_start(argptr, s); \ - VA_LIST_TO_VARARRAY(argptr, _argsVarArray, s) \ - Js::ArgumentReader n(&s, _argsVarArray); +#define RUNTIME_ARGUMENTS(n, ...) \ + DECLARE_ARGS_VARARRAY(_argsVarArray, __VA_ARGS__); \ + Js::Arguments n(callInfo, _argsVarArray); + +#define ARGUMENTS(n, ...) 
\ + DECLARE_ARGS_VARARRAY(_argsVarArray, __VA_ARGS__); \ + Js::ArgumentReader n(&callInfo, _argsVarArray); namespace Js { diff --git a/lib/Runtime/Language/AsmJsByteCodeGenerator.cpp b/lib/Runtime/Language/AsmJsByteCodeGenerator.cpp index 5d331bedd6a..a0635b3eae4 100644 --- a/lib/Runtime/Language/AsmJsByteCodeGenerator.cpp +++ b/lib/Runtime/Language/AsmJsByteCodeGenerator.cpp @@ -20,7 +20,7 @@ namespace Js { - enum EBinaryMathOpCodes + enum EBinaryMathOpCodes: int { BMO_ADD, BMO_SUB, @@ -31,7 +31,7 @@ namespace Js BMO_MAX, }; - enum EBinaryMathOpCodesTypes + enum EBinaryMathOpCodesTypes: int { BMOT_Int, BMOT_UInt, @@ -48,7 +48,7 @@ namespace Js /*BMO_REM*/{ OpCodeAsmJs::Rem_Int, OpCodeAsmJs::Rem_UInt,OpCodeAsmJs::Nop, OpCodeAsmJs::Rem_Db } }; - enum EBinaryComparatorOpCodes + enum EBinaryComparatorOpCodes: int { /*<, <=, >, >=, ==, !=*/ BCO_LT, @@ -586,7 +586,7 @@ namespace Js case knopFlt: if (ParserWrapper::IsMinInt(pnode)) { - return EmitExpressionInfo(mFunction->GetConstRegister(MININT32), AsmJsType::Signed); + return EmitExpressionInfo(mFunction->GetConstRegister(INT32_MIN), AsmJsType::Signed); } else if (ParserWrapper::IsUnsigned(pnode)) { @@ -1749,7 +1749,7 @@ namespace Js argsInfo[6].location, argsInfo[7].location); break; case 9: - mWriter.AsmReg10(op, dst, argsInfo[0].location, argsInfo[1].location, argsInfo[2].location, argsInfo[3].location, argsInfo[4].location, argsInfo[5].location, + mWriter.AsmReg10(op, dst, argsInfo[0].location, argsInfo[1].location, argsInfo[2].location, argsInfo[3].location, argsInfo[4].location, argsInfo[5].location, argsInfo[6].location, argsInfo[7].location, argsInfo[8].location); break; case 10: @@ -1758,7 +1758,7 @@ namespace Js break; case 16: mWriter.AsmReg17(op, dst, argsInfo[0].location, argsInfo[1].location, argsInfo[2].location, argsInfo[3].location, argsInfo[4].location, argsInfo[5].location, - argsInfo[6].location, argsInfo[7].location, argsInfo[8].location, argsInfo[9].location, argsInfo[10].location, argsInfo[11].location, + argsInfo[6].location, argsInfo[7].location, argsInfo[8].location, argsInfo[9].location, argsInfo[10].location, argsInfo[11].location, argsInfo[12].location, argsInfo[13].location, argsInfo[14].location, argsInfo[15].location); break; @@ -2395,7 +2395,7 @@ namespace Js else if (ParserWrapper::IsMinInt(indexNode)) { // this is going to be an error, but we can do this to allow it to get same error message as invalid int - slot = (uint32)MININT32; + slot = (uint32)INT32_MIN; } else if (ParserWrapper::IsUnsigned(indexNode)) { diff --git a/lib/Runtime/Language/AsmJsByteCodeGenerator.h b/lib/Runtime/Language/AsmJsByteCodeGenerator.h index 17b48023120..4a33ce0505e 100644 --- a/lib/Runtime/Language/AsmJsByteCodeGenerator.h +++ b/lib/Runtime/Language/AsmJsByteCodeGenerator.h @@ -7,6 +7,9 @@ #ifndef TEMP_DISABLE_ASMJS namespace Js { + enum EBinaryMathOpCodes: int; + enum EBinaryComparatorOpCodes: int; + // Information about the expression that has been emitted struct EmitExpressionInfo { @@ -94,11 +97,11 @@ namespace Js EmitExpressionInfo EmitUnaryNeg( ParseNode * pnode ); EmitExpressionInfo EmitUnaryNot( ParseNode * pnode ); EmitExpressionInfo EmitUnaryLogNot( ParseNode * pnode ); - EmitExpressionInfo EmitBinaryMultiType( ParseNode * pnode, enum EBinaryMathOpCodes op ); + EmitExpressionInfo EmitBinaryMultiType( ParseNode * pnode, EBinaryMathOpCodes op ); EmitExpressionInfo EmitBinaryInt( ParseNode * pnode, OpCodeAsmJs op ); EmitExpressionInfo EmitQMark( ParseNode * pnode ); EmitExpressionInfo EmitSwitch( ParseNode * pnode ); - 
EmitExpressionInfo EmitBinaryComparator( ParseNode * pnode, enum EBinaryComparatorOpCodes op); + EmitExpressionInfo EmitBinaryComparator( ParseNode * pnode, EBinaryComparatorOpCodes op); EmitExpressionInfo EmitLoop( ParseNode *loopNode, ParseNode *cond, ParseNode *body, ParseNode *incr, BOOL doWhile = false ); EmitExpressionInfo EmitIf( ParseNode * pnode ); EmitExpressionInfo EmitBooleanExpression( ParseNode* pnodeCond, Js::ByteCodeLabel trueLabel, Js::ByteCodeLabel falseLabel ); diff --git a/lib/Runtime/Language/AsmJsEncoder.inl b/lib/Runtime/Language/AsmJsEncoder.inl index d85b9bda4d1..88a62677795 100644 --- a/lib/Runtime/Language/AsmJsEncoder.inl +++ b/lib/Runtime/Language/AsmJsEncoder.inl @@ -69,7 +69,7 @@ typedef double( *UnaryDoubleFunc )( double ); { \ PROCESS_ENCODE_READ_LAYOUT_ASMJS(name, Double2, suffix); \ int offsets[2] = {CalculateOffset(playout->D0),CalculateOffset(playout->D1)};\ - AsmJsJitTemplate::Call_Db::ApplyTemplate( this, mPc, 2, offsets, ((UnaryDoubleFunc)(func)),addEsp );\ + AsmJsJitTemplate::Call_Db::ApplyTemplate( this, mPc, 2, offsets, ((void*)(UnaryDoubleFunc)(func)),addEsp );\ break; \ } #define PROCESS_ENCODE_CALLDOUBLE2(name,func,layout) PROCESS_ENCODE_CALLDOUBLE2_COMMON(name,func,layout,) @@ -80,7 +80,7 @@ typedef double( *BinaryDoubleFunc )( double, double ); { \ PROCESS_ENCODE_READ_LAYOUT_ASMJS(name, Double3, suffix); \ int offsets[3] = {CalculateOffset(playout->D0),CalculateOffset(playout->D1),CalculateOffset(playout->D2)};\ - AsmJsJitTemplate::Call_Db::ApplyTemplate( this, mPc, 3, offsets, ((BinaryDoubleFunc)(func)),addEsp );\ + AsmJsJitTemplate::Call_Db::ApplyTemplate( this, mPc, 3, offsets, ((void*)(BinaryDoubleFunc)(func)),addEsp );\ break; \ } #define PROCESS_ENCODE_CALLDOUBLE3(name,func,addEsp) PROCESS_ENCODE_CALLDOUBLE3_COMMON(name,func,addEsp,) @@ -119,7 +119,7 @@ typedef float(*UnaryFloatFunc)(float); { \ PROCESS_ENCODE_READ_LAYOUT_ASMJS(name, Float2, suffix); \ int offsets[2] = { CalculateOffset(playout->F0), CalculateOffset(playout->F1) }; \ - AsmJsJitTemplate::Call_Flt::ApplyTemplate(this, mPc, 2, offsets, ((UnaryFloatFunc)(func)), addEsp); \ + AsmJsJitTemplate::Call_Flt::ApplyTemplate(this, mPc, 2, offsets, ((void*)(UnaryFloatFunc)(func)), addEsp); \ break; \ } #define PROCESS_ENCODE_CALLFLOAT2(name,func,layout) PROCESS_ENCODE_CALLFLOAT2_COMMON(name,func,layout,) diff --git a/lib/Runtime/Language/AsmJsLink.cpp b/lib/Runtime/Language/AsmJsLink.cpp index dcd81679e6f..fbcacb0e843 100644 --- a/lib/Runtime/Language/AsmJsLink.cpp +++ b/lib/Runtime/Language/AsmJsLink.cpp @@ -158,7 +158,7 @@ namespace Js{ AsmJsSIMDBuiltinFunction simdBuiltinFunc = (AsmJsSIMDBuiltinFunction)i; if (!CheckSimdLibraryMethod(scriptContext, asmSimdObject, simdBuiltinFunc)) { - AsmJSCompiler::OutputError(scriptContext, L"Asm.js Runtime Error : SIMD builtin function is invalid"); + AsmJSCompiler::OutputError(scriptContext, _u("Asm.js Runtime Error : SIMD builtin function is invalid")); return false; } } @@ -620,8 +620,8 @@ namespace Js{ } #endif - - + + bool ASMLink::CheckParams(ScriptContext* scriptContext, AsmJsModuleInfo* info, const Var stdlib, const Var foreign, const Var bufferView) { diff --git a/lib/Runtime/Language/AsmJsModule.cpp b/lib/Runtime/Language/AsmJsModule.cpp index 81039c57aef..30f8800f93a 100644 --- a/lib/Runtime/Language/AsmJsModule.cpp +++ b/lib/Runtime/Language/AsmJsModule.cpp @@ -874,7 +874,7 @@ namespace Js mathFunc = nullptr; simdFunc = nullptr; - if (!pnodeInit) + if (!pnodeInit) { return Fail(decl, _u("The righthand side of a var declaration 
missing an initialization (empty)")); } @@ -947,8 +947,8 @@ namespace Js { var->SetVarType(AsmJsVarType::Int); var->SetLocation(func->AcquireRegister()); - var->SetConstInitialiser(MININT); - loc = func->GetConstRegister(MININT); + var->SetConstInitialiser(INT_MIN); + loc = func->GetConstRegister(INT_MIN); } else if (ParserWrapper::IsUnsigned(pnodeInit)) { @@ -1615,13 +1615,13 @@ namespace Js simdFunctions[AsmJsSIMDBuiltin_float64x2_store1] = SIMDFunc(PropertyIds::store1, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 3, AsmJsSIMDBuiltin_float64x2_store1, OpCodeAsmJs::Simd128_StArr_D2, AsmJsRetType::Float64x2, AsmJsType::Void, AsmJsType::Int, AsmJsType::Float64x2)); #endif /* Int16x8 */ - simdFunctions[AsmJsSIMDBuiltin_Int16x8] = SIMDFunc(PropertyIds::Int16x8, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 8, AsmJsSIMDBuiltin_Int16x8, OpCodeAsmJs::Simd128_IntsToI8, AsmJsRetType::Int16x8, + simdFunctions[AsmJsSIMDBuiltin_Int16x8] = SIMDFunc(PropertyIds::Int16x8, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 8, AsmJsSIMDBuiltin_Int16x8, OpCodeAsmJs::Simd128_IntsToI8, AsmJsRetType::Int16x8, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish)); simdFunctions[AsmJsSIMDBuiltin_int16x8_check] = SIMDFunc(PropertyIds::check, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 1, AsmJsSIMDBuiltin_int16x8_check, OpCodeAsmJs::Simd128_Ld_I8 /*no dynamic checks*/, AsmJsRetType::Int16x8, AsmJsType::Int16x8)); simdFunctions[AsmJsSIMDBuiltin_int16x8_extractLane] = SIMDFunc(PropertyIds::extractLane, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int16x8_extractLane, OpCodeAsmJs::Simd128_ExtractLane_I8, AsmJsRetType::Signed, AsmJsType::Int16x8, AsmJsType::Int)); - simdFunctions[AsmJsSIMDBuiltin_int16x8_swizzle] = SIMDFunc(PropertyIds::swizzle, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 9, AsmJsSIMDBuiltin_int16x8_swizzle, OpCodeAsmJs::Simd128_Swizzle_I8, AsmJsRetType::Int16x8, AsmJsType::Int16x8, AsmJsType::Int, + simdFunctions[AsmJsSIMDBuiltin_int16x8_swizzle] = SIMDFunc(PropertyIds::swizzle, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 9, AsmJsSIMDBuiltin_int16x8_swizzle, OpCodeAsmJs::Simd128_Swizzle_I8, AsmJsRetType::Int16x8, AsmJsType::Int16x8, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int)); - simdFunctions[AsmJsSIMDBuiltin_int16x8_shuffle] = SIMDFunc(PropertyIds::shuffle, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 10, AsmJsSIMDBuiltin_int16x8_shuffle, OpCodeAsmJs::Simd128_Shuffle_I8, AsmJsRetType::Int16x8, AsmJsType::Int16x8, AsmJsType::Int16x8, + simdFunctions[AsmJsSIMDBuiltin_int16x8_shuffle] = SIMDFunc(PropertyIds::shuffle, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 10, AsmJsSIMDBuiltin_int16x8_shuffle, OpCodeAsmJs::Simd128_Shuffle_I8, AsmJsRetType::Int16x8, AsmJsType::Int16x8, AsmJsType::Int16x8, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int)); simdFunctions[AsmJsSIMDBuiltin_int16x8_splat] = SIMDFunc(PropertyIds::splat, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 1, AsmJsSIMDBuiltin_int16x8_splat, OpCodeAsmJs::Simd128_Splat_I8, AsmJsRetType::Int16x8, AsmJsType::Intish)); simdFunctions[AsmJsSIMDBuiltin_int16x8_replaceLane] = SIMDFunc(PropertyIds::replaceLane, Anew(&mAllocator, 
AsmJsSIMDFunction, nullptr, &mAllocator, 3, AsmJsSIMDBuiltin_int16x8_replaceLane, OpCodeAsmJs::Simd128_ReplaceLane_I8, AsmJsRetType::Int16x8, AsmJsType::Int16x8, AsmJsType::Int, AsmJsType::Intish)); @@ -1643,7 +1643,7 @@ namespace Js simdFunctions[AsmJsSIMDBuiltin_int16x8_greaterThan] = SIMDFunc(PropertyIds::greaterThan, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int16x8_greaterThan, OpCodeAsmJs::Simd128_Gt_I8, AsmJsRetType::Bool16x8, AsmJsType::Int16x8, AsmJsType::Int16x8)); simdFunctions[AsmJsSIMDBuiltin_int16x8_greaterThanOrEqual] = SIMDFunc(PropertyIds::greaterThanOrEqual, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int16x8_greaterThanOrEqual, OpCodeAsmJs::Simd128_GtEq_I8, AsmJsRetType::Bool16x8, AsmJsType::Int16x8, AsmJsType::Int16x8)); simdFunctions[AsmJsSIMDBuiltin_int16x8_select] = SIMDFunc(PropertyIds::select, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 3, AsmJsSIMDBuiltin_int16x8_select, OpCodeAsmJs::Simd128_Select_I8, AsmJsRetType::Int16x8, AsmJsType::Bool16x8, AsmJsType::Int16x8, AsmJsType::Int16x8)); - + simdFunctions[AsmJsSIMDBuiltin_int16x8_addSaturate] = SIMDFunc(PropertyIds::addSaturate, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int16x8_addSaturate, OpCodeAsmJs::Simd128_AddSaturate_I8, AsmJsRetType::Int16x8, AsmJsType::Int16x8, AsmJsType::Int16x8)); simdFunctions[AsmJsSIMDBuiltin_int16x8_subSaturate] = SIMDFunc(PropertyIds::subSaturate, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int16x8_subSaturate, OpCodeAsmJs::Simd128_SubSaturate_I8, AsmJsRetType::Int16x8, AsmJsType::Int16x8, AsmJsType::Int16x8)); simdFunctions[AsmJsSIMDBuiltin_int16x8_load] = SIMDFunc(PropertyIds::load, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int16x8_load, OpCodeAsmJs::Simd128_LdArr_I8, AsmJsRetType::Int16x8, AsmJsType::Void, AsmJsType::Int)); @@ -1671,7 +1671,7 @@ namespace Js simdFunctions[AsmJsSIMDBuiltin_int8x16_not] = SIMDFunc(PropertyIds::not_, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 1, AsmJsSIMDBuiltin_int8x16_not, OpCodeAsmJs::Simd128_Not_I16, AsmJsRetType::Int8x16, AsmJsType::Int8x16)); simdFunctions[AsmJsSIMDBuiltin_int8x16_shiftLeftByScalar] = SIMDFunc(PropertyIds::shiftLeftByScalar, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int8x16_shiftLeftByScalar, OpCodeAsmJs::Simd128_ShLtByScalar_I16, AsmJsRetType::Int8x16, AsmJsType::Int8x16, AsmJsType::Int)); simdFunctions[AsmJsSIMDBuiltin_int8x16_shiftRightByScalar] = SIMDFunc(PropertyIds::shiftRightByScalar, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int8x16_shiftRightByScalar, OpCodeAsmJs::Simd128_ShRtByScalar_I16, AsmJsRetType::Int8x16, AsmJsType::Int8x16, AsmJsType::Int)); - + simdFunctions[AsmJsSIMDBuiltin_int8x16_lessThan] = SIMDFunc(PropertyIds::lessThan, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int8x16_lessThan, OpCodeAsmJs::Simd128_Lt_I16, AsmJsRetType::Bool8x16, AsmJsType::Int8x16, AsmJsType::Int8x16)); simdFunctions[AsmJsSIMDBuiltin_int8x16_lessThanOrEqual] = SIMDFunc(PropertyIds::lessThanOrEqual, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int8x16_lessThanOrEqual, OpCodeAsmJs::Simd128_LtEq_I16, AsmJsRetType::Bool8x16, AsmJsType::Int8x16, AsmJsType::Int8x16)); simdFunctions[AsmJsSIMDBuiltin_int8x16_equal] = SIMDFunc(PropertyIds::equal, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, 
&mAllocator, 2, AsmJsSIMDBuiltin_int8x16_equal, OpCodeAsmJs::Simd128_Eq_I16, AsmJsRetType::Bool8x16, AsmJsType::Int8x16, AsmJsType::Int8x16)); @@ -1685,7 +1685,7 @@ namespace Js simdFunctions[AsmJsSIMDBuiltin_int8x16_subSaturate] = SIMDFunc(PropertyIds::subSaturate, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int8x16_subSaturate, OpCodeAsmJs::Simd128_SubSaturate_I16, AsmJsRetType::Int8x16, AsmJsType::Int8x16, AsmJsType::Int8x16)); simdFunctions[AsmJsSIMDBuiltin_int8x16_load] = SIMDFunc(PropertyIds::load, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int8x16_load, OpCodeAsmJs::Simd128_LdArr_I16, AsmJsRetType::Int8x16, AsmJsType::Void, AsmJsType::Int)); simdFunctions[AsmJsSIMDBuiltin_int8x16_store] = SIMDFunc(PropertyIds::store, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 3, AsmJsSIMDBuiltin_int8x16_store, OpCodeAsmJs::Simd128_StArr_I16, AsmJsRetType::Int8x16, AsmJsType::Void, AsmJsType::Int, AsmJsType::Int8x16)); - + simdFunctions[AsmJsSIMDBuiltin_int8x16_extractLane] = SIMDFunc(PropertyIds::extractLane, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_int8x16_extractLane, OpCodeAsmJs::Simd128_ExtractLane_I16, AsmJsRetType::Signed, AsmJsType::Int8x16, AsmJsType::Int)); simdFunctions[AsmJsSIMDBuiltin_int8x16_replaceLane] = SIMDFunc(PropertyIds::replaceLane, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 3, AsmJsSIMDBuiltin_int8x16_replaceLane, OpCodeAsmJs::Simd128_ReplaceLane_I16, AsmJsRetType::Int8x16, AsmJsType::Int8x16, AsmJsType::Int, AsmJsType::Intish)); simdFunctions[AsmJsSIMDBuiltin_int8x16_swizzle] = SIMDFunc(PropertyIds::swizzle, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 17, AsmJsSIMDBuiltin_int8x16_swizzle, OpCodeAsmJs::Simd128_Swizzle_I16, AsmJsRetType::Int8x16, AsmJsType::Int8x16, @@ -1720,7 +1720,7 @@ namespace Js simdFunctions[AsmJsSIMDBuiltin_uint32x4_mul ]= SIMDFunc(PropertyIds::mul, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_uint32x4_mul, OpCodeAsmJs::Simd128_Mul_U4, AsmJsRetType::Uint32x4, AsmJsType::Uint32x4, AsmJsType::Uint32x4)); simdFunctions[AsmJsSIMDBuiltin_uint32x4_shiftLeftByScalar ]= SIMDFunc(PropertyIds::shiftLeftByScalar, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_uint32x4_shiftLeftByScalar, OpCodeAsmJs::Simd128_ShLtByScalar_U4, AsmJsRetType::Uint32x4, AsmJsType::Uint32x4, AsmJsType::Int)); simdFunctions[AsmJsSIMDBuiltin_uint32x4_shiftRightByScalar ]= SIMDFunc(PropertyIds::shiftRightByScalar, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_uint32x4_shiftRightByScalar, OpCodeAsmJs::Simd128_ShRtByScalar_U4, AsmJsRetType::Uint32x4, AsmJsType::Uint32x4, AsmJsType::Int)); - + simdFunctions[AsmJsSIMDBuiltin_uint32x4_lessThan] = SIMDFunc(PropertyIds::lessThan, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_uint32x4_lessThan, OpCodeAsmJs::Simd128_Lt_U4, AsmJsRetType::Bool32x4, AsmJsType::Uint32x4, AsmJsType::Uint32x4)); simdFunctions[AsmJsSIMDBuiltin_uint32x4_lessThanOrEqual] = SIMDFunc(PropertyIds::lessThanOrEqual, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_uint32x4_lessThanOrEqual, OpCodeAsmJs::Simd128_LtEq_U4, AsmJsRetType::Bool32x4, AsmJsType::Uint32x4, AsmJsType::Uint32x4)); simdFunctions[AsmJsSIMDBuiltin_uint32x4_equal] = SIMDFunc(PropertyIds::equal, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_uint32x4_equal, 
OpCodeAsmJs::Simd128_Eq_U4, AsmJsRetType::Bool32x4, AsmJsType::Uint32x4, AsmJsType::Uint32x4)); @@ -1752,7 +1752,7 @@ namespace Js simdFunctions[AsmJsSIMDBuiltin_uint16x8_extractLane ]= SIMDFunc(PropertyIds::extractLane, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 2, AsmJsSIMDBuiltin_uint16x8_extractLane, OpCodeAsmJs::Simd128_ExtractLane_U8, AsmJsRetType::Unsigned, AsmJsType::Uint16x8, AsmJsType::Int)); simdFunctions[AsmJsSIMDBuiltin_uint16x8_swizzle ]= SIMDFunc(PropertyIds::swizzle, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 9, AsmJsSIMDBuiltin_uint16x8_swizzle, OpCodeAsmJs::Simd128_Swizzle_U8, AsmJsRetType::Uint16x8, AsmJsType::Uint16x8, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int)); - simdFunctions[AsmJsSIMDBuiltin_uint16x8_shuffle ]= SIMDFunc(PropertyIds::shuffle, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 10, AsmJsSIMDBuiltin_uint16x8_shuffle, OpCodeAsmJs::Simd128_Shuffle_U8, AsmJsRetType::Uint16x8, AsmJsType::Uint16x8, AsmJsType::Uint16x8, + simdFunctions[AsmJsSIMDBuiltin_uint16x8_shuffle ]= SIMDFunc(PropertyIds::shuffle, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 10, AsmJsSIMDBuiltin_uint16x8_shuffle, OpCodeAsmJs::Simd128_Shuffle_U8, AsmJsRetType::Uint16x8, AsmJsType::Uint16x8, AsmJsType::Uint16x8, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int, AsmJsType::Int)); simdFunctions[AsmJsSIMDBuiltin_uint16x8_splat ]= SIMDFunc(PropertyIds::splat, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 1, AsmJsSIMDBuiltin_uint16x8_splat, OpCodeAsmJs::Simd128_Splat_U8, AsmJsRetType::Uint16x8, AsmJsType::Intish)); simdFunctions[AsmJsSIMDBuiltin_uint16x8_replaceLane ]= SIMDFunc(PropertyIds::replaceLane, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 3, AsmJsSIMDBuiltin_uint16x8_replaceLane, OpCodeAsmJs::Simd128_ReplaceLane_U8, AsmJsRetType::Uint16x8, AsmJsType::Uint16x8, AsmJsType::Int, AsmJsType::Intish)); @@ -1860,7 +1860,7 @@ namespace Js simdFunctions[AsmJsSIMDBuiltin_bool16x8_not] = SIMDFunc(PropertyIds::not_, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 1, AsmJsSIMDBuiltin_bool16x8_not, OpCodeAsmJs::Simd128_Not_B8, AsmJsRetType::Bool16x8, AsmJsType::Bool16x8)); simdFunctions[AsmJsSIMDBuiltin_bool16x8_anyTrue] = SIMDFunc(PropertyIds::anyTrue, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 1, AsmJsSIMDBuiltin_bool16x8_anyTrue, OpCodeAsmJs::Simd128_AnyTrue_B8, AsmJsRetType::Signed, AsmJsType::Bool16x8)); simdFunctions[AsmJsSIMDBuiltin_bool16x8_allTrue] = SIMDFunc(PropertyIds::allTrue, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 1, AsmJsSIMDBuiltin_bool16x8_allTrue, OpCodeAsmJs::Simd128_AllTrue_B8, AsmJsRetType::Signed, AsmJsType::Bool16x8)); - + /* Bool8x16 builtins*/ //------------------- simdFunctions[AsmJsSIMDBuiltin_Bool8x16] = SIMDFunc(PropertyIds::Bool8x16, Anew(&mAllocator, AsmJsSIMDFunction, nullptr, &mAllocator, 16, AsmJsSIMDBuiltin_Bool8x16, OpCodeAsmJs::Simd128_IntsToB16, AsmJsRetType::Bool8x16, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish, AsmJsType::Intish)); @@ -1913,7 +1913,7 @@ namespace Js map = &mStdLibSIMDBool8x16Map; break; } - + if (simdFunctions[i].id && 
simdFunctions[i].val) { if (!AddStandardLibrarySIMDNameInMap(simdFunctions[i].id, simdFunctions[i].val, map)) @@ -1968,7 +1968,7 @@ namespace Js , mStdLibSIMDBool8x16Map(&mAllocator) , mStdLibSIMDFloat32x4Map(&mAllocator) , mStdLibSIMDFloat64x2Map(&mAllocator) - + { InitModuleNode( parser ); } @@ -2125,7 +2125,7 @@ namespace Js { var->SetVarType(AsmJsVarType::Int); var->SetLocation(mIntVarSpace.AcquireRegister()); - var->SetConstInitialiser(MININT); + var->SetConstInitialiser(INT_MIN); } else if (ParserWrapper::IsUnsigned(pnode)) { @@ -2420,7 +2420,7 @@ namespace Js { mModuleMemory.mMemorySize = (int)((mModuleMemory.mSimdOffset + mSimdVarSpace.GetTotalVarCount()) * SIMD_SLOTS_SPACE); } - + } } @@ -2866,7 +2866,7 @@ namespace Js return LookupStdLibSIMDNameInMap(fieldName, simdFunc, &mStdLibSIMDUint16x8Map); case PropertyIds::Uint8x16: return LookupStdLibSIMDNameInMap(fieldName, simdFunc, &mStdLibSIMDUint8x16Map); - + default: AssertMsg(false, "Invalid SIMD type"); return false; @@ -3077,7 +3077,7 @@ namespace Js default: Assert(UNREACHED); } - + } else if (nop == (uint)knopFlt) { diff --git a/lib/Runtime/Language/AsmJsModule.h b/lib/Runtime/Language/AsmJsModule.h index 13ad0c321b1..9729b7e7d58 100644 --- a/lib/Runtime/Language/AsmJsModule.h +++ b/lib/Runtime/Language/AsmJsModule.h @@ -72,7 +72,7 @@ namespace Js { typedef Js::Tick AsmJsCompileTime; namespace AsmJsLookupSource { - enum Source + enum Source: int { AsmJsModule, AsmJsFunction }; diff --git a/lib/Runtime/Language/AsmJsTypes.cpp b/lib/Runtime/Language/AsmJsTypes.cpp index 46f5f1172f0..98c3c5ab041 100644 --- a/lib/Runtime/Language/AsmJsTypes.cpp +++ b/lib/Runtime/Language/AsmJsTypes.cpp @@ -800,7 +800,7 @@ namespace Js va_start( arguments, retType ); for(ArgSlot iArg = 0; iArg < argCount; iArg++) { - SetArgType(va_arg(arguments, AsmJsType), iArg); + SetArgType(static_cast(va_arg(arguments, int)), iArg); } va_end(arguments); } @@ -1192,7 +1192,7 @@ namespace Js va_start(arguments, retType); for (ArgSlot iArg = 0; iArg < argCount; iArg++) { - SetArgType(va_arg(arguments, AsmJsType), iArg); + SetArgType(static_cast(va_arg(arguments, int)), iArg); } va_end(arguments); } diff --git a/lib/Runtime/Language/AsmJsTypes.h b/lib/Runtime/Language/AsmJsTypes.h index a4cb069128b..18ae557330f 100644 --- a/lib/Runtime/Language/AsmJsTypes.h +++ b/lib/Runtime/Language/AsmJsTypes.h @@ -51,7 +51,7 @@ namespace Js namespace ArrayBufferView { - enum ViewType + enum ViewType: int { TYPE_INT8 = 0, TYPE_UINT8, @@ -66,7 +66,7 @@ namespace Js } /* namespace ArrayBufferView */ // The asm.js spec recognizes this set of builtin Math functions. 
- enum AsmJSMathBuiltinFunction + enum AsmJSMathBuiltinFunction: int { #define ASMJS_MATH_FUNC_NAMES(name, propertyName) AsmJSMathBuiltin_##name, #include "AsmJsBuiltInNames.h" @@ -699,7 +699,7 @@ namespace Js { if( !mConstMap.ContainsKey( val ) ) { - mConstMap.Add( val, AcquireConstRegister() ); + mConstMap.Add( val, this->AcquireConstRegister() ); } } diff --git a/lib/Runtime/Language/AsmJsUtils.cpp b/lib/Runtime/Language/AsmJsUtils.cpp index 091be90da4e..319ce8741c9 100644 --- a/lib/Runtime/Language/AsmJsUtils.cpp +++ b/lib/Runtime/Language/AsmJsUtils.cpp @@ -287,21 +287,21 @@ namespace Js case AsmJsType::Bool32x4: if (!JavascriptSIMDBool32x4::Is(*origArgs)) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool32x4TypeMismatch, L"Bool32x4"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool32x4TypeMismatch, _u("Bool32x4")); } simdVal = ((JavascriptSIMDBool32x4*)(*origArgs))->GetValue(); break; case AsmJsType::Bool16x8: if (!JavascriptSIMDBool16x8::Is(*origArgs)) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool16x8TypeMismatch, L"Bool16x8"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool16x8TypeMismatch, _u("Bool16x8")); } simdVal = ((JavascriptSIMDBool16x8*)(*origArgs))->GetValue(); break; case AsmJsType::Bool8x16: if (!JavascriptSIMDBool8x16::Is(*origArgs)) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool8x16TypeMismatch, L"Bool8x16"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool8x16TypeMismatch, _u("Bool8x16")); } simdVal = ((JavascriptSIMDBool8x16*)(*origArgs))->GetValue(); break; @@ -322,35 +322,35 @@ namespace Js case AsmJsType::Int16x8: if (!JavascriptSIMDInt16x8::Is(*origArgs)) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdInt16x8TypeMismatch, L"Int16x8"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdInt16x8TypeMismatch, _u("Int16x8")); } simdVal = ((JavascriptSIMDInt16x8*)(*origArgs))->GetValue(); break; case AsmJsType::Int8x16: if (!JavascriptSIMDInt8x16::Is(*origArgs)) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdInt8x16TypeMismatch, L"Int8x16"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdInt8x16TypeMismatch, _u("Int8x16")); } simdVal = ((JavascriptSIMDInt8x16*)(*origArgs))->GetValue(); break; case AsmJsType::Uint32x4: if (!JavascriptSIMDUint32x4::Is(*origArgs)) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint32x4TypeMismatch, L"Uint32x4"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint32x4TypeMismatch, _u("Uint32x4")); } simdVal = ((JavascriptSIMDUint32x4*)(*origArgs))->GetValue(); break; case AsmJsType::Uint16x8: if (!JavascriptSIMDUint16x8::Is(*origArgs)) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint16x8TypeMismatch, L"Uint16x8"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint16x8TypeMismatch, _u("Uint16x8")); } simdVal = ((JavascriptSIMDUint16x8*)(*origArgs))->GetValue(); break; case AsmJsType::Uint8x16: if (!JavascriptSIMDUint8x16::Is(*origArgs)) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint8x16TypeMismatch, L"Uint8x16"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint8x16TypeMismatch, _u("Uint8x16")); } simdVal = ((JavascriptSIMDUint8x16*)(*origArgs))->GetValue(); break; @@ -560,21 +560,21 @@ namespace Js case AsmJsType::Bool32x4: if (i >= argInCount || !JavascriptSIMDBool32x4::Is(args.Values[i + 1])) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool32x4TypeMismatch, L"Bool32x4"); + 
JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool32x4TypeMismatch, _u("Bool32x4")); } simdVal = ((JavascriptSIMDBool32x4*)(args.Values[i + 1]))->GetValue(); break; case AsmJsType::Bool16x8: if (i >= argInCount || !JavascriptSIMDBool16x8::Is(args.Values[i + 1])) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool16x8TypeMismatch, L"Bool16x8"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool16x8TypeMismatch, _u("Bool16x8")); } simdVal = ((JavascriptSIMDBool16x8*)(args.Values[i + 1]))->GetValue(); break; case AsmJsType::Bool8x16: if (i >= argInCount || !JavascriptSIMDBool8x16::Is(args.Values[i + 1])) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool8x16TypeMismatch, L"Bool8x16"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdBool8x16TypeMismatch, _u("Bool8x16")); } simdVal = ((JavascriptSIMDBool8x16*)(args.Values[i + 1]))->GetValue(); break; @@ -595,35 +595,35 @@ namespace Js case AsmJsType::Int16x8: if (i >= argInCount || !JavascriptSIMDInt16x8::Is(args.Values[i + 1])) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdInt16x8TypeMismatch, L"Int16x8"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdInt16x8TypeMismatch, _u("Int16x8")); } simdVal = ((JavascriptSIMDInt16x8*)(args.Values[i + 1]))->GetValue(); break; case AsmJsType::Int8x16: if (i >= argInCount || !JavascriptSIMDInt8x16::Is(args.Values[i + 1])) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdInt8x16TypeMismatch, L"Int8x16"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdInt8x16TypeMismatch, _u("Int8x16")); } simdVal = ((JavascriptSIMDInt8x16*)(args.Values[i + 1]))->GetValue(); break; case AsmJsType::Uint32x4: if (i >= argInCount || !JavascriptSIMDUint32x4::Is(args.Values[i + 1])) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint32x4TypeMismatch, L"Uint32x4"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint32x4TypeMismatch, _u("Uint32x4")); } simdVal = ((JavascriptSIMDUint32x4*)(args.Values[i + 1]))->GetValue(); break; case AsmJsType::Uint16x8: if (i >= argInCount || !JavascriptSIMDUint16x8::Is(args.Values[i + 1])) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint16x8TypeMismatch, L"Uint16x8"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint16x8TypeMismatch, _u("Uint16x8")); } simdVal = ((JavascriptSIMDUint16x8*)(args.Values[i + 1]))->GetValue(); break; case AsmJsType::Uint8x16: if (i >= argInCount || !JavascriptSIMDUint8x16::Is(args.Values[i + 1])) { - JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint8x16TypeMismatch, L"Uint8x16"); + JavascriptError::ThrowTypeError(scriptContext, JSERR_SimdUint8x16TypeMismatch, _u("Uint8x16")); } simdVal = ((JavascriptSIMDUint8x16*)(args.Values[i + 1]))->GetValue(); break; diff --git a/lib/Runtime/Language/CMakeLists.txt b/lib/Runtime/Language/CMakeLists.txt index 20f87816ab4..df3581c4b5d 100644 --- a/lib/Runtime/Language/CMakeLists.txt +++ b/lib/Runtime/Language/CMakeLists.txt @@ -1,3 +1,7 @@ +if(BuildJIT) + add_definitions(-D_ENABLE_DYNAMIC_THUNKS=1) +endif() + set(CRL_SOURCE_FILES ${CRL_SOURCE_FILES} AsmJs.cpp AsmJsByteCodeGenerator.cpp @@ -26,32 +30,33 @@ set(CRL_SOURCE_FILES ${CRL_SOURCE_FILES} JavascriptStackWalker.cpp ModuleNamespace.cpp ModuleNamespaceEnumerator.cpp + ObjTypeSpecFldInfo.cpp ProfilingHelpers.cpp #ReadOnlyDynamicProfileInfo.cpp RuntimeLanguagePch.cpp - # SimdBool16x8Operation.cpp - # SimdBool16x8OperationX86X64.cpp - # SimdBool32x4Operation.cpp - # SimdBool32x4OperationX86X64.cpp - 
# SimdBool8x16Operation.cpp - # SimdBool8x16OperationX86X64.cpp - # SimdFloat32x4Operation.cpp - # SimdFloat32x4OperationX86X64.cpp - # SimdFloat64x2Operation.cpp - # SimdFloat64x2OperationX86X64.cpp - # SimdInt16x8Operation.cpp - # SimdInt16x8OperationX86X64.cpp - # SimdInt32x4Operation.cpp - # SimdInt32x4OperationX86X64.cpp - # SimdInt8x16Operation.cpp - # SimdInt8x16OperationX86X64.cpp - # SimdUint16x8Operation.cpp - # SimdUint16x8OperationX86X64.cpp - # SimdUint32x4Operation.cpp - # SimdUint32x4OperationX86X64.cpp - # SimdUint8x16Operation.cpp - # SimdUint8x16OperationX86X64.cpp - # SimdUtils.cpp + SimdBool16x8Operation.cpp + SimdBool16x8OperationX86X64.cpp + SimdBool32x4Operation.cpp + SimdBool32x4OperationX86X64.cpp + SimdBool8x16Operation.cpp + SimdBool8x16OperationX86X64.cpp + SimdFloat32x4Operation.cpp + SimdFloat32x4OperationX86X64.cpp + SimdFloat64x2Operation.cpp + SimdFloat64x2OperationX86X64.cpp + SimdInt16x8Operation.cpp + SimdInt16x8OperationX86X64.cpp + SimdInt32x4Operation.cpp + SimdInt32x4OperationX86X64.cpp + SimdInt8x16Operation.cpp + SimdInt8x16OperationX86X64.cpp + SimdUint16x8Operation.cpp + SimdUint16x8OperationX86X64.cpp + SimdUint32x4Operation.cpp + SimdUint32x4OperationX86X64.cpp + SimdUint8x16Operation.cpp + SimdUint8x16OperationX86X64.cpp + SimdUtils.cpp SourceDynamicProfileManager.cpp SourceTextModuleRecord.cpp StackTraceArguments.cpp @@ -64,6 +69,7 @@ if(CC_TARGETS_AMD64) amd64/AsmJsJitTemplate.cpp amd64/StackFrame.SystemV.cpp amd64/JavascriptOperatorsA.S + amd64/amd64_Thunks.S ) elseif(CC_TARGETS_X86) set (CRL_SOURCE_FILES ${CRL_SOURCE_FILES} diff --git a/lib/Runtime/Language/DynamicProfileInfo.cpp b/lib/Runtime/Language/DynamicProfileInfo.cpp index b5b36f0b3b8..1a15bffe2f0 100644 --- a/lib/Runtime/Language/DynamicProfileInfo.cpp +++ b/lib/Runtime/Language/DynamicProfileInfo.cpp @@ -338,7 +338,7 @@ namespace Js } else { - Assert(directEntryPoint == ProfileEntryThunk || functionBody->GetScriptContext()->IsNativeAddress(directEntryPoint)); + Assert(directEntryPoint == ProfileEntryThunk || functionBody->GetScriptContext()->IsNativeAddress((void*)directEntryPoint)); Assert(functionBody->HasExecutionDynamicProfileInfo()); } @@ -841,7 +841,7 @@ namespace Js return &arrayCallSiteInfo[index]; } - inline void DynamicProfileInfo::RecordFieldAccess(FunctionBody* functionBody, uint fieldAccessId, Var object, FldInfoFlags flags) + void DynamicProfileInfo::RecordFieldAccess(FunctionBody* functionBody, uint fieldAccessId, Var object, FldInfoFlags flags) { Assert(fieldAccessId < functionBody->GetProfiledFldCount()); FldInfoFlags oldFlags = fldInfo[fieldAccessId].flags; @@ -881,7 +881,7 @@ namespace Js } } - inline void DynamicProfileInfo::RecordDivideResultType(FunctionBody* body, ProfileId divideId, Var object) + void DynamicProfileInfo::RecordDivideResultType(FunctionBody* body, ProfileId divideId, Var object) { Assert(divideId < body->GetProfiledDivOrRemCount()); divideTypeInfo[divideId] = divideTypeInfo[divideId].Merge(object); @@ -922,7 +922,7 @@ namespace Js return divideTypeInfo[divideId]; } - inline void DynamicProfileInfo::RecordSwitchType(FunctionBody* body, ProfileId switchId, Var object) + void DynamicProfileInfo::RecordSwitchType(FunctionBody* body, ProfileId switchId, Var object) { Assert(switchId < body->GetProfiledSwitchCount()); switchTypeInfo[switchId] = switchTypeInfo[switchId].Merge(object); @@ -943,12 +943,12 @@ namespace Js _u("New profile cache state: %d\n"), this->polymorphicCacheState); } - inline void 
DynamicProfileInfo::RecordPolymorphicFieldAccess(FunctionBody* functionBody, uint fieldAccessId) + void DynamicProfileInfo::RecordPolymorphicFieldAccess(FunctionBody* functionBody, uint fieldAccessId) { this->RecordFieldAccess(functionBody, fieldAccessId, nullptr, FldInfo_Polymorphic); } - inline void DynamicProfileInfo::RecordSlotLoad(FunctionBody* functionBody, ProfileId slotLoadId, Var object) + void DynamicProfileInfo::RecordSlotLoad(FunctionBody* functionBody, ProfileId slotLoadId, Var object) { Assert(slotLoadId < functionBody->GetProfiledSlotCount()); slotInfo[slotLoadId] = slotInfo[slotLoadId].Merge(object); @@ -959,7 +959,7 @@ namespace Js return static_cast(oldFlags | newFlags); } - inline void DynamicProfileInfo::RecordParameterInfo(FunctionBody *functionBody, ArgSlot index, Var object) + void DynamicProfileInfo::RecordParameterInfo(FunctionBody *functionBody, ArgSlot index, Var object) { Assert(this->parameterInfo != nullptr); Assert(index < functionBody->GetProfiledInParamsCount()); @@ -973,13 +973,13 @@ namespace Js return parameterInfo[index]; } - inline void DynamicProfileInfo::RecordReturnTypeOnCallSiteInfo(FunctionBody* functionBody, ProfileId callSiteId, Var object) + void DynamicProfileInfo::RecordReturnTypeOnCallSiteInfo(FunctionBody* functionBody, ProfileId callSiteId, Var object) { Assert(callSiteId < functionBody->GetProfiledCallSiteCount()); this->callSiteInfo[callSiteId].returnType = this->callSiteInfo[callSiteId].returnType.Merge(object); } - inline void DynamicProfileInfo::RecordReturnType(FunctionBody* functionBody, ProfileId callSiteId, Var object) + void DynamicProfileInfo::RecordReturnType(FunctionBody* functionBody, ProfileId callSiteId, Var object) { Assert(callSiteId < functionBody->GetProfiledReturnTypeCount()); this->returnTypeInfo[callSiteId] = this->returnTypeInfo[callSiteId].Merge(object); @@ -998,7 +998,7 @@ namespace Js return this->returnTypeInfo[callSiteId]; } - inline void DynamicProfileInfo::RecordThisInfo(Var object, ThisType thisType) + void DynamicProfileInfo::RecordThisInfo(Var object, ThisType thisType) { this->thisInfo.valueType = this->thisInfo.valueType.Merge(object); this->thisInfo.thisType = max(this->thisInfo.thisType, thisType); diff --git a/lib/Runtime/Language/DynamicProfileStorage.cpp b/lib/Runtime/Language/DynamicProfileStorage.cpp index 89cf3386df5..5c5c99cc687 100644 --- a/lib/Runtime/Language/DynamicProfileStorage.cpp +++ b/lib/Runtime/Language/DynamicProfileStorage.cpp @@ -17,7 +17,7 @@ char16 DynamicProfileStorage::cacheDir[_MAX_DIR]; char16 DynamicProfileStorage::catalogFilename[_MAX_PATH]; CriticalSection DynamicProfileStorage::cs; DynamicProfileStorage::InfoMap DynamicProfileStorage::infoMap(&NoCheckHeapAllocator::Instance); -DWORD DynamicProfileStorage::creationTime = 0; +DynamicProfileStorage::TimeType DynamicProfileStorage::creationTime = DynamicProfileStorage::TimeType(); int32 DynamicProfileStorage::lastOffset = 0; DWORD const DynamicProfileStorage::MagicNumber = 20100526; DWORD const DynamicProfileStorage::FileFormatVersion = 2; @@ -796,7 +796,7 @@ bool DynamicProfileStorage::CreateCacheCatalog() Assert(useCacheDir); Assert(locked); nextFileId = 0; - creationTime = _time32(NULL); + creationTime = GetCreationTime(); DynamicProfileStorageReaderWriter catalogFile; if (!catalogFile.Init(catalogFilename, _u("wb"), true) || !catalogFile.Write(MagicNumber) diff --git a/lib/Runtime/Language/DynamicProfileStorage.h b/lib/Runtime/Language/DynamicProfileStorage.h index 0e6fed63944..b6fb47e4396 100644 --- 
a/lib/Runtime/Language/DynamicProfileStorage.h +++ b/lib/Runtime/Language/DynamicProfileStorage.h @@ -50,7 +50,14 @@ class DynamicProfileStorage static char16 catalogFilename[_MAX_PATH]; static DWORD const MagicNumber; static DWORD const FileFormatVersion; - static DWORD creationTime; +#ifdef _WIN32 + typedef DWORD TimeType; + static inline TimeType GetCreationTime() { return _time32(NULL); } +#else + typedef time_t TimeType; + static inline TimeType GetCreationTime() { return time(NULL); } +#endif + static TimeType creationTime; static int32 lastOffset; static HANDLE mutex; static CriticalSection cs; diff --git a/lib/Runtime/Language/InterpreterStackFrame.cpp b/lib/Runtime/Language/InterpreterStackFrame.cpp index 77c588d2887..601ce223d4f 100644 --- a/lib/Runtime/Language/InterpreterStackFrame.cpp +++ b/lib/Runtime/Language/InterpreterStackFrame.cpp @@ -1663,7 +1663,7 @@ namespace Js } #endif - bool InterpreterStackFrame::IsDelayDynamicInterpreterThunk(void * entryPoint) + bool InterpreterStackFrame::IsDelayDynamicInterpreterThunk(JavascriptMethod entryPoint) { return #if DYNAMIC_INTERPRETER_THUNK @@ -1779,7 +1779,7 @@ namespace Js #if ENABLE_PROFILE_INFO DynamicProfileInfo * dynamicProfileInfo = nullptr; const bool doProfile = executeFunction->GetInterpreterExecutionMode(false) == ExecutionMode::ProfilingInterpreter || - executeFunction->IsInDebugMode() && DynamicProfileInfo::IsEnabled(executeFunction); + (executeFunction->IsInDebugMode() && DynamicProfileInfo::IsEnabled(executeFunction)); if (doProfile) { #if !DYNAMIC_INTERPRETER_THUNK @@ -2057,18 +2057,18 @@ namespace Js { case Js::AsmJsRetType::Double: { - entryPoint = (AsmJsInterpreterDoubleEP)Js::InterpreterStackFrame::AsmJsInterpreter < double > ; + entryPoint = (void*)(AsmJsInterpreterDoubleEP)Js::InterpreterStackFrame::AsmJsInterpreter < double > ; break; } case Js::AsmJsRetType::Float: { - entryPoint = (AsmJsInterpreterFloatEP)Js::InterpreterStackFrame::AsmJsInterpreter < float > ; + entryPoint = (void*)(AsmJsInterpreterFloatEP)Js::InterpreterStackFrame::AsmJsInterpreter < float > ; break; } case Js::AsmJsRetType::Signed: case Js::AsmJsRetType::Void: { - entryPoint = (AsmJsInterpreterIntEP)Js::InterpreterStackFrame::AsmJsInterpreter < int > ; + entryPoint = (void*)(AsmJsInterpreterIntEP)Js::InterpreterStackFrame::AsmJsInterpreter < int > ; break; } case Js::AsmJsRetType::Int32x4: @@ -2083,7 +2083,7 @@ namespace Js case Js::AsmJsRetType::Uint16x8: case Js::AsmJsRetType::Uint8x16: { - entryPoint = Js::InterpreterStackFrame::AsmJsInterpreterSimdJs; + entryPoint = (void*)Js::InterpreterStackFrame::AsmJsInterpreterSimdJs; break; } default: @@ -2630,44 +2630,47 @@ namespace Js } threadContext->SetDisableImplicitFlags(prevDisableImplicitFlags); threadContext->SetImplicitCallFlags(saveImplicitcallFlags); - FrameDisplay* pDisplay = RecyclerNewPlus(scriptContext->GetRecycler(), sizeof(void*), FrameDisplay, 1); - pDisplay->SetItem( 0, moduleMemoryPtr ); - for (int i = 0; i < info->GetFunctionCount(); i++) + // scope { - const auto& modFunc = info->GetFunction(i); + FrameDisplay* pDisplay = RecyclerNewPlus(scriptContext->GetRecycler(), sizeof(void*), FrameDisplay, 1); + pDisplay->SetItem( 0, moduleMemoryPtr ); + for (int i = 0; i < info->GetFunctionCount(); i++) + { + const auto& modFunc = info->GetFunction(i); - // TODO: add more runtime checks here - auto proxy = m_functionBody->GetNestedFuncReference(i); + // TODO: add more runtime checks here + auto proxy = m_functionBody->GetNestedFuncReference(i); - AsmJsScriptFunction* scriptFuncObj = 
(AsmJsScriptFunction*)ScriptFunction::OP_NewScFunc(pDisplay, (FunctionProxy**)proxy); - localModuleFunctions[modFunc.location] = scriptFuncObj; - if (i == 0 && info->GetUsesChangeHeap()) - { - scriptFuncObj->GetDynamicType()->SetEntryPoint(AsmJsChangeHeapBuffer); - } - else - { - if (scriptFuncObj->GetDynamicType()->GetEntryPoint() == DefaultDeferredDeserializeThunk) + AsmJsScriptFunction* scriptFuncObj = (AsmJsScriptFunction*)ScriptFunction::OP_NewScFunc(pDisplay, (FunctionProxy**)proxy); + localModuleFunctions[modFunc.location] = scriptFuncObj; + if (i == 0 && info->GetUsesChangeHeap()) { - JavascriptFunction::DeferredDeserialize(scriptFuncObj); + scriptFuncObj->GetDynamicType()->SetEntryPoint(AsmJsChangeHeapBuffer); } - scriptFuncObj->GetDynamicType()->SetEntryPoint(AsmJsExternalEntryPoint); - scriptFuncObj->GetFunctionBody()->GetAsmJsFunctionInfo()->SetModuleFunctionBody(asmJsModuleFunctionBody); - } - scriptFuncObj->SetModuleMemory(moduleMemoryPtr); - if (!info->IsRuntimeProcessed()) - { - // don't reset entrypoint upon relinking - FunctionEntryPointInfo* entrypointInfo = (FunctionEntryPointInfo*)scriptFuncObj->GetEntryPointInfo(); - entrypointInfo->SetIsAsmJSFunction(true); - entrypointInfo->SetModuleAddress((uintptr_t)moduleMemoryPtr); - -#if DYNAMIC_INTERPRETER_THUNK - if (!PHASE_ON1(AsmJsJITTemplatePhase)) + else { - entrypointInfo->jsMethod = AsmJsDefaultEntryThunk; + if (scriptFuncObj->GetDynamicType()->GetEntryPoint() == DefaultDeferredDeserializeThunk) + { + JavascriptFunction::DeferredDeserialize(scriptFuncObj); + } + scriptFuncObj->GetDynamicType()->SetEntryPoint(AsmJsExternalEntryPoint); + scriptFuncObj->GetFunctionBody()->GetAsmJsFunctionInfo()->SetModuleFunctionBody(asmJsModuleFunctionBody); + } + scriptFuncObj->SetModuleMemory(moduleMemoryPtr); + if (!info->IsRuntimeProcessed()) + { + // don't reset entrypoint upon relinking + FunctionEntryPointInfo* entrypointInfo = (FunctionEntryPointInfo*)scriptFuncObj->GetEntryPointInfo(); + entrypointInfo->SetIsAsmJSFunction(true); + entrypointInfo->SetModuleAddress((uintptr_t)moduleMemoryPtr); + + #if DYNAMIC_INTERPRETER_THUNK + if (!PHASE_ON1(AsmJsJITTemplatePhase)) + { + entrypointInfo->jsMethod = AsmJsDefaultEntryThunk; + } + #endif } -#endif } } @@ -2727,9 +2730,11 @@ namespace Js // export only 1 function - Var exportFunc = localModuleFunctions[info->GetExportFunctionIndex()]; - SetReg((RegSlot)0, exportFunc); - return exportFunc; + { + Var exportFunc = localModuleFunctions[info->GetExportFunctionIndex()]; + SetReg((RegSlot)0, exportFunc); + return exportFunc; + } linkFailure: threadContext->SetDisableImplicitFlags(prevDisableImplicitFlags); @@ -2754,7 +2759,7 @@ namespace Js ScriptFunction::ReparseAsmJsModule(&funcObj); const bool doProfile = funcObj->GetFunctionBody()->GetInterpreterExecutionMode(false) == ExecutionMode::ProfilingInterpreter || - funcObj->GetFunctionBody()->IsInDebugMode() && DynamicProfileInfo::IsEnabled(funcObj->GetFunctionBody()); + (funcObj->GetFunctionBody()->IsInDebugMode() && DynamicProfileInfo::IsEnabled(funcObj->GetFunctionBody())); DynamicProfileInfo * dynamicProfileInfo = nullptr; if (doProfile) @@ -2788,7 +2793,7 @@ namespace Js } #if DBG - Js::RecyclableObject * invalidStackVar = (Js::RecyclableObject*)_alloca(sizeof(Js::RecyclableObject)); + Var invalidStackVar = (Js::RecyclableObject*)_alloca(sizeof(Js::RecyclableObject)); memset(invalidStackVar, 0xFE, sizeof(Js::RecyclableObject)); InterpreterStackFrame * newInstance = newInstance = setup.InitializeAllocation(allocation, 
funcObj->GetFunctionBody()->GetHasImplicitArgIns(), doProfile, nullptr, stackAddr, invalidStackVar); #else @@ -3017,8 +3022,14 @@ namespace Js // IAT xmm2 spill // IAT xmm1 spill <- floatSpillAddress for arg1 +#ifdef _WIN32 +#define FLOAT_SPILL_ADDRESS_OFFSET_WORDS 15 +#else +// On Sys V x64 we have 4 words less (4 reg shadow) +#define FLOAT_SPILL_ADDRESS_OFFSET_WORDS 11 +#endif // floats are spilled as xmmwords - uintptr_t floatSpillAddress = (uintptr_t)m_inParams - MachPtr * (15 - 2*i); + uintptr_t floatSpillAddress = (uintptr_t)m_inParams - MachPtr * (FLOAT_SPILL_ADDRESS_OFFSET_WORDS - 2*i); if (info->GetArgType(i).isInt()) { @@ -6104,7 +6115,8 @@ const byte * InterpreterStackFrame::OP_ProfiledLoopBodyStart(const byte * ip) // and do ISB only for 1st time this entry point is called (potential working set regression though). _InstructionSynchronizationBarrier(); #endif - uint newOffset = ::Math::PointerCastToIntegral(address(function, CallInfo(CallFlags_InternalFrame, 1), this)); + uint newOffset = ::Math::PointerCastToIntegral( + CALL_ENTRYPOINT(address, function, CallInfo(CallFlags_InternalFrame, 1), this)); #ifdef _M_IX86 _asm @@ -6137,7 +6149,8 @@ const byte * InterpreterStackFrame::OP_ProfiledLoopBodyStart(const byte * ip) // and do ISB only for 1st time this entry point is called (potential working set regression though). _InstructionSynchronizationBarrier(); #endif - uint newOffset = ::Math::PointerCastToIntegral(address(function, CallInfo(CallFlags_InternalFrame, 1), this)); + uint newOffset = ::Math::PointerCastToIntegral( + CALL_ENTRYPOINT(address, function, CallInfo(CallFlags_InternalFrame, 1), this)); #ifdef _M_IX86 _asm @@ -7171,7 +7184,7 @@ const byte * InterpreterStackFrame::OP_ProfiledLoopBodyStart(const byte * ip) if (m_functionBody->HasCachedScopePropIds()) { const Js::PropertyIdArray *propIds = this->m_functionBody->GetFormalsPropIdArray(); - + Var funcExpr = this->GetFunctionExpression(); PropertyId objectId = ActivationObjectEx::GetLiteralObjectRef(propIds); scopeObject = JavascriptOperators::OP_InitCachedScope(funcExpr, propIds, @@ -7743,7 +7756,7 @@ const byte * InterpreterStackFrame::OP_ProfiledLoopBodyStart(const byte * ip) // value is out of bound if (throws) { - JavascriptError::ThrowRangeError(scriptContext, JSERR_ArgumentOutOfRange, L"SIMD.Int32x4.FromFloat32x4"); + JavascriptError::ThrowRangeError(scriptContext, JSERR_ArgumentOutOfRange, _u("SIMD.Int32x4.FromFloat32x4")); } SetRegRawSimd(playout->U4_0, result); } @@ -8530,7 +8543,7 @@ const byte * InterpreterStackFrame::OP_ProfiledLoopBodyStart(const byte * ip) } Assert(propIds != nullptr); SetLocalClosure(frameObject); - + if (PHASE_VERBOSE_TRACE1(Js::StackArgFormalsOptPhase) && m_functionBody->GetInParamsCount() > 1) { Output::Print(_u("StackArgFormals : %s (%d) :Creating scope object in the bail out path. \n"), m_functionBody->GetDisplayName(), m_functionBody->GetFunctionNumber()); @@ -8550,12 +8563,12 @@ const byte * InterpreterStackFrame::OP_ProfiledLoopBodyStart(const byte * ip) Output::Flush(); } } - + if (heapArgObj) { heapArgObj->SetFormalCount(formalsCount); heapArgObj->SetFrameObject(frameObject); - + if (PHASE_TRACE1(Js::StackArgFormalsOptPhase) && formalsCount > 0) { Output::Print(_u("StackArgFormals : %s (%d) :Attaching the scope object with the heap arguments object in the bail out path. 
\n"), m_functionBody->GetDisplayName(), m_functionBody->GetFunctionNumber()); diff --git a/lib/Runtime/Language/InterpreterStackFrame.h b/lib/Runtime/Language/InterpreterStackFrame.h index 67a2b7a1012..cf355edbc8f 100644 --- a/lib/Runtime/Language/InterpreterStackFrame.h +++ b/lib/Runtime/Language/InterpreterStackFrame.h @@ -32,7 +32,7 @@ namespace Js { PREVENT_COPY(InterpreterStackFrame) - friend class BailOutRecord; + friend class ::BailOutRecord; friend class JavascriptGeneratorFunction; friend class JavascriptGenerator; @@ -231,7 +231,7 @@ namespace Js UINT16 GetFlags() const { return m_flags; } void OrFlags(UINT16 addTo) { m_flags |= addTo; } bool IsInCatchOrFinallyBlock(); - static bool IsDelayDynamicInterpreterThunk(void* entryPoint); + static bool IsDelayDynamicInterpreterThunk(JavascriptMethod entryPoint); Var LdEnv() const; void SetEnv(FrameDisplay *frameDisplay); @@ -614,7 +614,7 @@ namespace Js inline void OP_StModuleSlot(Var instance, int32 slotIndex1, int32 slotIndex2); inline void* OP_LdArgCnt(); template Var LdHeapArgumentsImpl(Var argsArray, ScriptContext* scriptContext); - inline Var OP_LdHeapArguments(ScriptContext* scriptContext); + Var OP_LdHeapArguments(ScriptContext* scriptContext); inline Var OP_LdLetHeapArguments(ScriptContext* scriptContext); inline Var OP_LdHeapArgsCached(ScriptContext* scriptContext); inline Var OP_LdLetHeapArgsCached(ScriptContext* scriptContext); diff --git a/lib/Runtime/Language/JavascriptNativeOperators.h b/lib/Runtime/Language/JavascriptNativeOperators.h index c3786a781c3..4f0c60161eb 100644 --- a/lib/Runtime/Language/JavascriptNativeOperators.h +++ b/lib/Runtime/Language/JavascriptNativeOperators.h @@ -20,7 +20,7 @@ namespace Js chunk->dataType = "BranchDictionary::Bucket"; if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { - Output::Print(L"NativeCodeData BranchDictionary::Bucket: chunk: %p, data: %p, index: %d, len: %x, totalOffset: %x, type: %S\n", + Output::Print(_u("NativeCodeData BranchDictionary::Bucket: chunk: %p, data: %p, index: %d, len: %x, totalOffset: %x, type: %S\n"), chunk, (void*)dataBlock, chunk->allocIndex, chunk->len, chunk->offset, chunk->dataType); } #endif @@ -35,7 +35,7 @@ namespace Js chunk->dataType = "BranchDictionary::Entries"; if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { - Output::Print(L"NativeCodeData BranchDictionary::Entries: chunk: %p, data: %p, index: %d, len: %x, totalOffset: %x, type: %S\n", + Output::Print(_u("NativeCodeData BranchDictionary::Entries: chunk: %p, data: %p, index: %d, len: %x, totalOffset: %x, type: %S\n"), chunk, (void*)dataBlock, chunk->allocIndex, chunk->len, chunk->offset, chunk->dataType); } #endif @@ -52,7 +52,7 @@ namespace Js { this->key = (TKey)remoteKey; } - }; + }; typedef JsUtil::BaseDictionary BranchBaseDictionary; diff --git a/lib/Runtime/Language/JavascriptOperators.cpp b/lib/Runtime/Language/JavascriptOperators.cpp index a7be899c3b8..db1eb023813 100644 --- a/lib/Runtime/Language/JavascriptOperators.cpp +++ b/lib/Runtime/Language/JavascriptOperators.cpp @@ -1396,7 +1396,7 @@ namespace Js ThreadContext *threadContext = scriptContext->GetThreadContext(); - Var iteratorVar = + Var iteratorVar = threadContext->ExecuteImplicitCall(function, ImplicitCall_Accessor, [=]() -> Var { return CALL_FUNCTION(function, CallInfo(Js::CallFlags_Value, 1), aRight); @@ -2087,37 +2087,6 @@ namespace Js return GetterSetter_Impl(instance, propertyName, setterValue, info, scriptContext); } - // Checks to see if any object in the prototype chain has a property descriptor for the given property - // that 
specifies either an accessor or a non-writable attribute. - // If TRUE, check flags for details. - template - BOOL JavascriptOperators::CheckPrototypesForAccessorOrNonWritablePropertyCore(RecyclableObject* instance, - PropertyKeyType propertyKey, Var* setterValue, DescriptorFlags* flags, PropertyValueInfo* info, ScriptContext* scriptContext) - { - Assert(setterValue); - Assert(flags); - - // Do a quick check to see if all objects in the prototype chain are known to have only - // writable data properties (i.e. no accessors or non-writable properties). - if (doFastProtoChainCheck && CheckIfObjectAndPrototypeChainHasOnlyWritableDataProperties(instance)) - { - return FALSE; - } - - if (isRoot) - { - *flags = JavascriptOperators::GetRootSetter(instance, propertyKey, setterValue, info, scriptContext); - } - if (*flags == None) - { - *flags = JavascriptOperators::GetterSetter(instance, propertyKey, setterValue, info, scriptContext); - } - - - - return ((*flags & Accessor) == Accessor) || ((*flags & Proxy) == Proxy)|| ((*flags & Data) == Data && (*flags & Writable) == None); - } - void JavascriptOperators::OP_InvalidateProtoCaches(PropertyId propertyId, ScriptContext *scriptContext) { scriptContext->InvalidateProtoCaches(propertyId); @@ -2931,7 +2900,7 @@ namespace Js // If we have console scope and no one in the scope had the property add it to console scope if ((length > 0) && ConsoleScopeActivationObject::Is(pDisplay->GetItem(length - 1))) { - // CheckPrototypesForAccessorOrNonWritableProperty does not check for const in global object. We should check it here. + // CheckPrototypesForAccessorOrNonWritableProperty does not check for const in global object. We should check it here. if ((length > 1) && GlobalObject::Is(pDisplay->GetItem(length - 2))) { GlobalObject* globalObject = GlobalObject::FromVar(pDisplay->GetItem(length - 2)); @@ -4925,7 +4894,7 @@ namespace Js return JavascriptOperators::OP_GetProperty(instance, PropertyIds::length, scriptContext); } - inline Var JavascriptOperators::GetThisFromModuleRoot(Var thisVar) + Var JavascriptOperators::GetThisFromModuleRoot(Var thisVar) { RootObjectBase * rootObject = static_cast(thisVar); RecyclableObject* hostObject = rootObject->GetHostObject(); @@ -5130,7 +5099,7 @@ namespace Js { return false; } - if (DynamicType::Is(typeId) && + if (DynamicType::Is(typeId) && static_cast(instance)->GetTypeHandler()->IsStringTypeHandler()) { return false; @@ -5140,7 +5109,7 @@ namespace Js return false; } return !(instance->HasDeferredTypeHandler() && - JavascriptFunction::Is(instance) && + JavascriptFunction::Is(instance) && JavascriptFunction::FromVar(instance)->IsExternalFunction()); } @@ -5154,7 +5123,7 @@ namespace Js { return false; } - } + } return true; } @@ -6814,8 +6783,8 @@ namespace Js formalsCount = propIds->count; Assert(formalsCount != 0 && propIds != nullptr); } - - HeapArgumentsObject *argsObj = JavascriptOperators::CreateHeapArguments(funcCallee, actualsCount, formalsCount, frameObj, scriptContext); + + HeapArgumentsObject *argsObj = JavascriptOperators::CreateHeapArguments(funcCallee, actualsCount, formalsCount, frameObj, scriptContext); return FillScopeObject(funcCallee, actualsCount, formalsCount, frameObj, paramAddr, propIds, argsObj, scriptContext, nonSimpleParamList, false); } @@ -6826,11 +6795,11 @@ namespace Js "Loading the arguments object in the global function?"); HeapArgumentsObject *argsObj = JavascriptOperators::CreateHeapArguments(funcCallee, actualsCount, formalsCount, frameObj, scriptContext); - + return 
FillScopeObject(funcCallee, actualsCount, formalsCount, frameObj, paramAddr, nullptr, argsObj, scriptContext, nonSimpleParamList, true); } - Var JavascriptOperators::FillScopeObject(JavascriptFunction *funcCallee, uint32 actualsCount, uint32 formalsCount, Var frameObj, Var * paramAddr, + Var JavascriptOperators::FillScopeObject(JavascriptFunction *funcCallee, uint32 actualsCount, uint32 formalsCount, Var frameObj, Var * paramAddr, Js::PropertyIdArray *propIds, HeapArgumentsObject * argsObj, ScriptContext * scriptContext, bool nonSimpleParamList, bool useCachedScope) { Assert(frameObj); @@ -7007,7 +6976,7 @@ namespace Js if (scriptContext->GetConfig()->IsES6HasInstanceEnabled()) { Var instOfHandler = JavascriptOperators::GetProperty(constructor, PropertyIds::_symbolHasInstance, scriptContext); - if (JavascriptOperators::IsUndefinedObject(instOfHandler) + if (JavascriptOperators::IsUndefinedObject(instOfHandler) || instOfHandler == scriptContext->GetBuiltInLibraryFunction(JavascriptFunction::EntryInfo::SymbolHasInstance.GetOriginalEntryPoint())) { return JavascriptBoolean::ToVar(constructor->HasInstance(instance, scriptContext, inlineCache), scriptContext); @@ -8400,7 +8369,7 @@ namespace Js { // CONSIDER (EquivObjTypeSpec): Invent some form of least recently used eviction scheme. uintptr_t index = (reinterpret_cast(type) >> 4) & (EQUIVALENT_TYPE_CACHE_SIZE - 1); - + if (cache->nextEvictionVictim == EQUIVALENT_TYPE_CACHE_SIZE) { __analysis_assume(index < EQUIVALENT_TYPE_CACHE_SIZE); @@ -8425,7 +8394,7 @@ namespace Js __analysis_assume(index < EQUIVALENT_TYPE_CACHE_SIZE); equivTypes[index] = type; } - + // Fixed field checks allow us to assume a specific type ID, but the assumption is only // valid if we lock the type. Otherwise, the type ID may change out from under us without // evolving the type. @@ -10763,20 +10732,6 @@ namespace Js } } - template - BOOL JavascriptOperators::CheckPrototypesForAccessorOrNonWritablePropertySlow(RecyclableObject* instance, PropertyKeyType propertyKey, Var* setterValue, DescriptorFlags* flags, bool isRoot, ScriptContext* scriptContext) - { - // This is used in debug verification, do not doFastProtoChainCheck to avoid side effect (doFastProtoChainCheck may update HasWritableDataOnly flags). - if (isRoot) - { - return CheckPrototypesForAccessorOrNonWritablePropertyCore(instance, propertyKey, setterValue, flags, nullptr, scriptContext); - } - else - { - return CheckPrototypesForAccessorOrNonWritablePropertyCore(instance, propertyKey, setterValue, flags, nullptr, scriptContext); - } - } - BOOL JavascriptOperators::SetProperty(Var instance, RecyclableObject* object, PropertyId propertyId, Var newValue, ScriptContext* requestContext, PropertyOperationFlags propertyOperationFlags) { PropertyValueInfo info; diff --git a/lib/Runtime/Language/JavascriptOperators.inl b/lib/Runtime/Language/JavascriptOperators.inl index 807a3c7be19..7fbebcab160 100644 --- a/lib/Runtime/Language/JavascriptOperators.inl +++ b/lib/Runtime/Language/JavascriptOperators.inl @@ -106,4 +106,46 @@ namespace Js return false; } + // Checks to see if any object in the prototype chain has a property descriptor for the given property + // that specifies either an accessor or a non-writable attribute. + // If TRUE, check flags for details. 
+ template + BOOL JavascriptOperators::CheckPrototypesForAccessorOrNonWritablePropertyCore(RecyclableObject* instance, + PropertyKeyType propertyKey, Var* setterValue, DescriptorFlags* flags, PropertyValueInfo* info, ScriptContext* scriptContext) + { + Assert(setterValue); + Assert(flags); + + // Do a quick check to see if all objects in the prototype chain are known to have only + // writable data properties (i.e. no accessors or non-writable properties). + if (doFastProtoChainCheck && CheckIfObjectAndPrototypeChainHasOnlyWritableDataProperties(instance)) + { + return FALSE; + } + + if (isRoot) + { + *flags = JavascriptOperators::GetRootSetter(instance, propertyKey, setterValue, info, scriptContext); + } + if (*flags == None) + { + *flags = JavascriptOperators::GetterSetter(instance, propertyKey, setterValue, info, scriptContext); + } + + return ((*flags & Accessor) == Accessor) || ((*flags & Proxy) == Proxy)|| ((*flags & Data) == Data && (*flags & Writable) == None); + } + + template + BOOL JavascriptOperators::CheckPrototypesForAccessorOrNonWritablePropertySlow(RecyclableObject* instance, PropertyKeyType propertyKey, Var* setterValue, DescriptorFlags* flags, bool isRoot, ScriptContext* scriptContext) + { + // This is used in debug verification, do not doFastProtoChainCheck to avoid side effect (doFastProtoChainCheck may update HasWritableDataOnly flags). + if (isRoot) + { + return CheckPrototypesForAccessorOrNonWritablePropertyCore(instance, propertyKey, setterValue, flags, nullptr, scriptContext); + } + else + { + return CheckPrototypesForAccessorOrNonWritablePropertyCore(instance, propertyKey, setterValue, flags, nullptr, scriptContext); + } + } } diff --git a/lib/Runtime/Language/ObjTypeSpecFldInfo.cpp b/lib/Runtime/Language/ObjTypeSpecFldInfo.cpp index c61b4107aa9..ca2e6b8103c 100644 --- a/lib/Runtime/Language/ObjTypeSpecFldInfo.cpp +++ b/lib/Runtime/Language/ObjTypeSpecFldInfo.cpp @@ -166,27 +166,27 @@ namespace Js functionObject = (Js::JavascriptFunction *)fixedProperty; if (PHASE_VERBOSE_TRACE(Js::FixedMethodsPhase, functionBody)) { - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - wchar_t debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; Js::DynamicObject* protoObject = isProto ? prototypeObject : nullptr; - Output::Print(L"FixedFields: function %s (%s) cloning cache with fixed method: %s (%s), function: 0x%p, body: 0x%p (cache id: %d, layout: %s, type: 0x%p, proto: 0x%p, proto type: 0x%p)\n", + Output::Print(_u("FixedFields: function %s (%s) cloning cache with fixed method: %s (%s), function: 0x%p, body: 0x%p (cache id: %d, layout: %s, type: 0x%p, proto: 0x%p, proto type: 0x%p)\n"), functionBody->GetDisplayName(), functionBody->GetDebugNumberSet(debugStringBuffer), fixedPropertyRecord->GetBuffer(), functionObject->GetFunctionInfo()->GetFunctionProxy() ? - functionObject->GetFunctionInfo()->GetFunctionProxy()->GetDebugNumberSet(debugStringBuffer2) : L"(null)", functionObject, functionObject->GetFunctionInfo(), - cacheId, isProto ? L"proto" : L"local", type, protoObject, protoObject != nullptr ? protoObject->GetType() : nullptr); + functionObject->GetFunctionInfo()->GetFunctionProxy()->GetDebugNumberSet(debugStringBuffer2) : _u("(null)"), functionObject, functionObject->GetFunctionInfo(), + cacheId, isProto ? _u("proto") : _u("local"), type, protoObject, protoObject != nullptr ? 
protoObject->GetType() : nullptr); Output::Flush(); } if (PHASE_VERBOSE_TESTTRACE(Js::FixedMethodsPhase, functionBody)) { - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - wchar_t debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - Output::Print(L"FixedFields: function %s (%s) cloning cache with fixed method: %s (%s) (cache id: %d, layout: %s)\n", + Output::Print(_u("FixedFields: function %s (%s) cloning cache with fixed method: %s (%s) (cache id: %d, layout: %s)\n"), functionBody->GetDisplayName(), functionBody->GetDebugNumberSet(debugStringBuffer), fixedPropertyRecord->GetBuffer(), functionObject->GetFunctionInfo()->GetFunctionProxy() ? - functionObject->GetFunctionInfo()->GetFunctionProxy()->GetDebugNumberSet(debugStringBuffer2) : L"(null)", functionObject, functionObject->GetFunctionInfo(), - cacheId, isProto ? L"proto" : L"local"); + functionObject->GetFunctionInfo()->GetFunctionProxy()->GetDebugNumberSet(debugStringBuffer2) : _u("(null)"), functionObject, functionObject->GetFunctionInfo(), + cacheId, isProto ? _u("proto") : _u("local")); Output::Flush(); } @@ -217,12 +217,12 @@ namespace Js { if (PHASE_TRACE(Js::FixedNewObjPhase, functionBody)) { - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - wchar_t debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - Output::Print(L"FixedNewObj: function %s (%s) ctor cache for %s (%s) about to be cloned has unlocked inline slot count: guard value = 0x%p, type = 0x%p, slots = %d, inline slots = %d\n", + Output::Print(_u("FixedNewObj: function %s (%s) ctor cache for %s (%s) about to be cloned has unlocked inline slot count: guard value = 0x%p, type = 0x%p, slots = %d, inline slots = %d\n"), functionBody->GetDisplayName(), functionBody->GetDebugNumberSet(debugStringBuffer), fixedPropertyRecord->GetBuffer(), functionObject->GetFunctionInfo()->GetFunctionBody() ? - functionObject->GetFunctionInfo()->GetFunctionBody()->GetDebugNumberSet(debugStringBuffer2) : L"(null)", + functionObject->GetFunctionInfo()->GetFunctionBody()->GetDebugNumberSet(debugStringBuffer2) : _u("(null)"), runtimeConstructorCache->GetRawGuardValue(), runtimeConstructorCache->GetType(), runtimeConstructorCache->GetSlotCount(), runtimeConstructorCache->GetInlineSlotCount()); Output::Flush(); @@ -239,12 +239,12 @@ namespace Js if (PHASE_TRACE(Js::FixedNewObjPhase, functionBody)) { - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - wchar_t debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - Output::Print(L"FixedNewObj: function %s (%s) cloning ctor cache for %s (%s): guard value = 0x%p, type = 0x%p, slots = %d, inline slots = %d\n", + Output::Print(_u("FixedNewObj: function %s (%s) cloning ctor cache for %s (%s): guard value = 0x%p, type = 0x%p, slots = %d, inline slots = %d\n"), functionBody->GetDisplayName(), functionBody->GetDebugNumberSet(debugStringBuffer), fixedPropertyRecord->GetBuffer(), functionObject->GetFunctionInfo()->GetFunctionBody() ? 
- functionObject->GetFunctionInfo()->GetFunctionBody()->GetDebugNumberSet(debugStringBuffer2) : L"(null)", functionObject, functionObject->GetFunctionInfo(), + functionObject->GetFunctionInfo()->GetFunctionBody()->GetDebugNumberSet(debugStringBuffer2) : _u("(null)"), functionObject, functionObject->GetFunctionInfo(), runtimeConstructorCache->GetRawGuardValue(), runtimeConstructorCache->IsNormal() ? runtimeConstructorCache->GetType() : nullptr, runtimeConstructorCache->GetSlotCount(), runtimeConstructorCache->GetInlineSlotCount()); Output::Flush(); @@ -256,19 +256,19 @@ namespace Js { if (PHASE_TRACE(Js::FixedNewObjPhase, functionBody)) { - wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - wchar_t debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; + char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; - Output::Print(L"FixedNewObj: function %s (%s) skipping ctor cache for %s (%s), because %s (guard value = 0x%p, script context = %p).\n", + Output::Print(_u("FixedNewObj: function %s (%s) skipping ctor cache for %s (%s), because %s (guard value = 0x%p, script context = %p).\n"), functionBody->GetDisplayName(), functionBody->GetDebugNumberSet(debugStringBuffer), fixedPropertyRecord->GetBuffer(), functionObject->GetFunctionInfo()->GetFunctionBody() ? - functionObject->GetFunctionInfo()->GetFunctionBody()->GetDebugNumberSet(debugStringBuffer2) : L"(null)", functionObject, functionObject->GetFunctionInfo(), - runtimeConstructorCache->IsEmpty() ? L"cache is empty (or has been cleared)" : - runtimeConstructorCache->IsInvalidated() ? L"cache is invalidated" : - runtimeConstructorCache->SkipDefaultNewObject() ? L"default new object isn't needed" : - runtimeConstructorCache->NeedsTypeUpdate() ? L"cache needs to be updated" : - runtimeConstructorCache->NeedsUpdateAfterCtor() ? L"cache needs update after ctor" : - runtimeConstructorCache->IsPolymorphic() ? L"cache is polymorphic" : - runtimeConstructorCache->GetScriptContext() != functionBody->GetScriptContext() ? L"script context mismatch" : L"of an unexpected situation", + functionObject->GetFunctionInfo()->GetFunctionBody()->GetDebugNumberSet(debugStringBuffer2) : _u("(null)"), functionObject, functionObject->GetFunctionInfo(), + runtimeConstructorCache->IsEmpty() ? _u("cache is empty (or has been cleared)") : + runtimeConstructorCache->IsInvalidated() ? _u("cache is invalidated") : + runtimeConstructorCache->SkipDefaultNewObject() ? _u("default new object isn't needed") : + runtimeConstructorCache->NeedsTypeUpdate() ? _u("cache needs to be updated") : + runtimeConstructorCache->NeedsUpdateAfterCtor() ? _u("cache needs update after ctor") : + runtimeConstructorCache->IsPolymorphic() ? _u("cache is polymorphic") : + runtimeConstructorCache->GetScriptContext() != functionBody->GetScriptContext() ? 
_u("script context mismatch") : _u("of an unexpected situation"), runtimeConstructorCache->GetRawGuardValue(), runtimeConstructorCache->GetScriptContext()); Output::Flush(); } @@ -338,7 +338,7 @@ namespace Js if (PHASE_TRACE(Js::ObjTypeSpecPhase, topFunctionBody) || PHASE_TRACE(Js::EquivObjTypeSpecPhase, topFunctionBody)) { const PropertyRecord* propertyRecord = scriptContext->GetPropertyName(propertyId); - Output::Print(L"Created ObjTypeSpecFldInfo: id %u, property %s(#%u), slot %u, type set: 0x%p\n", + Output::Print(_u("Created ObjTypeSpecFldInfo: id %u, property %s(#%u), slot %u, type set: 0x%p\n"), id, propertyRecord->GetBuffer(), propertyId, slotIndex, type); Output::Flush(); } @@ -352,7 +352,7 @@ namespace Js if (PHASE_TRACE(Js::ObjTypeSpecPhase, topFunctionBody) || PHASE_TRACE(Js::EquivObjTypeSpecPhase, topFunctionBody)) { const PropertyRecord* propertyRecord = scriptContext->GetPropertyName(propertyId); - Output::Print(L"Created ObjTypeSpecFldInfo: id %u, property %s(#%u), slot %u, type: 0x%p\n", + Output::Print(_u("Created ObjTypeSpecFldInfo: id %u, property %s(#%u), slot %u, type: 0x%p\n"), id, propertyRecord->GetBuffer(), propertyId, slotIndex, type); Output::Flush(); } @@ -384,13 +384,13 @@ namespace Js } } - Assert(cache->GetSize() < MAXUINT16); + Assert(cache->GetSize() < UINT16_MAX); Js::InlineCache* inlineCaches = cache->GetInlineCaches(); Js::DynamicObject* prototypeObject = nullptr; Js::DynamicObject* accessorOwnerObject = nullptr; Js::TypeId typeId = TypeIds_Limit; uint16 polyCacheSize = (uint16)cache->GetSize(); - uint16 firstNonEmptyCacheIndex = MAXUINT16; + uint16 firstNonEmptyCacheIndex = UINT16_MAX; uint16 slotIndex = 0; bool areEquivalent = true; bool usesAuxSlot = false; @@ -408,7 +408,7 @@ namespace Js InlineCache& inlineCache = inlineCaches[i]; if (inlineCache.IsEmpty()) continue; - if (firstNonEmptyCacheIndex == MAXUINT16) + if (firstNonEmptyCacheIndex == UINT16_MAX) { if (inlineCache.IsLocal()) { @@ -494,7 +494,7 @@ namespace Js typeCount++; } - if (firstNonEmptyCacheIndex == MAXUINT16) + if (firstNonEmptyCacheIndex == UINT16_MAX) { IncInlineCacheCount(emptyPolyInlineCacheCount); return nullptr; @@ -679,13 +679,13 @@ namespace Js if (typeSet) { const PropertyRecord* propertyRecord = scriptContext->GetPropertyName(propertyId); - Output::Print(L"Created ObjTypeSpecFldInfo: id %u, property %s(#%u), slot %u, type set: ", + Output::Print(_u("Created ObjTypeSpecFldInfo: id %u, property %s(#%u), slot %u, type set: "), id, propertyRecord->GetBuffer(), propertyId, slotIndex); for (uint16 ti = 0; ti < typeCount - 1; ti++) { - Output::Print(L"0x%p, ", typeSet->GetType(ti)); + Output::Print(_u("0x%p, "), typeSet->GetType(ti)); } - Output::Print(L"0x%p\n", typeSet->GetType(typeCount - 1)); + Output::Print(_u("0x%p\n"), typeSet->GetType(typeCount - 1)); Output::Flush(); } } @@ -763,9 +763,9 @@ namespace Js } #ifdef ENABLE_DEBUG_CONFIG_OPTIONS - const wchar_t* ObjTypeSpecFldInfo::GetCacheLayoutString() const + const char16* ObjTypeSpecFldInfo::GetCacheLayoutString() const { - return IsLoadedFromProto() ? L"proto" : UsesAccessor() ? L"flags" : L"local"; + return IsLoadedFromProto() ? _u("proto") : UsesAccessor() ? 
_u("flags") : _u("local"); } #endif diff --git a/lib/Runtime/Language/ObjTypeSpecFldInfo.h b/lib/Runtime/Language/ObjTypeSpecFldInfo.h index b95942f9a30..9c82f10eec9 100644 --- a/lib/Runtime/Language/ObjTypeSpecFldInfo.h +++ b/lib/Runtime/Language/ObjTypeSpecFldInfo.h @@ -363,7 +363,7 @@ namespace Js } #ifdef ENABLE_DEBUG_CONFIG_OPTIONS - const wchar_t *GetCacheLayoutString() const; + const char16 *GetCacheLayoutString() const; #endif }; diff --git a/lib/Runtime/Language/ProfilingHelpers.cpp b/lib/Runtime/Language/ProfilingHelpers.cpp index 88c06c61d0d..023f7a2792d 100644 --- a/lib/Runtime/Language/ProfilingHelpers.cpp +++ b/lib/Runtime/Language/ProfilingHelpers.cpp @@ -81,7 +81,7 @@ namespace Js const int32 index = TaggedInt::ToInt32(varIndex); const uint32 offset = index; - if(index < 0 || offset >= headSegmentLength || array && array->IsMissingHeadSegmentItem(offset)) + if(index < 0 || offset >= headSegmentLength || (array && array->IsMissingHeadSegmentItem(offset))) { ldElemInfo.neededHelperCall = true; break; @@ -446,7 +446,7 @@ namespace Js CallInfo callInfo, ...) { - ARGUMENTS(args, callInfo); + ARGUMENTS(args, callee, framePointer, profileId, arrayProfileId, callInfo); return ProfiledNewScObjArray( callee, diff --git a/lib/Runtime/Language/SourceDynamicProfileManager.h b/lib/Runtime/Language/SourceDynamicProfileManager.h index 9112c613b22..9671c1648cb 100644 --- a/lib/Runtime/Language/SourceDynamicProfileManager.h +++ b/lib/Runtime/Language/SourceDynamicProfileManager.h @@ -65,6 +65,7 @@ namespace Js static const uint MAX_FUNCTION_COUNT = 10000; // Consider data corrupt if there are more functions than this +#ifdef ENABLE_WININET_PROFILE_DATA_CACHE // // Simple read-only wrapper around IStream - templatized and returns boolean result to indicate errors // @@ -148,6 +149,7 @@ namespace Js IStream* stream; }; +#endif // ENABLE_WININET_PROFILE_DATA_CACHE }; }; -#endif +#endif // ENABLE_PROFILE_INFO diff --git a/lib/Runtime/Language/TaggedInt.cpp b/lib/Runtime/Language/TaggedInt.cpp index bfe973fcc08..c74fc04468a 100644 --- a/lib/Runtime/Language/TaggedInt.cpp +++ b/lib/Runtime/Language/TaggedInt.cpp @@ -515,9 +515,9 @@ namespace Js #if INT32VAR Var result = aValue; - (*(int *)&result)++; + (*(unsigned int *)&result)++; // unsigned to avoid signed int overflow #else - int n = reinterpret_cast(aValue); + unsigned int n = reinterpret_cast(aValue); n += 2; Var result = reinterpret_cast(n); #endif @@ -558,9 +558,9 @@ namespace Js #if INT32VAR Var result = aValue; - (*(int *)&result)--; + (*(unsigned int *)&result)--; // unsigned to avoid signed int overflow #else - int n = reinterpret_cast(aValue); + unsigned int n = reinterpret_cast(aValue); n -= 2; Var result = reinterpret_cast(n); #endif diff --git a/lib/Runtime/Language/ValueType.cpp b/lib/Runtime/Language/ValueType.cpp index 0c8e5d1fc14..f750793e376 100644 --- a/lib/Runtime/Language/ValueType.cpp +++ b/lib/Runtime/Language/ValueType.cpp @@ -59,7 +59,7 @@ ValueType ValueType::GetNumberAndLikelyInt(const bool isLikelyTagged) return Verify(GetInt(isLikelyTagged).bits | Bits::Number); } -inline ValueType ValueType::GetObject(const ObjectType objectType) +ValueType ValueType::GetObject(const ObjectType objectType) { ValueType valueType(UninitializedObject); valueType.SetObjectType(objectType); @@ -1483,7 +1483,7 @@ ValueType ValueType::FromObjectWithArray(Js::DynamicObject *const object) return FromObjectArray(JavascriptArray::FromVar(objectArray)); } -inline ValueType ValueType::FromObjectArray(Js::JavascriptArray *const objectArray) 
+ValueType ValueType::FromObjectArray(Js::JavascriptArray *const objectArray) { using namespace Js; Assert(objectArray); @@ -1491,7 +1491,7 @@ inline ValueType ValueType::FromObjectArray(Js::JavascriptArray *const objectArr return FromArray(ObjectType::ObjectWithArray, objectArray, TypeIds_Array); // objects with native arrays are currently not supported } -inline ValueType ValueType::FromArray( +ValueType ValueType::FromArray( const ObjectType objectType, Js::JavascriptArray *const array, const Js::TypeId arrayTypeId) diff --git a/lib/Runtime/Language/amd64/JavascriptOperatorsA.S b/lib/Runtime/Language/amd64/JavascriptOperatorsA.S index e45ef22f5e3..a5692eac92b 100644 --- a/lib/Runtime/Language/amd64/JavascriptOperatorsA.S +++ b/lib/Runtime/Language/amd64/JavascriptOperatorsA.S @@ -25,7 +25,7 @@ C_FUNC(amd64_CallWithFakeFrame): lea rax, [rip + C_FUNC(amd64_ReturnFromCallWithFakeFrame)] mov [rsp+8h], rax - mov rax, [rsp + 28h] + mov rax, r8 // arg0 push rbp mov rbp, rsi @@ -33,17 +33,17 @@ C_FUNC(amd64_CallWithFakeFrame): // Frame spill size. sub rsp, rdx - // Save callee-saved xmm registers - movapd xmmword ptr [rsp + 90h], xmm15 - movapd xmmword ptr [rsp + 80h], xmm14 - movapd xmmword ptr [rsp + 70h], xmm13 - movapd xmmword ptr [rsp + 60h], xmm12 - movapd xmmword ptr [rsp + 50h], xmm11 - movapd xmmword ptr [rsp + 40h], xmm10 - movapd xmmword ptr [rsp + 30h], xmm9 - movapd xmmword ptr [rsp + 20h], xmm8 - movapd xmmword ptr [rsp + 10h], xmm7 - movapd xmmword ptr [rsp], xmm6 + // Save callee-saved xmm registers -- none on Sys V x64 + // movapd xmmword ptr [rsp + 90h], xmm15 + // movapd xmmword ptr [rsp + 80h], xmm14 + // movapd xmmword ptr [rsp + 70h], xmm13 + // movapd xmmword ptr [rsp + 60h], xmm12 + // movapd xmmword ptr [rsp + 50h], xmm11 + // movapd xmmword ptr [rsp + 40h], xmm10 + // movapd xmmword ptr [rsp + 30h], xmm9 + // movapd xmmword ptr [rsp + 20h], xmm8 + // movapd xmmword ptr [rsp + 10h], xmm7 + // movapd xmmword ptr [rsp], xmm6 // Save all callee saved registers. push r15 @@ -70,17 +70,17 @@ C_FUNC(amd64_ReturnFromCallWithFakeFrame): pop r14 pop r15 - // Restore callee-saved xmm registers - movapd xmm6, xmmword ptr [rsp] - movapd xmm7, xmmword ptr [rsp + 10h] - movapd xmm8, xmmword ptr [rsp + 20h] - movapd xmm9, xmmword ptr [rsp + 30h] - movapd xmm10, xmmword ptr [rsp + 40h] - movapd xmm11, xmmword ptr [rsp + 50h] - movapd xmm12, xmmword ptr [rsp + 60h] - movapd xmm13, xmmword ptr [rsp + 70h] - movapd xmm14, xmmword ptr [rsp + 80h] - movapd xmm15, xmmword ptr [rsp + 90h] + // Restore callee-saved xmm registers -- none on Sys V x64; must match RegList.h + // movapd xmm6, xmmword ptr [rsp] + // movapd xmm7, xmmword ptr [rsp + 10h] + // movapd xmm8, xmmword ptr [rsp + 20h] + // movapd xmm9, xmmword ptr [rsp + 30h] + // movapd xmm10, xmmword ptr [rsp + 40h] + // movapd xmm11, xmmword ptr [rsp + 50h] + // movapd xmm12, xmmword ptr [rsp + 60h] + // movapd xmm13, xmmword ptr [rsp + 70h] + // movapd xmm14, xmmword ptr [rsp + 80h] + // movapd xmm15, xmmword ptr [rsp + 90h] add rsp, rdx diff --git a/lib/Runtime/Language/amd64/amd64_Thunks.S b/lib/Runtime/Language/amd64/amd64_Thunks.S new file mode 100644 index 00000000000..2028441febb --- /dev/null +++ b/lib/Runtime/Language/amd64/amd64_Thunks.S @@ -0,0 +1,382 @@ +//------------------------------------------------------------------------------------------------------- +// Copyright (C) Microsoft. All rights reserved. +// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. 
+//------------------------------------------------------------------------------------------------------- +.intel_syntax noprefix +#include "unixasmmacros.inc" + + +#ifdef _ENABLE_DYNAMIC_THUNKS + +//============================================================================================================ +// InterpreterStackFrame::DelayDynamicInterpreterThunk +//============================================================================================================ + +// JavascriptMethod InterpreterStackFrame::EnsureDynamicInterpreterThunk(ScriptFunction * function) +// extrn _ZN2Js21InterpreterStackFrame29EnsureDynamicInterpreterThunkEPNS_14ScriptFunctionE + +// Var InterpreterStackFrame::DelayDynamicInterpreterThunk(RecyclableObject* function, CallInfo callInfo, ...) +.balign 16 +NESTED_ENTRY _ZN2Js21InterpreterStackFrame28DelayDynamicInterpreterThunkEPNS_16RecyclableObjectENS_8CallInfoEz, _TEXT, NoHandler + push_nonvol_reg rbp + lea rbp, [rsp] + + // save argument registers used by custom calling convention + push_register rdi + push_register rsi + + // JavascriptMethod InterpreterStackFrame::EnsureDynamicInterpreterThunk( + // ScriptFunction * function) + // + // RDI == function, setup by custom calling convention + call C_FUNC(_ZN2Js21InterpreterStackFrame29EnsureDynamicInterpreterThunkEPNS_14ScriptFunctionE) + + pop_register rsi + pop_register rdi + pop_nonvol_reg rbp + + jmp rax +NESTED_END _ZN2Js21InterpreterStackFrame28DelayDynamicInterpreterThunkEPNS_16RecyclableObjectENS_8CallInfoEz, _TEXT + + +//============================================================================================================ +// InterpreterStackFrame::AsmJsDelayDynamicInterpreterThunk +//============================================================================================================ + +// JavascriptMethod InterpreterStackFrame::EnsureDynamicInterpreterThunk(ScriptFunction * function) +// extrn _ZN2Js21InterpreterStackFrame29EnsureDynamicInterpreterThunkEPNS_14ScriptFunctionE + +// Var InterpreterStackFrame::AsmJsDelayDynamicInterpreterThunk(RecyclableObject* function, CallInfo callInfo, ...) 
+.balign 16 +NESTED_ENTRY _ZN2Js21InterpreterStackFrame33AsmJsDelayDynamicInterpreterThunkEPNS_16RecyclableObjectENS_8CallInfoEz, _TEXT, NoHandler + push_nonvol_reg rbp // push rbp and adjust CFA offset + lea rbp, [rsp] + + set_cfa_register rbp, (2*8) // Set to compute CFA as: rbp + 16 (sizeof: [rbp] [ReturnAddress]) + + // save argument registers used by custom calling convention + push rdi + push rsi + push rdx + push rcx + push r8 + push r9 + + sub rsp, 40h + + // spill potential floating point arguments to stack + movaps xmmword ptr [rsp + 00h], xmm0 + movaps xmmword ptr [rsp + 10h], xmm1 + movaps xmmword ptr [rsp + 20h], xmm2 + movaps xmmword ptr [rsp + 30h], xmm3 + call C_FUNC(_ZN2Js21InterpreterStackFrame29EnsureDynamicInterpreterThunkEPNS_14ScriptFunctionE) + // restore potential floating point arguments from stack + movaps xmm0, xmmword ptr [rsp + 00h] + movaps xmm1, xmmword ptr [rsp + 10h] + movaps xmm2, xmmword ptr [rsp + 20h] + movaps xmm3, xmmword ptr [rsp + 30h] + + add rsp, 40h + + pop r9 + pop r8 + pop rcx + pop rdx + pop rsi + pop rdi + + pop_nonvol_reg rbp + jmp rax +NESTED_END _ZN2Js21InterpreterStackFrame33AsmJsDelayDynamicInterpreterThunkEPNS_16RecyclableObjectENS_8CallInfoEz, _TEXT + + +//============================================================================================================ +// DynamicProfileInfo::EnsureDynamicProfileInfoThunk +//============================================================================================================ +// JavascriptMethod DynamicProfileInfo::EnsureDynamicProfileInfo(ScriptFunction * function) +// extrn _ZN2Js18DynamicProfileInfo24EnsureDynamicProfileInfoEPNS_14ScriptFunctionE + +// Var DynamicProfileInfo::EnsureDynamicProfileInfoThunk(RecyclableObject* function, CallInfo callInfo, ...) +.balign 16 +NESTED_ENTRY _ZN2Js18DynamicProfileInfo29EnsureDynamicProfileInfoThunkEPNS_16RecyclableObjectENS_8CallInfoEz, _TEXT, NoHandler + push_nonvol_reg rbp + lea rbp, [rsp] + + // save argument registers used by custom calling convention + push_register rdi + push_register rsi + + call _ZN2Js18DynamicProfileInfo24EnsureDynamicProfileInfoEPNS_14ScriptFunctionE + + pop_register rsi + pop_register rdi + + pop_nonvol_reg rbp + jmp rax +NESTED_END _ZN2Js18DynamicProfileInfo29EnsureDynamicProfileInfoThunkEPNS_16RecyclableObjectENS_8CallInfoEz, _TEXT + +#endif // _ENABLE_DYNAMIC_THUNKS + + +#ifdef ENABLE_SCRIPT_PROFILING + +//============================================================================================================ +// ScriptContext::ProfileModeDeferredParsingThunk +//============================================================================================================ + +// Js::JavascriptMethod ScriptContext::ProfileModeDeferredParse(ScriptFunction *function) +extrn ?ProfileModeDeferredParse@ScriptContext@Js@@SAP6APEAXPEAVRecyclableObject@2@UCallInfo@2@ZZPEAPEAVScriptFunction@2@@Z : PROC + +// Var ScriptContext::ProfileModeDeferredParsingThunk(RecyclableObject* function, CallInfo callInfo, ...) 
+align 16 +?ProfileModeDeferredParsingThunk@ScriptContext@Js@@SAPEAXPEAVRecyclableObject@2@UCallInfo@2@ZZ PROC FRAME + // save volatile registers + mov qword ptr [rsp + 8h], rcx + mov qword ptr [rsp + 10h], rdx + mov qword ptr [rsp + 18h], r8 + mov qword ptr [rsp + 20h], r9 + + push rbp + .pushreg rbp + lea rbp, [rsp] + .setframe rbp, 0 + .endprolog + + sub rsp, 20h + lea rcx, [rsp + 30h] + call ?ProfileModeDeferredParse@ScriptContext@Js@@SAP6APEAXPEAVRecyclableObject@2@UCallInfo@2@ZZPEAPEAVScriptFunction@2@@Z + + add rsp, 20h + + lea rsp, [rbp] + pop rbp + + // restore volatile registers + mov rcx, qword ptr [rsp + 8h] + mov rdx, qword ptr [rsp + 10h] + mov r8, qword ptr [rsp + 18h] + mov r9, qword ptr [rsp + 20h] + + rex_jmp_reg rax +?ProfileModeDeferredParsingThunk@ScriptContext@Js@@SAPEAXPEAVRecyclableObject@2@UCallInfo@2@ZZ ENDP + +//============================================================================================================ + + +//============================================================================================================ +// ScriptContext::ProfileModeDeferredDeserializeThunk +//============================================================================================================ + +// Js::JavascriptMethod ScriptContext::ProfileModeDeferredDeserialize(ScriptFunction *function) +extrn ?ProfileModeDeferredDeserialize@ScriptContext@Js@@SAP6APEAXPEAVRecyclableObject@2@UCallInfo@2@ZZPEAVScriptFunction@2@@Z : PROC + +// Var ScriptContext::ProfileModeDeferredDeserializeThunk(RecyclableObject* function, CallInfo callInfo, ...) +align 16 +?ProfileModeDeferredDeserializeThunk@ScriptContext@Js@@SAPEAXPEAVRecyclableObject@2@UCallInfo@2@ZZ PROC FRAME + // save volatile registers + mov qword ptr [rsp + 8h], rcx + mov qword ptr [rsp + 10h], rdx + mov qword ptr [rsp + 18h], r8 + mov qword ptr [rsp + 20h], r9 + + push rbp + .pushreg rbp + lea rbp, [rsp] + .setframe rbp, 0 + .endprolog + + sub rsp, 20h + call ?ProfileModeDeferredDeserialize@ScriptContext@Js@@SAP6APEAXPEAVRecyclableObject@2@UCallInfo@2@ZZPEAVScriptFunction@2@@Z + + add rsp, 20h + + lea rsp, [rbp] + pop rbp + + // restore volatile registers + mov rcx, qword ptr [rsp + 8h] + mov rdx, qword ptr [rsp + 10h] + mov r8, qword ptr [rsp + 18h] + mov r9, qword ptr [rsp + 20h] + + rex_jmp_reg rax +?ProfileModeDeferredDeserializeThunk@ScriptContext@Js@@SAPEAXPEAVRecyclableObject@2@UCallInfo@2@ZZ ENDP + +#endif // ENABLE_SCRIPT_PROFILING + + +#ifdef _ENABLE_DYNAMIC_THUNKS + +//============================================================================================================ +// Js::AsmJsInterpreterThunk +//============================================================================================================ + +// extern _ZN2Js21InterpreterStackFrame29GetAsmJsInterpreterEntryPointEPNS_20AsmJsCallStackLayoutE + +// AsmJsInterpreterThunk (AsmJsCallStackLayout *function, ...) 
+.balign 16 +NESTED_ENTRY _ZN2Js21InterpreterStackFrame19InterpreterAsmThunkEPNS_20AsmJsCallStackLayoutE, _TEXT, NoHandler + push_nonvol_reg rbp // push rbp and adjust CFA offset + lea rbp, [rsp] + + set_cfa_register rbp, (2*8) // Set to compute CFA as: rbp + 16 (sizeof: [rbp] [ReturnAddress]) + + sub rsp, 40h + + // spill potential floating point arguments to stack + movaps xmmword ptr [rsp + 00h], xmm0 + movaps xmmword ptr [rsp + 10h], xmm1 + movaps xmmword ptr [rsp + 20h], xmm2 + movaps xmmword ptr [rsp + 30h], xmm3 + + // save argument registers used by custom calling convention + push rdi + push rsi + + // get correct interpreter entrypoint + call C_FUNC(_ZN2Js21InterpreterStackFrame29GetAsmJsInterpreterEntryPointEPNS_20AsmJsCallStackLayoutE) + + pop rsi + pop rdi + + call rax // call appropriate template + + add rsp, 40h + pop_nonvol_reg rbp + ret +NESTED_END _ZN2Js21InterpreterStackFrame19InterpreterAsmThunkEPNS_20AsmJsCallStackLayoutE, _TEXT + +//============================================================================================================ +// Js::AsmJsExternalEntryPoint +//============================================================================================================ + +//extrn ?GetStackSizeForAsmJsUnboxing@Js@@YAHPEAVScriptFunction@1@@Z: PROC +//extrn ?UnboxAsmJsArguments@Js@@YAPEAXPEAVScriptFunction@1@PEAPEAXPEADUCallInfo@1@@Z : PROC +//extrn ?BoxAsmJsReturnValue@Js@@YAPEAXPEAVScriptFunction@1@HNM@Z : PROC +//extrn ?BoxAsmJsReturnValue@Js@@YAPEAXPEAVScriptFunction@1@HNMT__m128@@@Z : PROC + +//extrn ?GetArgsSizesArray@Js@@YAPEAIPEAVScriptFunction@1@@Z : PROC + +// int Js::AsmJsExternalEntryPoint(RecyclableObject* entryObject, CallInfo callInfo, ...) +.balign 16 +NESTED_ENTRY _ZN2Js23AsmJsExternalEntryPointEPNS_16RecyclableObjectENS_8CallInfoEz, _TEXT, NoHandler + push_nonvol_reg rbp // push rbp and adjust CFA offset + lea rbp, [rsp] + + set_cfa_register rbp, (2*8) // Set to compute CFA as: rbp + 16 (sizeof: [rbp] [ReturnAddress]) + + sub rsp, 40h + + mov [rsp + 28h], r12 + mov [rsp + 30h], r13 + + mov r12, rdi // r12: entryObject + mov r13, rsi // r13: callInfo + + // allocate stack space for unboxed values + // int GetStackSizeForAsmJsUnboxing(ScriptFunction* func) + call _ZN2Js28GetStackSizeForAsmJsUnboxingEPNS_14ScriptFunctionE + + mov rcx, r13 // arg4: callInfo + mov rsi, rsp // arg2: orig stack pointer is arg for the unboxing helper + mov r13, rsi // r13: save orig stack pointer, so that we can add it back later + add rsi, 68h // account for the changes we have already made to rsp + + sub rsp, rax // allocate additional stack space for args + // UnboxAsmJsArguments(func, origArgsLoc, argDst, callInfo) + mov rdi, r12 // arg1: func + mov rdx, rsp // arg3: argDst + + // unboxing function also does stack probe + call _ZN2Js19UnboxAsmJsArgumentsEPNS_14ScriptFunctionEPPvPcNS_8CallInfoE + // rax = target function address + + // move first 4 arguments into registers. + // don't know types other than arg0 (which is ScriptFunction *), so put in both xmm and general purpose registers + mov rdi, r12 // arg0: func + + // int GetArgsSizesArray(ScriptFunction* func) + // get args sizes of target asmjs function + // rdi has ScriptFunction* + push r13 + push rax + push rdi + sub rsp, 8h + call _ZN2Js17GetArgsSizesArrayEPNS_14ScriptFunctionE + mov r13, rax // r13: arg size + add rsp, 8h + pop rdi + pop rax + + // NOTE: Below xmm usage is non-standard. + + // Move 3 args to regs per convention. 
rdi already has first arg: ScriptFunction* + push r12 + // r12->unboxed args + lea r12, [rsp + 18h] // rsp + size of(r12 + r13 + ScriptFunction*) + + // r13 is arg size + cmp dword ptr [r13], 10h + je SIMDArg2 + mov rsi, [r12] // arg1 + movq xmm1, qword ptr [r12] // arg1 + add r12, 8h + jmp Arg3 + SIMDArg2: + movups xmm1, xmmword ptr[r12] + add r12, 10h + Arg3: + cmp dword ptr [r13 + 4h], 10h + je SIMDArg3 + mov rdx, [r12] // arg2 + movq xmm2, qword ptr [r12] // arg2 + add r12, 8h + jmp Arg4 + SIMDArg3: + movups xmm2, xmmword ptr[r12] + add r12, 10h + Arg4: + cmp dword ptr [r13 + 8h], 10h + je SIMDArg4 + mov rcx, [r12] // arg3 + movq xmm3, qword ptr [r12] // arg3 + jmp ArgsDone + SIMDArg4: + movups xmm3, xmmword ptr [r12] + + ArgsDone: + pop r12 // r12: func + pop r13 // r13: orig stack pointer + + // "home" arg0. other args were read from stack and already homed. + mov [rsp + 00h], rdi + + // call entry point + call rax + + // Var BoxAsmJsReturnValue(ScriptFunction* func, int intRetVal, double doubleRetVal, float floatRetVal) + mov rdi, r12 // arg0: func + mov rsi, rax // arg1: intRetVal + // movsd xmm0, xmm0 // arg2: doubleRetVal + movss xmm1, xmm0 // arg3: floatRetVal + + + // store SIMD xmm value and pointer to it as argument to box function + sub rsp, 20h + movups [rsp + 10h], xmm0 + lea r12, [rsp + 10h] + mov qword ptr [rsp], r12 + call _ZN2Js19BoxAsmJsReturnValueEPNS_14ScriptFunctionEidfDv4_f + + mov rsp, r13 // restore stack pointer + Epilogue: + mov r12, [rsp + 28h] + mov r13, [rsp + 30h] + + lea rsp, [rbp] + pop_nonvol_reg rbp + + ret +NESTED_END _ZN2Js23AsmJsExternalEntryPointEPNS_16RecyclableObjectENS_8CallInfoEz, _TEXT + +#endif // _ENABLE_DYNAMIC_THUNKS diff --git a/lib/Runtime/Library/CMakeLists.txt b/lib/Runtime/Library/CMakeLists.txt index 747e306bf29..fef1aaeed6b 100644 --- a/lib/Runtime/Library/CMakeLists.txt +++ b/lib/Runtime/Library/CMakeLists.txt @@ -1,3 +1,7 @@ +if(BuildJIT) + add_definitions(-D_ENABLE_ASM_JS=1) +endif() + set(CRLIB_SOURCE_CODES ArgumentsObject.cpp ArgumentsObjectEnumerator.cpp @@ -56,18 +60,19 @@ set(CRLIB_SOURCE_CODES JavascriptRegularExpressionResult.cpp JavascriptSet.cpp JavascriptSetIterator.cpp - # JavascriptSimdBool16x8.cpp - # JavascriptSimdBool32x4.cpp - # JavascriptSimdBool8x16.cpp - # JavascriptSimdFloat32x4.cpp - # JavascriptSimdFloat64x2.cpp - # JavascriptSimdInt16x8.cpp - # JavascriptSimdInt32x4.cpp - # JavascriptSimdInt8x16.cpp - # JavascriptSimdObject.cpp - # JavascriptSimdUint16x8.cpp - # JavascriptSimdUint32x4.cpp - # JavascriptSimdUint8x16.cpp + JavascriptSimdBool16x8.cpp + JavascriptSimdBool32x4.cpp + JavascriptSimdBool8x16.cpp + JavascriptSimdFloat32x4.cpp + JavascriptSimdFloat64x2.cpp + JavascriptSimdInt16x8.cpp + JavascriptSimdInt32x4.cpp + JavascriptSimdInt8x16.cpp + JavascriptSimdObject.cpp + JavascriptSimdType.cpp + JavascriptSimdUint16x8.cpp + JavascriptSimdUint32x4.cpp + JavascriptSimdUint8x16.cpp JavascriptString.cpp JavascriptStringEnumerator.cpp JavascriptStringIterator.cpp @@ -89,19 +94,18 @@ set(CRLIB_SOURCE_CODES RuntimeFunction.cpp RuntimeLibraryPch.cpp ScriptFunction.cpp - # xplat-todo: enable SIMDjs on Linux - # SimdBool16x8Lib.cpp - # SimdBool32x4Lib.cpp - # SimdBool8x16Lib.cpp - # SimdFloat32x4Lib.cpp - # SimdFloat64x2Lib.cpp - # SimdInt16x8Lib.cpp - # SimdInt32x4Lib.cpp - # SimdInt8x16Lib.cpp - # SimdUint16x8Lib.cpp - # SimdUint32x4Lib.cpp - # SimdUint8x16Lib.cpp SharedArrayBuffer.cpp + SimdBool16x8Lib.cpp + SimdBool32x4Lib.cpp + SimdBool8x16Lib.cpp + SimdFloat32x4Lib.cpp + SimdFloat64x2Lib.cpp + 
SimdInt16x8Lib.cpp + SimdInt32x4Lib.cpp + SimdInt8x16Lib.cpp + SimdUint16x8Lib.cpp + SimdUint32x4Lib.cpp + SimdUint8x16Lib.cpp SingleCharString.cpp SparseArraySegment.cpp StackScriptFunction.cpp diff --git a/lib/Runtime/Library/JavascriptFunction.cpp b/lib/Runtime/Library/JavascriptFunction.cpp index b8834693290..4428caeaffb 100644 --- a/lib/Runtime/Library/JavascriptFunction.cpp +++ b/lib/Runtime/Library/JavascriptFunction.cpp @@ -944,7 +944,7 @@ namespace Js { PROBE_STACK(function->GetScriptContext(), Js::Constants::MinStackDefault); - RUNTIME_ARGUMENTS(args, callInfo); + RUNTIME_ARGUMENTS(args, spreadIndices, function, callInfo); return JavascriptFunction::CallSpreadFunction(function, function->GetEntryPoint(), args, spreadIndices); } diff --git a/lib/Runtime/Library/JavascriptFunction.h b/lib/Runtime/Library/JavascriptFunction.h index 97b13753fb9..2712c910c90 100644 --- a/lib/Runtime/Library/JavascriptFunction.h +++ b/lib/Runtime/Library/JavascriptFunction.h @@ -90,7 +90,7 @@ namespace Js Var CallRootFunction(Arguments args, ScriptContext * scriptContext, bool inScript); Var CallRootFunctionInternal(Arguments args, ScriptContext * scriptContext, bool inScript); template - static T CallAsmJsFunction(RecyclableObject * function, void* entryPoint, uint argc, Var * argv); + static T CallAsmJsFunction(RecyclableObject * function, JavascriptMethod entryPoint, uint argc, Var * argv); template static Var CalloutHelper(RecyclableObject* function, Var thisArg, Var overridingNewTarget, Var argArray, ScriptContext* scriptContext); diff --git a/lib/Runtime/Library/amd64/JavascriptFunctionA.S b/lib/Runtime/Library/amd64/JavascriptFunctionA.S index 48ac16ad199..2ef27b57cf4 100644 --- a/lib/Runtime/Library/amd64/JavascriptFunctionA.S +++ b/lib/Runtime/Library/amd64/JavascriptFunctionA.S @@ -107,6 +107,158 @@ LOCAL_LABEL(function_done): NESTED_END amd64_CallFunction, _TEXT +//------------------------------------------------------------------------------ +#ifdef _ENABLE_ASM_JS + +//extrn ?GetStackSizeForAsmJsUnboxing@Js@@YAHPEAVScriptFunction@1@@Z: PROC +//extrn ?GetArgsSizesArray@Js@@YAPEAIPEAVScriptFunction@1@@Z : PROC + +// float CallAsmJsFunction(RecyclableObject *function, JavascriptMethod entryPoint, uint argc, Var *argv); +.balign 16 +LEAF_ENTRY _ZN2Js18JavascriptFunction17CallAsmJsFunctionIfEET_PNS_16RecyclableObjectEPFPvS4_NS_8CallInfoEzEjPS5_, _TEXT + jmp _ZN2Js18JavascriptFunction17CallAsmJsFunctionIiEET_PNS_16RecyclableObjectEPFPvS4_NS_8CallInfoEzEjPS5_ +LEAF_END _ZN2Js18JavascriptFunction17CallAsmJsFunctionIfEET_PNS_16RecyclableObjectEPFPvS4_NS_8CallInfoEzEjPS5_, _TEXT + +// double CallAsmJsFunction(RecyclableObject *function, JavascriptMethod entryPoint, uint argc, Var *argv); +.balign 16 +LEAF_ENTRY _ZN2Js18JavascriptFunction17CallAsmJsFunctionIdEET_PNS_16RecyclableObjectEPFPvS4_NS_8CallInfoEzEjPS5_, _TEXT + jmp _ZN2Js18JavascriptFunction17CallAsmJsFunctionIiEET_PNS_16RecyclableObjectEPFPvS4_NS_8CallInfoEzEjPS5_ +LEAF_END _ZN2Js18JavascriptFunction17CallAsmJsFunctionIdEET_PNS_16RecyclableObjectEPFPvS4_NS_8CallInfoEzEjPS5_, _TEXT + +// __m128 JavascriptFunction::CallAsmJsFunction(RecyclableObject * function, void* entryPoint, uint argc, Var * argv); +//.balign 16 +//??$CallAsmJsFunction@T__m128@@@JavascriptFunction@Js@@SA?AT__m128@@PEAVRecyclableObject@1@PEAXIPEAPEAX@Z PROC FRAME +// jmp _ZN2Js18JavascriptFunction17CallAsmJsFunctionIiEET_PNS_16RecyclableObjectEPFPvS4_NS_8CallInfoEzEjPS5_ 
+//??$CallAsmJsFunction@T__m128@@@JavascriptFunction@Js@@SA?AT__m128@@PEAVRecyclableObject@1@PEAXIPEAPEAX@Z ENDP + + +// int CallAsmJsFunction(RecyclableObject *function, JavascriptMethod entryPoint, uint argc, Var *argv); +.balign 16 +NESTED_ENTRY _ZN2Js18JavascriptFunction17CallAsmJsFunctionIiEET_PNS_16RecyclableObjectEPFPvS4_NS_8CallInfoEzEjPS5_, _TEXT, NoHandler + push_nonvol_reg rbp + mov rbp, rsp + + // Set to compute CFA as: rbp + 2 words (RA, rbp) + set_cfa_register rbp, (2*8) + + push rbx + push r12 + push r13 + push r15 + + // rdi: function + + // rax: entry point + mov rax, rsi + + // rbx: argc + mov rbx, rdx + + // r15: argv + mov r15, rcx + add r15, 8h + + // int GetArgsSizesArray(ScriptFunction* func) + push rax + push rdi + call C_FUNC(_ZN2Js17GetArgsSizesArrayEPNS_14ScriptFunctionE) + mov r12, rax + pop rdi + pop rax + + // int GetStackSizeForAsmJsUnboxing(ScriptFunction* func) + // This will return 0x20 bytes if size is below minimum. Includes space for function*. + push rax + push rdi + call C_FUNC(_ZN2Js28GetStackSizeForAsmJsUnboxingEPNS_14ScriptFunctionE) + mov r13, rax + pop rdi + pop rax + +LOCAL_LABEL(setup_stack_and_reg_args): + + // OP_CallAsmInternal checks stack space + +LOCAL_LABEL(stack_alloc): + sub rsp, r13 + + // copy all args to the new stack frame. + lea r11, [r15] + lea r10, [rsp + 8] // copy after ScriptFunction* + push r13 // save r13 +LOCAL_LABEL(copy_stack_args): + mov rsi, qword ptr [r11] + mov qword ptr [r10], rsi + add r11, 8 + add r10, 8 + sub r13, 8 + cmp r13, 8 // skipped 1 + jg LOCAL_LABEL(copy_stack_args) + pop r13 // restore r13 + + // r12 points to arg size map +LOCAL_LABEL(setup_reg_args_1): + lea r11, [r15] + // argc < 1 ? + cmp rbx, 1h + jl LOCAL_LABEL(setup_args_done) + cmp dword ptr[r12], 10h + je LOCAL_LABEL(SIMDArg1) + mov rsi, qword ptr [r11] + movq xmm1, qword ptr [r11] + add r11, 8h + jmp LOCAL_LABEL(setup_reg_args_2) +LOCAL_LABEL(SIMDArg1): + movups xmm1, xmmword ptr [r11] + add r11, 10h + +LOCAL_LABEL(setup_reg_args_2): + // argc < 2 ? + cmp rbx, 2h + jl LOCAL_LABEL(setup_args_done) + + add r12, 4 + cmp dword ptr[r12], 10h + je LOCAL_LABEL(SIMDArg2) + mov rdx, qword ptr [r11] + movq xmm2, qword ptr [r11] + add r11, 8h + jmp LOCAL_LABEL(setup_reg_args_3) +LOCAL_LABEL(SIMDArg2): + movups xmm2, xmmword ptr [r11] + add r11, 10h + +LOCAL_LABEL(setup_reg_args_3): + // argc < 3 ? + cmp rbx, 3h + jl LOCAL_LABEL(setup_args_done) + add r12, 4 + cmp dword ptr[r12], 10h + je LOCAL_LABEL(SIMDArg3) + mov rcx, qword ptr [r11] + movq xmm3, qword ptr [r11] +LOCAL_LABEL(SIMDArg3): + movups xmm3, xmmword ptr [r11] + +LOCAL_LABEL(setup_args_done): + // "home" arg0. other args already copied. + mov [rsp], rdi + call rax + add rsp, r13 // restore stack + +// done: + pop r15 + pop r13 + pop r12 + pop rbx + pop_nonvol_reg rbp + ret +NESTED_END _ZN2Js18JavascriptFunction17CallAsmJsFunctionIiEET_PNS_16RecyclableObjectEPFPvS4_NS_8CallInfoEzEjPS5_, _TEXT + +#endif // _ENABLE_ASM_JS + + +//------------------------------------------------------------------------------ .balign 16 NESTED_ENTRY _ZN2Js18JavascriptFunction20DeferredParsingThunkEPNS_16RecyclableObjectENS_8CallInfoEz, _TEXT, NoHandler push_nonvol_reg rbp @@ -124,13 +276,18 @@ NESTED_ENTRY _ZN2Js18JavascriptFunction20DeferredParsingThunkEPNS_16RecyclableOb pop_register rsi pop_register rdi - pop_nonvol_reg rbp + mov rdi, qword ptr [rbp + 10h] // re-load function, might have been changed by DeferredParse. + // e.g. 
StackScriptFunction is Boxed + // previous push/pop rdi is for stack alignment + + pop_nonvol_reg rbp jmp rax NESTED_END _ZN2Js18JavascriptFunction20DeferredParsingThunkEPNS_16RecyclableObjectENS_8CallInfoEz, _TEXT +//------------------------------------------------------------------------------ // Var JavascriptFunction::DeferredDeserializeThunk( // RecyclableObject* function, CallInfo callInfo, ...) .balign 16 @@ -151,8 +308,8 @@ NESTED_ENTRY _ZN2Js18JavascriptFunction24DeferredDeserializeThunkEPNS_16Recyclab pop_register rsi pop_register rdi - pop_nonvol_reg rbp + pop_nonvol_reg rbp jmp rax NESTED_END _ZN2Js18JavascriptFunction24DeferredDeserializeThunkEPNS_16RecyclableObjectENS_8CallInfoEz, _TEXT diff --git a/lib/Runtime/Library/amd64/JavascriptFunctionA.asm b/lib/Runtime/Library/amd64/JavascriptFunctionA.asm index d8a27e1eff1..e44fa3de9f8 100644 --- a/lib/Runtime/Library/amd64/JavascriptFunctionA.asm +++ b/lib/Runtime/Library/amd64/JavascriptFunctionA.asm @@ -227,33 +227,33 @@ extrn ?GetArgsSizesArray@Js@@YAPEAIPEAVScriptFunction@1@@Z : PROC ; float CallAsmJsFunction(RecyclableObject *function, JavascriptMethod entryPoint, uint argc, Var *argv); align 16 -??$CallAsmJsFunction@M@JavascriptFunction@Js@@SAMPEAVRecyclableObject@1@PEAXIPEAPEAX@Z PROC FRAME +??$CallAsmJsFunction@M@JavascriptFunction@Js@@SAMPEAVRecyclableObject@1@P6APEAX0UCallInfo@1@ZZIPEAPEAX@Z PROC FRAME .setframe rbp, 0 .endprolog - rex_jmp_reg ??$CallAsmJsFunction@H@JavascriptFunction@Js@@SAHPEAVRecyclableObject@1@PEAXIPEAPEAX@Z -??$CallAsmJsFunction@M@JavascriptFunction@Js@@SAMPEAVRecyclableObject@1@PEAXIPEAPEAX@Z ENDP + rex_jmp_reg ??$CallAsmJsFunction@H@JavascriptFunction@Js@@SAHPEAVRecyclableObject@1@P6APEAX0UCallInfo@1@ZZIPEAPEAX@Z +??$CallAsmJsFunction@M@JavascriptFunction@Js@@SAMPEAVRecyclableObject@1@P6APEAX0UCallInfo@1@ZZIPEAPEAX@Z ENDP ; double CallAsmJsFunction(RecyclableObject *function, JavascriptMethod entryPoint, uint argc, Var *argv); align 16 -??$CallAsmJsFunction@N@JavascriptFunction@Js@@SANPEAVRecyclableObject@1@PEAXIPEAPEAX@Z PROC FRAME +??$CallAsmJsFunction@N@JavascriptFunction@Js@@SANPEAVRecyclableObject@1@P6APEAX0UCallInfo@1@ZZIPEAPEAX@Z PROC FRAME .setframe rbp, 0 .endprolog - rex_jmp_reg ??$CallAsmJsFunction@H@JavascriptFunction@Js@@SAHPEAVRecyclableObject@1@PEAXIPEAPEAX@Z -??$CallAsmJsFunction@N@JavascriptFunction@Js@@SANPEAVRecyclableObject@1@PEAXIPEAPEAX@Z ENDP + rex_jmp_reg ??$CallAsmJsFunction@H@JavascriptFunction@Js@@SAHPEAVRecyclableObject@1@P6APEAX0UCallInfo@1@ZZIPEAPEAX@Z +??$CallAsmJsFunction@N@JavascriptFunction@Js@@SANPEAVRecyclableObject@1@P6APEAX0UCallInfo@1@ZZIPEAPEAX@Z ENDP ; __m128 JavascriptFunction::CallAsmJsFunction(RecyclableObject * function, void* entryPoint, uint argc, Var * argv); align 16 -??$CallAsmJsFunction@T__m128@@@JavascriptFunction@Js@@SA?AT__m128@@PEAVRecyclableObject@1@PEAXIPEAPEAX@Z PROC FRAME +??$CallAsmJsFunction@T__m128@@@JavascriptFunction@Js@@SA?AT__m128@@PEAVRecyclableObject@1@P6APEAX0UCallInfo@1@ZZIPEAPEAX@Z PROC FRAME .setframe rbp, 0 .endprolog - rex_jmp_reg ??$CallAsmJsFunction@H@JavascriptFunction@Js@@SAHPEAVRecyclableObject@1@PEAXIPEAPEAX@Z -??$CallAsmJsFunction@T__m128@@@JavascriptFunction@Js@@SA?AT__m128@@PEAVRecyclableObject@1@PEAXIPEAPEAX@Z ENDP + rex_jmp_reg ??$CallAsmJsFunction@H@JavascriptFunction@Js@@SAHPEAVRecyclableObject@1@P6APEAX0UCallInfo@1@ZZIPEAPEAX@Z +??$CallAsmJsFunction@T__m128@@@JavascriptFunction@Js@@SA?AT__m128@@PEAVRecyclableObject@1@P6APEAX0UCallInfo@1@ZZIPEAPEAX@Z ENDP ; int CallAsmJsFunction(RecyclableObject *function, 
JavascriptMethod entryPoint, uint argc, Var *argv); align 16 -??$CallAsmJsFunction@H@JavascriptFunction@Js@@SAHPEAVRecyclableObject@1@PEAXIPEAPEAX@Z PROC FRAME +??$CallAsmJsFunction@H@JavascriptFunction@Js@@SAHPEAVRecyclableObject@1@P6APEAX0UCallInfo@1@ZZIPEAPEAX@Z PROC FRAME ; save these to stack for interpreter mov qword ptr [rsp + 8h], rcx @@ -408,7 +408,7 @@ done: pop rbx ret -??$CallAsmJsFunction@H@JavascriptFunction@Js@@SAHPEAVRecyclableObject@1@PEAXIPEAPEAX@Z ENDP +??$CallAsmJsFunction@H@JavascriptFunction@Js@@SAHPEAVRecyclableObject@1@P6APEAX0UCallInfo@1@ZZIPEAPEAX@Z ENDP endif ;; _ENABLE_ASM_JS diff --git a/lib/Runtime/PlatformAgnostic/Platform/Linux/UnicodeText.ICU.cpp b/lib/Runtime/PlatformAgnostic/Platform/Linux/UnicodeText.ICU.cpp index 4c7f83777cb..ecc66c63305 100644 --- a/lib/Runtime/PlatformAgnostic/Platform/Linux/UnicodeText.ICU.cpp +++ b/lib/Runtime/PlatformAgnostic/Platform/Linux/UnicodeText.ICU.cpp @@ -252,7 +252,8 @@ namespace PlatformAgnostic Assert(false); } - if (U_FAILURE(errorCode)) + if (U_FAILURE(errorCode) && + !(destLength == 0 && errorCode == U_BUFFER_OVERFLOW_ERROR)) { *pErrorOut = TranslateUErrorCode(errorCode); return -1; diff --git a/lib/Runtime/Runtime.h b/lib/Runtime/Runtime.h index 5af6397cecb..dec99cff9cd 100644 --- a/lib/Runtime/Runtime.h +++ b/lib/Runtime/Runtime.h @@ -147,12 +147,12 @@ namespace Js class ActivationObject; class JavascriptNumber; class JavascriptNumberObject; - + class ScriptContextProfiler; struct RestrictedErrorStrings; class JavascriptError; - + //SIMD_JS // SIMD class JavascriptSIMDObject; @@ -372,6 +372,7 @@ enum tagDEBUG_EVENT_INFO_TYPE #include "JITClient.h" #else #include "JITTypes.h" +#include "../JITClient/JITManager.h" #endif #include "Base/SourceHolder.h" @@ -453,7 +454,7 @@ enum tagDEBUG_EVENT_INFO_TYPE #include "Library/MathLibrary.h" -// xplat-todo: We should get rid of this altogether and move the functionality it +// xplat-todo: We should get rid of this altogether and move the functionality it // encapsulates to the Platform Agnostic Interface #ifdef _WIN32 #if defined(ENABLE_GLOBALIZATION) || ENABLE_UNICODE_API diff --git a/pal/inc/pal.h b/pal/inc/pal.h index a82d6ce1aac..8d498a67100 100644 --- a/pal/inc/pal.h +++ b/pal/inc/pal.h @@ -3829,6 +3829,18 @@ GetModuleHandleW( #define GetModuleHandle GetModuleHandleW #endif +PALIMPORT +BOOL +PALAPI +GetModuleHandleExW( + IN DWORD dwFlags, + IN OPTIONAL LPCWSTR lpModuleName, + OUT HMODULE *phModule); + +#ifdef UNICODE +#define GetModuleHandleEx GetModuleHandleExW +#endif + // Get base address of the module containing a given symbol PALAPI LPCVOID @@ -5325,7 +5337,9 @@ BitScanForward( IN UINT qwMask) { unsigned char bRet = FALSE; - int iIndex = __builtin_ffsl(qwMask); + static_assert(sizeof(qwMask) <= sizeof(int), + "use correct __builtin_ffs??? variant"); + int iIndex = __builtin_ffs(qwMask); if (iIndex != 0) { // Set the Index after deducting unity @@ -5346,7 +5360,9 @@ BitScanForward64( IN UINT64 qwMask) { unsigned char bRet = FALSE; - int iIndex = __builtin_ffsl(qwMask); + static_assert(sizeof(qwMask) <= sizeof(long long), + "use correct __builtin_ffs??? variant"); + int iIndex = __builtin_ffsll(qwMask); if (iIndex != 0) { // Set the Index after deducting unity @@ -5370,6 +5386,8 @@ BitScanReverse( unsigned char bRet = FALSE; if (qwMask != 0) { + static_assert(sizeof(qwMask) <= sizeof(unsigned int), + "use correct __builtin_clz??? 
variant"); int countLeadingZero = __builtin_clz(qwMask); *Index = (DWORD)(sizeof(qwMask) * 8 - 1 - countLeadingZero); bRet = TRUE; @@ -5391,7 +5409,9 @@ BitScanReverse64( unsigned char bRet = FALSE; if (qwMask != 0) { - int countLeadingZero = __builtin_clz(qwMask); + static_assert(sizeof(qwMask) <= sizeof(unsigned long long), + "use correct __builtin_clz??? variant"); + int countLeadingZero = __builtin_clzll(qwMask); *Index = (DWORD)(sizeof(qwMask) * 8 - 1 - countLeadingZero); bRet = TRUE; } diff --git a/pal/inc/rt/palrt.h b/pal/inc/rt/palrt.h index 568c5490511..5dbbd6c7f9a 100644 --- a/pal/inc/rt/palrt.h +++ b/pal/inc/rt/palrt.h @@ -890,11 +890,6 @@ Remember to fix the errcode defintion in safecrt.h. #define _vscprintf _vscprintf_unsafe #define _vscwprintf _vscwprintf_unsafe -#define sprintf_s _snprintf -#define swprintf_s _snwprintf -#define vsprintf_s _vsnprintf -#define vswprintf_s _vsnwprintf - extern "C++" { #include diff --git a/pal/inc/rt/safecrt.h b/pal/inc/rt/safecrt.h index bf9b4fc398d..99e3d9fb079 100644 --- a/pal/inc/rt/safecrt.h +++ b/pal/inc/rt/safecrt.h @@ -319,7 +319,7 @@ typedef int errno_t; /* standard */ #endif #endif -/* put a null terminator at the beginning of the string and then calls _SAFECRT__FILL_STRING; +/* put a null terminator at the beginning of the string and then calls _SAFECRT__FILL_STRING; * assume that the string has been validated with _SAFECRT__VALIDATE_STRING */ #if !defined(_SAFECRT__RESET_STRING) @@ -464,7 +464,7 @@ void __cdecl _invalid_parameter(const WCHAR *_Message, const WCHAR *_FunctionNam #endif /* _SAFECRT_DEFINE_TCS_MACROS */ /* strcpy_s */ -/* +/* * strcpy_s, wcscpy_s copy string _Src into _Dst; * will call _SAFECRT_INVALID_PARAMETER if string _Src does not fit into _Dst */ @@ -485,18 +485,18 @@ errno_t __cdecl strcpy_s(char (&_Dst)[_SizeInBytes], const char *_Src) #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl strcpy_s(char *_Dst, size_t _SizeInBytes, const char *_Src) { char *p; size_t available; - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; while ((*p++ = *_Src++) != 0 && --available > 0) @@ -508,11 +508,11 @@ errno_t __cdecl strcpy_s(char *_Dst, size_t _SizeInBytes, const char *_Src) _SAFECRT__RESET_STRING(_Dst, _SizeInBytes); _SAFECRT__RETURN_BUFFER_TOO_SMALL(_Dst, _SizeInBytes); } - + _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif /* wcscpy_s */ @@ -529,23 +529,23 @@ errno_t __cdecl wcscpy_s(WCHAR (&_Dst)[_SizeInWords], const WCHAR *_Src) #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl wcscpy_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src) { WCHAR *p; size_t available; - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInWords); _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInWords); - + p = _Dst; available = _SizeInWords; while ((*p++ = *_Src++) != 0 && --available > 0) { } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInWords); @@ -554,7 +554,7 @@ errno_t __cdecl wcscpy_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src) _SAFECRT__FILL_STRING(_Dst, _SizeInWords, _SizeInWords - available + 1); return 0; } - + #endif /* _mbscpy_s */ @@ -573,23 +573,23 @@ errno_t __cdecl _mbscpy_s(unsigned char (&_Dst)[_SizeInBytes], const unsigned ch #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl 
_mbscpy_s(unsigned char *_Dst, size_t _SizeInBytes, const unsigned char *_Src) { unsigned char *p; size_t available; - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; while ((*p++ = *_Src++) != 0 && --available > 0) { } - + if (available == 0) { if (*_Src == 0 && _SAFECRT__ISMBBLEAD(p[-1])) @@ -610,17 +610,17 @@ errno_t __cdecl _mbscpy_s(unsigned char *_Dst, size_t _SizeInBytes, const unsign _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif #endif /* _SAFECRT_DEFINE_MBS_FUNCTIONS */ /* strncpy_s */ -/* +/* * strncpy_s, wcsncpy_s copy at max _Count characters from string _Src into _Dst; * string _Dst will always be null-terminated; * will call _SAFECRT_INVALID_PARAMETER if there is not enough space in _Dst; - * if _Count == _TRUNCATE, we will copy as many characters as we can from _Src into _Dst, and + * if _Count == _TRUNCATE, we will copy as many characters as we can from _Src into _Dst, and * return STRUNCATE if _Src does not entirely fit into _Dst (we will not call _SAFECRT_INVALID_PARAMETER); * if _Count == 0, then (_Dst == nullptr && _SizeInBytes == 0) is allowed */ @@ -637,19 +637,19 @@ errno_t __cdecl strncpy_s(char (&_Dst)[_SizeInBytes], const char *_Src, size_t _ #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl strncpy_s(char *_Dst, size_t _SizeInBytes, const char *_Src, size_t _Count) { char *p; size_t available; - + if (_Count == 0 && _Dst == nullptr && _SizeInBytes == 0) { /* this case is allowed; nothing to do */ return 0; } - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); if (_Count == 0) @@ -659,7 +659,7 @@ errno_t __cdecl strncpy_s(char *_Dst, size_t _SizeInBytes, const char *_Src, siz return 0; } _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; if (_Count == _TRUNCATE) @@ -678,7 +678,7 @@ errno_t __cdecl strncpy_s(char *_Dst, size_t _SizeInBytes, const char *_Src, siz *p = 0; } } - + if (available == 0) { if (_Count == _TRUNCATE) @@ -692,7 +692,7 @@ errno_t __cdecl strncpy_s(char *_Dst, size_t _SizeInBytes, const char *_Src, siz _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif /* wcsncpy_s */ @@ -709,19 +709,19 @@ errno_t __cdecl wcsncpy_s(WCHAR (&_Dst)[_SizeInWords], const WCHAR *_Src, size_t #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl wcsncpy_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src, size_t _Count) { WCHAR *p; size_t available; - + if (_Count == 0 && _Dst == nullptr && _SizeInWords == 0) { /* this case is allowed; nothing to do */ return 0; } - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInWords); if (_Count == 0) @@ -731,7 +731,7 @@ errno_t __cdecl wcsncpy_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src, s return 0; } _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInWords); - + p = _Dst; available = _SizeInWords; if (_Count == _TRUNCATE) @@ -750,7 +750,7 @@ errno_t __cdecl wcsncpy_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src, s *p = 0; } } - + if (available == 0) { if (_Count == _TRUNCATE) @@ -764,7 +764,7 @@ errno_t __cdecl wcsncpy_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src, s _SAFECRT__FILL_STRING(_Dst, _SizeInWords, _SizeInWords - available + 1); return 0; } - + #endif /* _mbsnbcpy_s */ @@ -783,19 +783,19 @@ errno_t 
__cdecl _mbsnbcpy_s(unsigned char (&_Dst)[_SizeInBytes], const unsigned #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _mbsnbcpy_s(unsigned char *_Dst, size_t _SizeInBytes, const unsigned char *_Src, size_t _CountInBytes) { unsigned char *p; size_t available; - + if (_CountInBytes == 0 && _Dst == nullptr && _SizeInBytes == 0) { /* this case is allowed; nothing to do */ return 0; } - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); if (_CountInBytes == 0) @@ -805,7 +805,7 @@ errno_t __cdecl _mbsnbcpy_s(unsigned char *_Dst, size_t _SizeInBytes, const unsi return 0; } _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; if (_CountInBytes == _TRUNCATE) @@ -824,7 +824,7 @@ errno_t __cdecl _mbsnbcpy_s(unsigned char *_Dst, size_t _SizeInBytes, const unsi *p++ = 0; } } - + if (available == 0) { if ((*_Src == 0 || _CountInBytes == 1) && _SAFECRT__ISMBBLEAD(p[-1])) @@ -858,7 +858,7 @@ errno_t __cdecl _mbsnbcpy_s(unsigned char *_Dst, size_t _SizeInBytes, const unsi _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif #endif /* _SAFECRT_DEFINE_MBS_FUNCTIONS */ @@ -879,19 +879,19 @@ errno_t __cdecl _mbsncpy_s(unsigned char (&_Dst)[_SizeInBytes], const unsigned c #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _mbsncpy_s(unsigned char *_Dst, size_t _SizeInBytes, const unsigned char *_Src, size_t _CountInChars) { unsigned char *p; size_t available; - + if (_CountInChars == 0 && _Dst == nullptr && _SizeInBytes == 0) { /* this case is allowed; nothing to do */ return 0; } - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); if (_CountInChars == 0) @@ -901,7 +901,7 @@ errno_t __cdecl _mbsncpy_s(unsigned char *_Dst, size_t _SizeInBytes, const unsig return 0; } _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; if (_CountInChars == _TRUNCATE) @@ -946,7 +946,7 @@ errno_t __cdecl _mbsncpy_s(unsigned char *_Dst, size_t _SizeInBytes, const unsig *p++ = 0; } } - + if (available == 0) { if (_CountInChars == _TRUNCATE) @@ -968,13 +968,13 @@ errno_t __cdecl _mbsncpy_s(unsigned char *_Dst, size_t _SizeInBytes, const unsig _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif #endif /* _SAFECRT_DEFINE_MBS_FUNCTIONS */ /* strcat_s */ -/* +/* * strcat_s, wcscat_s append string _Src to _Dst; * will call _SAFECRT_INVALID_PARAMETER if there is not enough space in _Dst */ @@ -991,17 +991,17 @@ errno_t __cdecl strcat_s(char (&_Dst)[_SizeInBytes], const char *_Src) #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl strcat_s(char *_Dst, size_t _SizeInBytes, const char *_Src) { char *p; size_t available; - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; while (available > 0 && *p != 0) @@ -1009,17 +1009,17 @@ errno_t __cdecl strcat_s(char *_Dst, size_t _SizeInBytes, const char *_Src) p++; available--; } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInBytes); _SAFECRT__RETURN_DEST_NOT_NULL_TERMINATED(_Dst, _SizeInBytes); } - + while ((*p++ = *_Src++) != 0 && --available > 0) { } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInBytes); @@ -1028,7 +1028,7 @@ errno_t __cdecl strcat_s(char *_Dst, size_t _SizeInBytes, const char 
*_Src) _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif /* wcscat_s */ @@ -1045,17 +1045,17 @@ errno_t __cdecl wcscat_s(WCHAR (&_Dst)[_SizeInWords], const WCHAR *_Src) #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl wcscat_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src) { WCHAR *p; size_t available; - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInWords); _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInWords); - + p = _Dst; available = _SizeInWords; while (available > 0 && *p != 0) @@ -1063,17 +1063,17 @@ errno_t __cdecl wcscat_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src) p++; available--; } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInWords); _SAFECRT__RETURN_DEST_NOT_NULL_TERMINATED(_Dst, _SizeInWords); } - + while ((*p++ = *_Src++) != 0 && --available > 0) { } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInWords); @@ -1082,7 +1082,7 @@ errno_t __cdecl wcscat_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src) _SAFECRT__FILL_STRING(_Dst, _SizeInWords, _SizeInWords - available + 1); return 0; } - + #endif /* _mbscat_s */ @@ -1101,17 +1101,17 @@ errno_t __cdecl _mbscat_s(unsigned char (&_Dst)[_SizeInBytes], const unsigned ch #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _mbscat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsigned char *_Src) { unsigned char *p; size_t available; - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; while (available > 0 && *p != 0) @@ -1119,7 +1119,7 @@ errno_t __cdecl _mbscat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsign p++; available--; } - + if (available == 0) { if (*p == 0 && _SAFECRT__ISMBBLEAD(p[-1])) @@ -1142,11 +1142,11 @@ errno_t __cdecl _mbscat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsign *p = 0; available++; } - + while ((*p++ = *_Src++) != 0 && --available > 0) { } - + if (available == 0) { if (*_Src == 0 && _SAFECRT__ISMBBLEAD(p[-1])) @@ -1167,17 +1167,17 @@ errno_t __cdecl _mbscat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsign _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif #endif /* _SAFECRT_DEFINE_MBS_FUNCTIONS */ /* strncat_s */ -/* +/* * strncat_s, wcsncat_s append at max _Count characters from string _Src to _Dst; * string _Dst will always be null-terminated; * will call _SAFECRT_INVALID_PARAMETER if there is not enough space in _Dst; - * if _Count == _TRUNCATE, we will append as many characters as we can from _Src to _Dst, and + * if _Count == _TRUNCATE, we will append as many characters as we can from _Src to _Dst, and * return STRUNCATE if _Src does not entirely fit into _Dst (we will not call _SAFECRT_INVALID_PARAMETER); * if _Count == 0, then (_Dst == nullptr && _SizeInBytes == 0) is allowed */ @@ -1194,7 +1194,7 @@ errno_t __cdecl strncat_s(char (&_Dst)[_SizeInBytes], const char *_Src, size_t _ #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl strncat_s(char *_Dst, size_t _SizeInBytes, const char *_Src, size_t _Count) { @@ -1211,7 +1211,7 @@ errno_t __cdecl strncat_s(char *_Dst, size_t _SizeInBytes, const char *_Src, siz { _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInBytes); } - + p = _Dst; available = _SizeInBytes; while (available > 0 && *p != 0) @@ -1219,13 
+1219,13 @@ errno_t __cdecl strncat_s(char *_Dst, size_t _SizeInBytes, const char *_Src, siz p++; available--; } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInBytes); _SAFECRT__RETURN_DEST_NOT_NULL_TERMINATED(_Dst, _SizeInBytes); } - + if (_Count == _TRUNCATE) { while ((*p++ = *_Src++) != 0 && --available > 0) @@ -1243,7 +1243,7 @@ errno_t __cdecl strncat_s(char *_Dst, size_t _SizeInBytes, const char *_Src, siz *p = 0; } } - + if (available == 0) { if (_Count == _TRUNCATE) @@ -1257,7 +1257,7 @@ errno_t __cdecl strncat_s(char *_Dst, size_t _SizeInBytes, const char *_Src, siz _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif /* wcsncat_s */ @@ -1274,7 +1274,7 @@ errno_t __cdecl wcsncat_s(WCHAR (&_Dst)[_SizeInWords], const WCHAR *_Src, size_t #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl wcsncat_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src, size_t _Count) { @@ -1291,7 +1291,7 @@ errno_t __cdecl wcsncat_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src, s { _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInWords); } - + p = _Dst; available = _SizeInWords; while (available > 0 && *p != 0) @@ -1299,13 +1299,13 @@ errno_t __cdecl wcsncat_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src, s p++; available--; } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInWords); _SAFECRT__RETURN_DEST_NOT_NULL_TERMINATED(_Dst, _SizeInWords); } - + if (_Count == _TRUNCATE) { while ((*p++ = *_Src++) != 0 && --available > 0) @@ -1323,7 +1323,7 @@ errno_t __cdecl wcsncat_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src, s *p = 0; } } - + if (available == 0) { if (_Count == _TRUNCATE) @@ -1337,7 +1337,7 @@ errno_t __cdecl wcsncat_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Src, s _SAFECRT__FILL_STRING(_Dst, _SizeInWords, _SizeInWords - available + 1); return 0; } - + #endif /* _mbsnbcat_s */ @@ -1356,7 +1356,7 @@ errno_t __cdecl _mbsnbcat_s(unsigned char (&_Dst)[_SizeInBytes], const unsigned #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _mbsnbcat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsigned char *_Src, size_t _CountInBytes) { @@ -1373,7 +1373,7 @@ errno_t __cdecl _mbsnbcat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsi { _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInBytes); } - + p = _Dst; available = _SizeInBytes; while (available > 0 && *p != 0) @@ -1381,7 +1381,7 @@ errno_t __cdecl _mbsnbcat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsi p++; available--; } - + if (available == 0) { if (*p == 0 && _SAFECRT__ISMBBLEAD(p[-1])) @@ -1404,7 +1404,7 @@ errno_t __cdecl _mbsnbcat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsi *p = 0; available++; } - + if (_CountInBytes == _TRUNCATE) { while ((*p++ = *_Src++) != 0 && --available > 0) @@ -1422,7 +1422,7 @@ errno_t __cdecl _mbsnbcat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsi *p++ = 0; } } - + if (available == 0) { if ((*_Src == 0 || _CountInBytes == 1) && _SAFECRT__ISMBBLEAD(p[-1])) @@ -1456,7 +1456,7 @@ errno_t __cdecl _mbsnbcat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsi _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif #endif /* _SAFECRT_DEFINE_MBS_FUNCTIONS */ @@ -1477,7 +1477,7 @@ errno_t __cdecl _mbsncat_s(unsigned char (&_Dst)[_SizeInBytes], const unsigned c #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl 
_mbsncat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsigned char *_Src, size_t _CountInChars) { @@ -1494,7 +1494,7 @@ errno_t __cdecl _mbsncat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsig { _SAFECRT__VALIDATE_POINTER_RESET_STRING(_Src, _Dst, _SizeInBytes); } - + p = _Dst; available = _SizeInBytes; while (available > 0 && *p != 0) @@ -1502,7 +1502,7 @@ errno_t __cdecl _mbsncat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsig p++; available--; } - + if (available == 0) { if (*p == 0 && _SAFECRT__ISMBBLEAD(p[-1])) @@ -1525,7 +1525,7 @@ errno_t __cdecl _mbsncat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsig *p = 0; available++; } - + if (_CountInChars == _TRUNCATE) { while ((*p++ = *_Src++) != 0 && --available > 0) @@ -1568,7 +1568,7 @@ errno_t __cdecl _mbsncat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsig *p++ = 0; } } - + if (available == 0) { if (_CountInChars == _TRUNCATE) @@ -1590,13 +1590,13 @@ errno_t __cdecl _mbsncat_s(unsigned char *_Dst, size_t _SizeInBytes, const unsig _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif #endif /* _SAFECRT_DEFINE_MBS_FUNCTIONS */ /* _strset_s */ -/* +/* * _strset_s, _wcsset_s ; * will call _SAFECRT_INVALID_PARAMETER if _Dst is not null terminated. */ @@ -1613,23 +1613,23 @@ errno_t __cdecl _strset_s(char (&_Dst)[_SizeInBytes], int _Value) #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _strset_s(char *_Dst, size_t _SizeInBytes, int _Value) { char *p; size_t available; - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; while (*p != 0 && --available > 0) { *p++ = (char)_Value; } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInBytes); @@ -1638,7 +1638,7 @@ errno_t __cdecl _strset_s(char *_Dst, size_t _SizeInBytes, int _Value) _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif /* _wcsset_s */ @@ -1655,23 +1655,23 @@ errno_t __cdecl _wcsset_s(WCHAR (&_Dst)[_SizeInWords], WCHAR _Value) #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _wcsset_s(WCHAR *_Dst, size_t _SizeInWords, WCHAR _Value) { WCHAR *p; size_t available; - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInWords); - + p = _Dst; available = _SizeInWords; while (*p != 0 && --available > 0) { *p++ = (WCHAR)_Value; } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInWords); @@ -1680,7 +1680,7 @@ errno_t __cdecl _wcsset_s(WCHAR *_Dst, size_t _SizeInWords, WCHAR _Value) _SAFECRT__FILL_STRING(_Dst, _SizeInWords, _SizeInWords - available + 1); return 0; } - + #endif /* _mbsset_s */ @@ -1699,7 +1699,7 @@ errno_t __cdecl _mbsset_s(unsigned char (&_Dst)[_SizeInBytes], unsigned int _Val #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _mbsset_s(unsigned char *_Dst, size_t _SizeInBytes, unsigned int _Value) { @@ -1707,10 +1707,10 @@ errno_t __cdecl _mbsset_s(unsigned char *_Dst, size_t _SizeInBytes, unsigned int unsigned char *p; size_t available; unsigned char highval, lowval; - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; highval = (unsigned char)(_Value >> 8); @@ -1757,7 +1757,7 @@ errno_t __cdecl _mbsset_s(unsigned char *_Dst, size_t _SizeInBytes, unsigned int *p++ = lowval; } } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInBytes); @@ -1773,13 +1773,13 @@ errno_t __cdecl 
_mbsset_s(unsigned char *_Dst, size_t _SizeInBytes, unsigned int return 0; } } - + #endif #endif /* _SAFECRT_DEFINE_MBS_FUNCTIONS */ /* _strnset_s */ -/* +/* * _strnset_s, _wcsnset_s ; * will call _SAFECRT_INVALID_PARAMETER if _Dst is not null terminated. */ @@ -1796,13 +1796,13 @@ errno_t __cdecl _strnset_s(char (&_Dst)[_SizeInBytes], int _Value, size_t _Count #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _strnset_s(char *_Dst, size_t _SizeInBytes, int _Value, size_t _Count) { char *p; size_t available; - + /* validation section */ if (_Count == 0 && _Dst == nullptr && _SizeInBytes == 0) { @@ -1810,7 +1810,7 @@ errno_t __cdecl _strnset_s(char *_Dst, size_t _SizeInBytes, int _Value, size_t _ return 0; } _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; while (*p != 0 && _Count > 0 && --available > 0) @@ -1818,7 +1818,7 @@ errno_t __cdecl _strnset_s(char *_Dst, size_t _SizeInBytes, int _Value, size_t _ *p++ = (char)_Value; --_Count; } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInBytes); @@ -1831,7 +1831,7 @@ errno_t __cdecl _strnset_s(char *_Dst, size_t _SizeInBytes, int _Value, size_t _ _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, _SizeInBytes - available + 1); return 0; } - + #endif /* _wcsnset_s */ @@ -1848,13 +1848,13 @@ errno_t __cdecl _wcsnset_s(WCHAR (&_Dst)[_SizeInWords], WCHAR _Value, size_t _Co #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _wcsnset_s(WCHAR *_Dst, size_t _SizeInWords, WCHAR _Value, size_t _Count) { WCHAR *p; size_t available; - + /* validation section */ if (_Count == 0 && _Dst == nullptr && _SizeInWords == 0) { @@ -1862,7 +1862,7 @@ errno_t __cdecl _wcsnset_s(WCHAR *_Dst, size_t _SizeInWords, WCHAR _Value, size_ return 0; } _SAFECRT__VALIDATE_STRING(_Dst, _SizeInWords); - + p = _Dst; available = _SizeInWords; while (*p != 0 && _Count > 0 && --available > 0) @@ -1870,7 +1870,7 @@ errno_t __cdecl _wcsnset_s(WCHAR *_Dst, size_t _SizeInWords, WCHAR _Value, size_ *p++ = (WCHAR)_Value; --_Count; } - + if (available == 0) { _SAFECRT__RESET_STRING(_Dst, _SizeInWords); @@ -1883,7 +1883,7 @@ errno_t __cdecl _wcsnset_s(WCHAR *_Dst, size_t _SizeInWords, WCHAR _Value, size_ _SAFECRT__FILL_STRING(_Dst, _SizeInWords, _SizeInWords - available + 1); return 0; } - + #endif /* _mbsnbset_s */ @@ -1902,7 +1902,7 @@ errno_t __cdecl _mbsnbset_s(unsigned char (&_Dst)[_SizeInBytes], unsigned int _V #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _mbsnbset_s(unsigned char *_Dst, size_t _SizeInBytes, unsigned int _Value, size_t _CountInBytes) { @@ -1910,7 +1910,7 @@ errno_t __cdecl _mbsnbset_s(unsigned char *_Dst, size_t _SizeInBytes, unsigned i unsigned char *p; size_t available; unsigned char highval, lowval; - + /* validation section */ if (_CountInBytes == 0 && _Dst == nullptr && _SizeInBytes == 0) { @@ -1918,7 +1918,7 @@ errno_t __cdecl _mbsnbset_s(unsigned char *_Dst, size_t _SizeInBytes, unsigned i return 0; } _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; highval = (unsigned char)(_Value >> 8); @@ -1988,7 +1988,7 @@ errno_t __cdecl _mbsnbset_s(unsigned char *_Dst, size_t _SizeInBytes, unsigned i return 0; } } - + #endif #endif /* _SAFECRT_DEFINE_MBS_FUNCTIONS */ @@ -2009,7 +2009,7 @@ errno_t __cdecl _mbsnset_s(unsigned char (&_Dst)[_SizeInBytes], unsigned int _Va #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _mbsnset_s(unsigned char 
*_Dst, size_t _SizeInBytes, unsigned int _Value, size_t _CountInChars) { @@ -2017,7 +2017,7 @@ errno_t __cdecl _mbsnset_s(unsigned char *_Dst, size_t _SizeInBytes, unsigned in unsigned char *p; size_t available; unsigned char highval, lowval; - + /* validation section */ if (_CountInChars == 0 && _Dst == nullptr && _SizeInBytes == 0) { @@ -2025,7 +2025,7 @@ errno_t __cdecl _mbsnset_s(unsigned char *_Dst, size_t _SizeInBytes, unsigned in return 0; } _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); - + p = _Dst; available = _SizeInBytes; highval = (unsigned char)(_Value >> 8); @@ -2094,7 +2094,7 @@ errno_t __cdecl _mbsnset_s(unsigned char *_Dst, size_t _SizeInBytes, unsigned in return 0; } } - + #endif #endif /* _SAFECRT_DEFINE_MBS_FUNCTIONS */ @@ -2115,7 +2115,7 @@ errno_t __cdecl _mbccpy_s(unsigned char (&_Dst)[_SizeInBytes], int *_PCopied, co #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _mbccpy_s(unsigned char *_Dst, size_t _SizeInBytes, int *_PCopied, const unsigned char *_Src) { @@ -2127,7 +2127,7 @@ errno_t __cdecl _mbccpy_s(unsigned char *_Dst, size_t _SizeInBytes, int *_PCopie *_Dst = '\0'; _SAFECRT__RETURN_EINVAL; } - + /* copy */ if (_SAFECRT__ISMBBLEAD(*_Src)) { @@ -2154,7 +2154,7 @@ errno_t __cdecl _mbccpy_s(unsigned char *_Dst, size_t _SizeInBytes, int *_PCopie *_Dst = *_Src; if (_PCopied != nullptr) { *_PCopied = 1; }; } - + return 0; } #endif @@ -2162,7 +2162,7 @@ errno_t __cdecl _mbccpy_s(unsigned char *_Dst, size_t _SizeInBytes, int *_PCopie #endif /* _SAFECRT_DEFINE_MBS_FUNCTIONS */ /* strtok_s */ -/* +/* * strtok_s, wcstok_s ; * uses _Context to keep track of the position in the string. */ @@ -2170,7 +2170,7 @@ _SAFECRT__EXTERN_C char * __cdecl strtok_s(char *_String, const char *_Control, char **_Context); #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE char * __cdecl strtok_s(char *_String, const char *_Control, char **_Context) { @@ -2178,23 +2178,23 @@ char * __cdecl strtok_s(char *_String, const char *_Control, char **_Context) const unsigned char *ctl = (const unsigned char *)_Control; unsigned char map[32]; int count; - + /* validation section */ _SAFECRT__VALIDATE_POINTER_ERROR_RETURN(_Context, EINVAL, nullptr); _SAFECRT__VALIDATE_POINTER_ERROR_RETURN(_Control, EINVAL, nullptr); _SAFECRT__VALIDATE_CONDITION_ERROR_RETURN(_String != nullptr || *_Context != nullptr, EINVAL, nullptr); - + /* Clear control map */ for (count = 0; count < 32; count++) { map[count] = 0; } - + /* Set bits in delimiter table */ do { map[*ctl >> 3] |= (1 << (*ctl & 7)); } while (*ctl++); - + /* If string is nullptr, set str to the saved * pointer (i.e., continue breaking tokens out of the string * from the last strtok call) */ @@ -2206,7 +2206,7 @@ char * __cdecl strtok_s(char *_String, const char *_Control, char **_Context) { str = (unsigned char *)*_Context; } - + /* Find beginning of token (skip over leading delimiters). Note that * there is no token iff this loop sets str to point to the terminal * null (*str == 0) */ @@ -2214,9 +2214,9 @@ char * __cdecl strtok_s(char *_String, const char *_Control, char **_Context) { str++; } - + _String = (char *)str; - + /* Find the end of the token. If it is not the end of the string, * put a null there. */ for ( ; *str != 0 ; str++ ) @@ -2227,10 +2227,10 @@ char * __cdecl strtok_s(char *_String, const char *_Control, char **_Context) break; } } - + /* Update context */ *_Context = (char *)str; - + /* Determine if a token has been found. 
*/ if (_String == (char *)str) { @@ -2248,24 +2248,24 @@ _SAFECRT__EXTERN_C WCHAR * __cdecl wcstok_s(WCHAR *_String, const WCHAR *_Control, WCHAR **_Context); #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE WCHAR * __cdecl wcstok_s(WCHAR *_String, const WCHAR *_Control, WCHAR **_Context) { WCHAR *token; const WCHAR *ctl; - + /* validation section */ _SAFECRT__VALIDATE_POINTER_ERROR_RETURN(_Context, EINVAL, nullptr); _SAFECRT__VALIDATE_POINTER_ERROR_RETURN(_Control, EINVAL, nullptr); _SAFECRT__VALIDATE_CONDITION_ERROR_RETURN(_String != nullptr || *_Context != nullptr, EINVAL, nullptr); - + /* If string==nullptr, continue with previous string */ if (!_String) { _String = *_Context; } - + /* Find beginning of token (skip over leading delimiters). Note that * there is no token iff this loop sets string to point to the terminal null. */ for ( ; *_String != 0 ; _String++) @@ -2277,9 +2277,9 @@ WCHAR * __cdecl wcstok_s(WCHAR *_String, const WCHAR *_Control, WCHAR **_Context break; } } - + token = _String; - + /* Find the end of the token. If it is not the end of the string, * put a null there. */ for ( ; *_String != 0 ; _String++) @@ -2292,10 +2292,10 @@ WCHAR * __cdecl wcstok_s(WCHAR *_String, const WCHAR *_Control, WCHAR **_Context break; } } - + /* Update the context */ *_Context = _String; - + /* Determine if a token has been found. */ if (token == _String) { @@ -2315,25 +2315,25 @@ _SAFECRT__EXTERN_C unsigned char * __cdecl _mbstok_s(unsigned char *_String, const unsigned char *_Control, unsigned char **_Context); #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE unsigned char * __cdecl _mbstok_s(unsigned char *_String, const unsigned char *_Control, unsigned char **_Context) { unsigned char *token; const unsigned char *ctl; int dbc; - + /* validation section */ _SAFECRT__VALIDATE_POINTER_ERROR_RETURN(_Context, EINVAL, nullptr); _SAFECRT__VALIDATE_POINTER_ERROR_RETURN(_Control, EINVAL, nullptr); _SAFECRT__VALIDATE_CONDITION_ERROR_RETURN(_String != nullptr || *_Context != nullptr, EINVAL, nullptr); - + /* If string==nullptr, continue with previous string */ if (!_String) { _String = *_Context; } - + /* Find beginning of token (skip over leading delimiters). Note that * there is no token iff this loop sets string to point to the terminal null. */ for ( ; *_String != 0; _String++) @@ -2369,9 +2369,9 @@ unsigned char * __cdecl _mbstok_s(unsigned char *_String, const unsigned char *_ } } } - + token = _String; - + /* Find the end of the token. If it is not the end of the string, * put a null there. */ for ( ; *_String != 0; _String++) @@ -2413,10 +2413,10 @@ unsigned char * __cdecl _mbstok_s(unsigned char *_String, const unsigned char *_ } } } - + /* Update the context */ *_Context = _String; - + /* Determine if a token has been found. 
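A minimal usage sketch of the strtok_s/wcstok_s pattern implemented above, not part of the patch; it assumes the safecrt declaration of strtok_s is in scope, and tokenizeDemo and the sample string are illustrative. Because the caller owns the _Context pointer, the function is re-entrant, unlike plain strtok:

    #include <cstdio>

    void tokenizeDemo()
    {
        char buf[] = "one,two;three";
        char *ctx = nullptr;
        // First call passes the string; later calls pass nullptr and resume from ctx.
        for (char *tok = strtok_s(buf, ",;", &ctx);
             tok != nullptr;
             tok = strtok_s(nullptr, ",;", &ctx))
        {
            printf("%s\n", tok);   // prints "one", "two", "three"
        }
    }
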
*/ if (token == _String) { @@ -2484,7 +2484,7 @@ size_t __cdecl wcsnlen(const WCHAR *inString, size_t inMaxSize) #endif // PAL_STDCPP_COMPAT /* _makepath_s */ -/* +/* * _makepath_s, _wmakepath_s build up a path starting from the specified components; * will call _SAFECRT_INVALID_PARAMETER if there is not enough space in _Dst; * any of _Drive, _Dir, _Filename and _Ext can be nullptr @@ -2502,17 +2502,17 @@ errno_t __cdecl _makepath_s(char (&_Dst)[_SizeInBytes], const char *_Drive, cons #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _makepath_s(char *_Dst, size_t _SizeInBytes, const char *_Drive, const char *_Dir, const char *_Filename, const char *_Ext) { size_t written; const char *p; char *d; - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInBytes); - + /* copy drive */ written = 0; d = _Dst; @@ -2526,7 +2526,7 @@ errno_t __cdecl _makepath_s(char *_Dst, size_t _SizeInBytes, const char *_Drive, *d++ = *_Drive; *d++ = ':'; } - + /* copy dir */ p = _Dir; if (p != nullptr && *p != 0) @@ -2538,7 +2538,7 @@ errno_t __cdecl _makepath_s(char *_Dst, size_t _SizeInBytes, const char *_Drive, } *d++ = *p++; } while (*p != 0); - + p = (const char *)_SAFECRT__MBSDEC((const unsigned char *)_Dir, (const unsigned char *)p); if (*p != '/' && *p != '\\') { @@ -2549,12 +2549,12 @@ errno_t __cdecl _makepath_s(char *_Dst, size_t _SizeInBytes, const char *_Drive, *d++ = '\\'; } } - + /* copy fname */ p = _Filename; if (p != nullptr) { - while (*p != 0) + while (*p != 0) { if(++written >= _SizeInBytes) { @@ -2563,7 +2563,7 @@ errno_t __cdecl _makepath_s(char *_Dst, size_t _SizeInBytes, const char *_Drive, *d++ = *p++; } } - + /* copy extension; check to see if a '.' needs to be inserted */ p = _Ext; if (p != nullptr) @@ -2585,7 +2585,7 @@ errno_t __cdecl _makepath_s(char *_Dst, size_t _SizeInBytes, const char *_Drive, *d++ = *p++; } } - + if(++written > _SizeInBytes) { goto error_return; @@ -2593,7 +2593,7 @@ errno_t __cdecl _makepath_s(char *_Dst, size_t _SizeInBytes, const char *_Drive, *d = 0; _SAFECRT__FILL_STRING(_Dst, _SizeInBytes, written); return 0; - + error_return: _SAFECRT__RESET_STRING(_Dst, _SizeInBytes); _SAFECRT__RETURN_BUFFER_TOO_SMALL(_Dst, _SizeInBytes); @@ -2616,17 +2616,17 @@ errno_t __cdecl _wmakepath_s(WCHAR (&_Dst)[_SizeInWords], const WCHAR *_Drive, c #endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _wmakepath_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Drive, const WCHAR *_Dir, const WCHAR *_Filename, const WCHAR *_Ext) { size_t written; const WCHAR *p; WCHAR *d; - + /* validation section */ _SAFECRT__VALIDATE_STRING(_Dst, _SizeInWords); - + /* copy drive */ written = 0; d = _Dst; @@ -2640,7 +2640,7 @@ errno_t __cdecl _wmakepath_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Dri *d++ = *_Drive; *d++ = L':'; } - + /* copy dir */ p = _Dir; if (p != nullptr && *p != 0) @@ -2652,7 +2652,7 @@ errno_t __cdecl _wmakepath_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Dri } *d++ = *p++; } while (*p != 0); - + p = p - 1; if (*p != L'/' && *p != L'\\') { @@ -2663,12 +2663,12 @@ errno_t __cdecl _wmakepath_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Dri *d++ = L'\\'; } } - + /* copy fname */ p = _Filename; if (p != nullptr) { - while (*p != 0) + while (*p != 0) { if(++written >= _SizeInWords) { @@ -2677,7 +2677,7 @@ errno_t __cdecl _wmakepath_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Dri *d++ = *p++; } } - + /* copy extension; check to see if a '.' 
needs to be inserted */ p = _Ext; if (p != nullptr) @@ -2699,7 +2699,7 @@ errno_t __cdecl _wmakepath_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Dri *d++ = *p++; } } - + if(++written > _SizeInWords) { goto error_return; @@ -2707,7 +2707,7 @@ errno_t __cdecl _wmakepath_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Dri *d = 0; _SAFECRT__FILL_STRING(_Dst, _SizeInWords, written); return 0; - + error_return: _SAFECRT__RESET_STRING(_Dst, _SizeInWords); _SAFECRT__RETURN_BUFFER_TOO_SMALL(_Dst, _SizeInWords); @@ -2717,7 +2717,7 @@ errno_t __cdecl _wmakepath_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Dri #endif /* _splitpath_s */ -/* +/* * _splitpath_s, _wsplitpath_s decompose a path into the specified components; * will call _SAFECRT_INVALID_PARAMETER if there is not enough space in * any of _Drive, _Dir, _Filename and _Ext; @@ -2737,7 +2737,7 @@ errno_t __cdecl _splitpath_s( /* no C++ overload for _splitpath_s */ #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _splitpath_s( const char *_Path, @@ -2753,7 +2753,7 @@ errno_t __cdecl _splitpath_s( int drive_set = 0; size_t length = 0; int bEinval = 0; - + /* validation section */ _SAFECRT__VALIDATE_POINTER(_Path); if ((_Drive == nullptr && _DriveSize != 0) || (_Drive != nullptr && _DriveSize == 0)) @@ -2772,13 +2772,13 @@ errno_t __cdecl _splitpath_s( { goto error_einval; } - + /* check if _Path begins with the longpath prefix */ if (_Path[0] == '\\' && _Path[1] == '\\' && _Path[2] == '?' && _Path[3] == '\\') { _Path += 4; } - + /* extract drive letter and ':', if any */ if (!drive_set) { @@ -2809,7 +2809,7 @@ errno_t __cdecl _splitpath_s( } } } - + /* extract path string, if any. _Path now points to the first character * of the path, if any, or the filename or extension, if no path was * specified. Scan ahead for the last occurence, if any, of a '/' or @@ -2833,7 +2833,7 @@ errno_t __cdecl _splitpath_s( { tmp++; } - else + else { if (*tmp == '/' || *tmp == '\\') { @@ -2846,8 +2846,8 @@ errno_t __cdecl _splitpath_s( } } } - - if (last_slash != nullptr) + + if (last_slash != nullptr) { /* found a path - copy up through last_slash or max characters * allowed, whichever is smaller @@ -2870,7 +2870,7 @@ errno_t __cdecl _splitpath_s( _SAFECRT__RESET_STRING(_Dir, _DirSize); } } - + /* extract file name and extension, if any. Path now points to the * first character of the file name, if any, or the extension if no * file name was given. Dot points to the '.' 
beginning the extension, @@ -2920,12 +2920,12 @@ errno_t __cdecl _splitpath_s( _SAFECRT__RESET_STRING(_Ext, _ExtSize); } } - + return 0; - + error_einval: bEinval = 1; - + error_erange: if (_Drive != nullptr && _DriveSize > 0) { @@ -2943,12 +2943,12 @@ errno_t __cdecl _splitpath_s( { _SAFECRT__RESET_STRING(_Ext, _ExtSize); } - + if (bEinval) { _SAFECRT__RETURN_EINVAL; } - + _SAFECRT__RETURN_BUFFER_TOO_SMALL(_Strings, _StringSizes); /* should never happen, but compiler can't tell */ return EINVAL; @@ -2965,10 +2965,23 @@ errno_t __cdecl _wsplitpath_s( WCHAR *_Ext, size_t _ExtSize ); -/* no C++ overload for _wsplitpath_s */ +#if defined(__cplusplus) && _SAFECRT_USE_CPP_OVERLOADS +template +inline +errno_t __cdecl _wsplitpath_s( + const WCHAR *_Path, + WCHAR (&_Drive)[_DriveSize], + WCHAR (&_Dir)[_DirSize], + WCHAR (&_Filename)[_FilenameSize], + WCHAR (&_Ext)[_ExtSize]) +{ + return _wsplitpath_s(_Path, + _Drive, _DriveSize, _Dir, _DirSize, _Filename, _FilenameSize, _Ext, _ExtSize); +} +#endif #if _SAFECRT_USE_INLINES || _SAFECRT_IMPL - + _SAFECRT__INLINE errno_t __cdecl _wsplitpath_s( const WCHAR *_Path, @@ -2984,7 +2997,7 @@ errno_t __cdecl _wsplitpath_s( int drive_set = 0; size_t length = 0; int bEinval = 0; - + /* validation section */ _SAFECRT__VALIDATE_POINTER(_Path); if ((_Drive == nullptr && _DriveSize != 0) || (_Drive != nullptr && _DriveSize == 0)) @@ -3003,13 +3016,13 @@ errno_t __cdecl _wsplitpath_s( { goto error_einval; } - + /* check if _Path begins with the longpath prefix */ if (_Path[0] == L'\\' && _Path[1] == L'\\' && _Path[2] == L'?' && _Path[3] == L'\\') { _Path += 4; } - + /* extract drive letter and ':', if any */ if (!drive_set) { @@ -3040,7 +3053,7 @@ errno_t __cdecl _wsplitpath_s( } } } - + /* extract path string, if any. _Path now points to the first character * of the path, if any, or the filename or extension, if no path was * specified. Scan ahead for the last occurence, if any, of a '/' or @@ -3065,8 +3078,8 @@ errno_t __cdecl _wsplitpath_s( } } } - - if (last_slash != nullptr) + + if (last_slash != nullptr) { /* found a path - copy up through last_slash or max characters * allowed, whichever is smaller @@ -3089,7 +3102,7 @@ errno_t __cdecl _wsplitpath_s( _SAFECRT__RESET_STRING(_Dir, _DirSize); } } - + /* extract file name and extension, if any. Path now points to the * first character of the file name, if any, or the extension if no * file name was given. Dot points to the '.' 
beginning the extension, @@ -3139,12 +3152,12 @@ errno_t __cdecl _wsplitpath_s( _SAFECRT__RESET_STRING(_Ext, _ExtSize); } } - + return 0; - + error_einval: bEinval = 1; - + error_erange: if (_Drive != nullptr && _DriveSize > 0) { @@ -3162,12 +3175,12 @@ errno_t __cdecl _wsplitpath_s( { _SAFECRT__RESET_STRING(_Ext, _ExtSize); } - + if (bEinval) { _SAFECRT__RETURN_EINVAL; } - + _SAFECRT__RETURN_BUFFER_TOO_SMALL(_Strings, _StringSizes); /* should never happen, but compiler can't tell */ return EINVAL; @@ -3175,7 +3188,7 @@ errno_t __cdecl _wsplitpath_s( #endif /* sprintf_s, vsprintf_s */ -/* +/* * sprintf_s, swprintf_s, vsprintf_s, vswprintf_s format a string and copy it into _Dst; * need safecrt.lib and msvcrt.dll; * will call _SAFECRT_INVALID_PARAMETER if there is not enough space in _Dst; @@ -3186,9 +3199,9 @@ errno_t __cdecl _wsplitpath_s( * _SizeInBytes/_SizeInWords must be <= (INT_MAX / sizeof(char/WCHAR)); * cannot be used without safecrt.lib */ -_SAFECRT__EXTERN_C +EXTERN_C int __cdecl sprintf_s(char *_Dst, size_t _SizeInBytes, const char *_Format, ...); -_SAFECRT__EXTERN_C +EXTERN_C int __cdecl vsprintf_s(char *_Dst, size_t _SizeInBytes, const char *_Format, va_list _ArgList); #if defined(__cplusplus) && _SAFECRT_USE_CPP_OVERLOADS @@ -3215,15 +3228,15 @@ int __cdecl vsprintf_s(char (&_Dst)[_SizeInBytes], const char *_Format, va_list /* no inline version of sprintf_s, vsprintf_s */ /* swprintf_s, vswprintf_s */ -_SAFECRT__EXTERN_C +EXTERN_C int __cdecl swprintf_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Format, ...); -_SAFECRT__EXTERN_C +EXTERN_C int __cdecl vswprintf_s(WCHAR *_Dst, size_t _SizeInWords, const WCHAR *_Format, va_list _ArgList); #if defined(__cplusplus) && _SAFECRT_USE_CPP_OVERLOADS template inline -int __cdecl swprintf_s(char (&_Dst)[_SizeInWords], const char *_Format, ...) +int __cdecl swprintf_s(WCHAR (&_Dst)[_SizeInWords], const WCHAR *_Format, ...) { int ret; va_list _ArgList; @@ -3235,7 +3248,7 @@ int __cdecl swprintf_s(char (&_Dst)[_SizeInWords], const char *_Format, ...) 
template inline -int __cdecl vswprintf_s(char (&_Dst)[_SizeInWords], const char *_Format, va_list _ArgList) +int __cdecl vswprintf_s(WCHAR (&_Dst)[_SizeInWords], const WCHAR *_Format, va_list _ArgList) { return vswprintf_s(_Dst, _SizeInWords, _Format, _ArgList); } @@ -3244,7 +3257,7 @@ int __cdecl vswprintf_s(char (&_Dst)[_SizeInWords], const char *_Format, va_list /* no inline version of swprintf_s, vswprintf_s */ /* _snprintf_s, _vsnprintf_s */ -/* +/* * _snprintf_s, _snwprintf_s, _vsnprintf_s, _vsnwprintf_s format a string and copy at max _Count characters into _Dst; * need safecrt.lib and msvcrt.dll; * string _Dst will always be null-terminated; @@ -3255,7 +3268,7 @@ int __cdecl vswprintf_s(char (&_Dst)[_SizeInWords], const char *_Format, va_list * return a negative number if something goes wrong with mbcs conversions (we will not call _SAFECRT_INVALID_PARAMETER); * _SizeInBytes/_SizeInWords must be <= (INT_MAX / sizeof(char/WCHAR)); * cannot be used without safecrt.lib; - * if _Count == _TRUNCATE, we will copy into _Dst as many characters as we can, and + * if _Count == _TRUNCATE, we will copy into _Dst as many characters as we can, and * return -1 if the formatted string does not entirely fit into _Dst (we will not call _SAFECRT_INVALID_PARAMETER); * if _Count == 0, then (_Dst == nullptr && _SizeInBytes == 0) is allowed */ diff --git a/pal/src/CMakeLists.txt b/pal/src/CMakeLists.txt index 5447d95963c..47fa30cc362 100644 --- a/pal/src/CMakeLists.txt +++ b/pal/src/CMakeLists.txt @@ -106,7 +106,9 @@ set(SOURCES safecrt/strcpy_s.c safecrt/strncat_s.c safecrt/strncpy_s.c + safecrt/swprintf.c safecrt/vsprintf.c + safecrt/vswprint.c safecrt/wcscpy_s.c safecrt/wcsncpy_s.c safecrt/xtoa_s.c @@ -146,6 +148,7 @@ set(SOURCES objmgr/shmobject.cpp objmgr/shmobjectmanager.cpp shmemory/shmemory.cpp + synchobj/event.cpp synchobj/mutex.cpp synchmgr/synchcontrollers.cpp synchmgr/synchmanager.cpp diff --git a/pal/src/include/pal/event.hpp b/pal/src/include/pal/event.hpp new file mode 100644 index 00000000000..98eeaee5dbb --- /dev/null +++ b/pal/src/include/pal/event.hpp @@ -0,0 +1,69 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +/*++ + + + +Module Name: + + event.hpp + +Abstract: + + Event object structure definition. 
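Looking back at the safecrt header changes above: the patch adds a C++ array-reference overload for _wsplitpath_s (and corrects the swprintf_s/vswprintf_s overloads to take WCHAR buffers), so fixed-size buffers pass their sizes implicitly. A minimal sketch under those declarations, not part of the patch; splitDemo and somePath are placeholders and the buffer sizes are illustrative only:

    void splitDemo(const WCHAR *somePath)
    {
        WCHAR drive[4], dir[260], fname[260], ext[32];   // sizes illustrative only
        // The template overload deduces the four sizes and forwards to the
        // nine-argument _wsplitpath_s(_Path, _Drive, _DriveSize, ...).
        _wsplitpath_s(somePath, drive, dir, fname, ext);
    }
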
+ + + +--*/ + +#ifndef _PAL_EVENT_H_ +#define _PAL_EVENT_H_ + +#include "corunix.hpp" + +namespace CorUnix +{ + extern CObjectType otManualResetEvent; + extern CObjectType otAutoResetEvent; + + PAL_ERROR + InternalCreateEvent( + CPalThread *pThread, + LPSECURITY_ATTRIBUTES lpEventAttributes, + BOOL bManualReset, + BOOL bInitialState, + LPCWSTR lpName, + HANDLE *phEvent + ); + + PAL_ERROR + InternalSetEvent( + CPalThread *pThread, + HANDLE hEvent, + BOOL fSetEvent + ); + + PAL_ERROR + InternalOpenEvent( + CPalThread *pThread, + DWORD dwDesiredAccess, + BOOL bInheritHandle, + LPCWSTR lpName, + HANDLE *phEvent + ); + +} + +#endif //PAL_EVENT_H_ + + + + + + + + + + diff --git a/pal/src/loader/module.cpp b/pal/src/loader/module.cpp index b78b7730a38..8fa5ffd2371 100644 --- a/pal/src/loader/module.cpp +++ b/pal/src/loader/module.cpp @@ -573,6 +573,17 @@ GetModuleHandleW( return (HMODULE)&exe_module; } +BOOL +PALAPI +GetModuleHandleExW( + IN DWORD dwFlags, + IN OPTIONAL LPCWSTR lpModuleName, + OUT HMODULE *phModule) +{ + *phModule = NULL; + return FALSE; +} + /* Function: PAL_LoadLibraryDirect diff --git a/pal/src/safecrt/vswprint.c b/pal/src/safecrt/vswprint.c new file mode 100644 index 00000000000..d172305610d --- /dev/null +++ b/pal/src/safecrt/vswprint.c @@ -0,0 +1,293 @@ +// +// Copyright (c) Microsoft. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. +// + +/*** +*vswprint.c - print formatted data into a string from var arg list +* +*Purpose: +* defines vswprintf(), _vswprintf_c and _vsnwprintf() - print formatted output to +* a string, get the data from an argument ptr instead of explicit +* arguments. +* +*******************************************************************************/ + + +#include +#include +#include +#include "internal_securecrt.h" + +#include "mbusafecrt_internal.h" + +typedef int (*WOUTPUTFN)(miniFILE *, const char16_t *, va_list); + +static int _vswprintf_helper( WOUTPUTFN outfn, char16_t *string, size_t count, const char16_t *format, va_list ap ); +static int _vscwprintf_helper (WOUTPUTFN outfn, const char16_t *format, va_list ap ); + +/*** +*ifndef _COUNT_ +*int _vswprintf(string, format, ap) - print formatted data to string from arg ptr +*else +*ifndef _SWPRINTFS_ERROR_RETURN_FIX +*int _vsnwprintf(string, cnt, format, ap) - print formatted data to string from arg ptr +*else +*int _vswprintf_c(string, cnt, format, ...) - print formatted data to string +*endif +*endif +* +*Purpose: +* Prints formatted data, but to a string and gets data from an argument +* pointer. +* Sets up a FILE so file i/o operations can be used, make string look +* like a huge buffer to it, but _flsbuf will refuse to flush it if it +* fills up. Appends '\0' to make it a true string. +* +* Allocate the 'fake' _iob[] entryit statically instead of on +* the stack so that other routines can assume that _iob[] entries are in +* are in DGROUP and, thus, are near. +* +*ifdef _COUNT_ +*ifndef _SWPRINTFS_ERROR_RETURN_FIX +* The _vsnwprintf() flavor takes a count argument that is +* the max number of bytes that should be written to the +* user's buffer. +* We don't expose this function directly in the headers. +*else +* The _vswprintf_c() flavor does the same thing as the _snwprintf +* above, but, it also fixes an issue in the return value in the case +* when there isn't enough space to write the null terminator +* We don't fix this issue in _vsnwprintf because of backward +* compatibility. 
In new code, however, _vsnwprintf is #defined to +* _vswprintf_c so users get the fix. +* +*endif +* +* Multi-thread: (1) Since there is no stream, this routine must never try +* to get the stream lock (i.e., there is no stream lock either). (2) +* Also, since there is only one statically allocated 'fake' iob, we must +* lock/unlock to prevent collisions. +* +*Entry: +* char16_t *string - place to put destination string +*ifdef _COUNT_ +* size_t count - max number of bytes to put in buffer +*endif +* char16_t *format - format string, describes format of data +* va_list ap - varargs argument pointer +* +*Exit: +* returns number of wide characters in string +* returns -2 if the string has been truncated (only in _vsnprintf_helper) +* returns -1 in other error cases +* +*Exceptions: +* +*******************************************************************************/ + +int __cdecl _vswprintf_helper ( + WOUTPUTFN woutfn, + char16_t *string, + size_t count, + const char16_t *format, + va_list ap + ) +{ + miniFILE str; + miniFILE *outfile = &str; + int retval; + + _VALIDATE_RETURN( (format != NULL), EINVAL, -1); + + _VALIDATE_RETURN( (count == 0) || (string != NULL), EINVAL, -1 ); + + outfile->_flag = _IOWRT|_IOSTRG; + outfile->_ptr = outfile->_base = (char *) string; + + if(count>(INT_MAX/sizeof(char16_t))) + { + /* old-style functions allow any large value to mean unbounded */ + outfile->_cnt = INT_MAX; + } + else + { + outfile->_cnt = (int)(count*sizeof(char16_t)); + } + + retval = woutfn(outfile, format, ap ); + + if(string==NULL) + { + return retval; + } + + if((retval >= 0) && (_putc_nolock('\0',outfile) != EOF) && (_putc_nolock('\0',outfile) != EOF)) + return(retval); + + string[count - 1] = 0; + if (outfile->_cnt < 0) + { + /* the buffer was too small; we return -2 to indicate truncation */ + return -2; + } + return -1; +} + +int __cdecl _vswprintf_s ( + char16_t *string, + size_t sizeInWords, + const char16_t *format, + va_list ap + ) +{ + int retvalue = -1; + + /* validation section */ + _VALIDATE_RETURN(format != NULL, EINVAL, -1); + _VALIDATE_RETURN(string != NULL && sizeInWords > 0, EINVAL, -1); + + retvalue = _vswprintf_helper(_woutput_s, string, sizeInWords, format, ap); + if (retvalue < 0) + { + string[0] = 0; + _SECURECRT__FILL_STRING(string, sizeInWords, 1); + } + if (retvalue == -2) + { + _VALIDATE_RETURN(("Buffer too small" && 0), ERANGE, -1); + } + if (retvalue >= 0) + { + _SECURECRT__FILL_STRING(string, sizeInWords, retvalue + 1); + } + + return retvalue; +} + +int __cdecl vswprintf_s ( + char16_t *string, + size_t sizeInWords, + const char16_t *format, + va_list ap + ) +{ + return _vswprintf_s(string, sizeInWords, format, ap); +} + +int __cdecl _vsnwprintf_s ( + char16_t *string, + size_t sizeInWords, + size_t count, + const char16_t *format, + va_list ap + ) +{ + int retvalue = -1; + errno_t save_errno = 0; + + /* validation section */ + _VALIDATE_RETURN(format != NULL, EINVAL, -1); + if (count == 0 && string == NULL && sizeInWords == 0) + { + /* this case is allowed; nothing to do */ + return 0; + } + _VALIDATE_RETURN(string != NULL && sizeInWords > 0, EINVAL, -1); + + if (sizeInWords > count) + { + save_errno = errno; + retvalue = _vswprintf_helper(_woutput_s, string, count + 1, format, ap); + if (retvalue == -2) + { + /* the string has been truncated, return -1 */ + _SECURECRT__FILL_STRING(string, sizeInWords, count + 1); + if (errno == ERANGE) + { + errno = save_errno; + } + return -1; + } + } + else /* sizeInWords <= count */ + { + save_errno = errno; + retvalue = 
_vswprintf_helper(_woutput_s, string, sizeInWords, format, ap); + string[sizeInWords - 1] = 0; + /* we allow truncation if count == _TRUNCATE */ + if (retvalue == -2 && count == _TRUNCATE) + { + if (errno == ERANGE) + { + errno = save_errno; + } + return -1; + } + } + + if (retvalue < 0) + { + string[0] = 0; + _SECURECRT__FILL_STRING(string, sizeInWords, 1); + if (retvalue == -2) + { + _VALIDATE_RETURN(("Buffer too small" && 0), ERANGE, -1); + } + return -1; + } + + _SECURECRT__FILL_STRING(string, sizeInWords, retvalue + 1); + + return (retvalue < 0 ? -1 : retvalue); +} + +/*** +* _vscwprintf() - counts the number of character needed to print the formatted +* data +* +*Purpose: +* Counts the number of characters in the fotmatted data. +* +*Entry: +* char16_t *format - format string, describes format of data +* va_list ap - varargs argument pointer +* +*Exit: +* returns number of characters needed to print formatted data. +* +*Exceptions: +* +*******************************************************************************/ + +#ifndef _COUNT_ + +int __cdecl _vscwprintf_helper ( + WOUTPUTFN woutfn, + const char16_t *format, + va_list ap + ) +{ + miniFILE str; + miniFILE *outfile = &str; + int retval; + + _VALIDATE_RETURN( (format != NULL), EINVAL, -1); + + outfile->_cnt = INT_MAX; //MAXSTR; + outfile->_flag = _IOWRT|_IOSTRG; + outfile->_ptr = outfile->_base = NULL; + + retval = woutfn(outfile, format, ap); + return(retval); +} + +int __cdecl _vscwprintf ( + const char16_t *format, + va_list ap + ) +{ + return _vscwprintf_helper(_woutput_s, format, ap); +} + +#endif /* _COUNT_ */ diff --git a/pal/src/synchobj/event.cpp b/pal/src/synchobj/event.cpp new file mode 100644 index 00000000000..54addad51cc --- /dev/null +++ b/pal/src/synchobj/event.cpp @@ -0,0 +1,589 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
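The new pal/src/synchobj/event.cpp below supplies the event half of the PAL's Win32-style synchronization surface. A rough usage sketch of the API it implements, not part of the patch: named events are rejected with ERROR_NOT_SUPPORTED, and WaitForSingleObject/CloseHandle are assumed to be provided elsewhere in the PAL rather than by this diff:

    HANDLE h = CreateEventW(NULL,    // lpEventAttributes: ignored, no Win32 object security
                            TRUE,    // bManualReset: stays signaled until ResetEvent
                            FALSE,   // bInitialState: start unsignaled
                            NULL);   // lpName must be NULL; named events are unsupported
    if (h != NULL)
    {
        SetEvent(h);                        // signal the event
        WaitForSingleObject(h, INFINITE);   // assumed PAL API; manual-reset stays signaled
        ResetEvent(h);                      // back to unsignaled
        CloseHandle(h);                     // assumed PAL API, not part of this diff
    }
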
+ +/*++ + + + +Module Name: + + event.cpp + +Abstract: + + Implementation of event synchronization object as described in + the WIN32 API + +Revision History: + + + +--*/ + +#include "pal/event.hpp" +#include "pal/thread.hpp" +#include "pal/dbgmsg.h" + +using namespace CorUnix; + +/* ------------------- Definitions ------------------------------*/ +SET_DEFAULT_DEBUG_CHANNEL(SYNC); + +CObjectType CorUnix::otManualResetEvent( + otiManualResetEvent, + NULL, // No cleanup routine + NULL, // No initialization routine + 0, // No immutable data + 0, // No process local data + 0, // No shared data + EVENT_ALL_ACCESS, // Currently ignored (no Win32 security) + CObjectType::SecuritySupported, + CObjectType::SecurityInfoNotPersisted, + CObjectType::UnnamedObject, + CObjectType::LocalDuplicationOnly, + CObjectType::WaitableObject, + CObjectType::ObjectCanBeUnsignaled, + CObjectType::ThreadReleaseHasNoSideEffects, + CObjectType::NoOwner + ); + +CObjectType CorUnix::otAutoResetEvent( + otiAutoResetEvent, + NULL, // No cleanup routine + NULL, // No initialization routine + 0, // No immutable data + 0, // No process local data + 0, // No shared data + EVENT_ALL_ACCESS, // Currently ignored (no Win32 security) + CObjectType::SecuritySupported, + CObjectType::SecurityInfoNotPersisted, + CObjectType::UnnamedObject, + CObjectType::LocalDuplicationOnly, + CObjectType::WaitableObject, + CObjectType::ObjectCanBeUnsignaled, + CObjectType::ThreadReleaseAltersSignalCount, + CObjectType::NoOwner + ); + +PalObjectTypeId rgEventIds[] = {otiManualResetEvent, otiAutoResetEvent}; +CAllowedObjectTypes aotEvent(rgEventIds, sizeof(rgEventIds)/sizeof(rgEventIds[0])); + +/*++ +Function: + CreateEventA + +Note: + lpEventAttributes currentely ignored: + -- Win32 object security not supported + -- handles to event objects are not inheritable + +Parameters: + See MSDN doc. +--*/ + +HANDLE +PALAPI +CreateEventA( + IN LPSECURITY_ATTRIBUTES lpEventAttributes, + IN BOOL bManualReset, + IN BOOL bInitialState, + IN LPCSTR lpName) +{ + HANDLE hEvent = NULL; + CPalThread *pthr = NULL; + PAL_ERROR palError; + + PERF_ENTRY(CreateEventA); + ENTRY("CreateEventA(lpEventAttr=%p, bManualReset=%d, bInitialState=%d, lpName=%p (%s)\n", + lpEventAttributes, bManualReset, bInitialState, lpName, lpName?lpName:"NULL"); + + pthr = InternalGetCurrentThread(); + + if (lpName != nullptr) + { + ASSERT("lpName: Cross-process named objects are not supported in PAL"); + palError = ERROR_NOT_SUPPORTED; + } + else + { + palError = InternalCreateEvent( + pthr, + lpEventAttributes, + bManualReset, + bInitialState, + NULL, + &hEvent + ); + } + + // + // We always need to set last error, even on success: + // we need to protect ourselves from the situation + // where last error is set to ERROR_ALREADY_EXISTS on + // entry to the function + // + + pthr->SetLastError(palError); + + LOGEXIT("CreateEventA returns HANDLE %p\n", hEvent); + PERF_EXIT(CreateEventA); + return hEvent; +} + + +/*++ +Function: + CreateEventW + +Note: + lpEventAttributes currentely ignored: + -- Win32 object security not supported + -- handles to event objects are not inheritable + +Parameters: + See MSDN doc. 
+--*/ + +HANDLE +PALAPI +CreateEventW( + IN LPSECURITY_ATTRIBUTES lpEventAttributes, + IN BOOL bManualReset, + IN BOOL bInitialState, + IN LPCWSTR lpName) +{ + HANDLE hEvent = NULL; + PAL_ERROR palError; + CPalThread *pthr = NULL; + + PERF_ENTRY(CreateEventW); + ENTRY("CreateEventW(lpEventAttr=%p, bManualReset=%d, " + "bInitialState=%d, lpName=%p (%S)\n", lpEventAttributes, bManualReset, + bInitialState, lpName, lpName?lpName:W16_NULLSTRING); + + pthr = InternalGetCurrentThread(); + + palError = InternalCreateEvent( + pthr, + lpEventAttributes, + bManualReset, + bInitialState, + lpName, + &hEvent + ); + + // + // We always need to set last error, even on success: + // we need to protect ourselves from the situation + // where last error is set to ERROR_ALREADY_EXISTS on + // entry to the function + // + + pthr->SetLastError(palError); + + LOGEXIT("CreateEventW returns HANDLE %p\n", hEvent); + PERF_EXIT(CreateEventW); + return hEvent; +} + +/*++ +Function: + InternalCreateEvent + +Note: + lpEventAttributes currentely ignored: + -- Win32 object security not supported + -- handles to event objects are not inheritable + +Parameters: + pthr -- thread data for calling thread + phEvent -- on success, receives the allocated event handle + + See MSDN docs on CreateEvent for all other parameters +--*/ + +PAL_ERROR +CorUnix::InternalCreateEvent( + CPalThread *pthr, + LPSECURITY_ATTRIBUTES lpEventAttributes, + BOOL bManualReset, + BOOL bInitialState, + LPCWSTR lpName, + HANDLE *phEvent + ) +{ + CObjectAttributes oa(lpName, lpEventAttributes); + PAL_ERROR palError = NO_ERROR; + IPalObject *pobjEvent = NULL; + IPalObject *pobjRegisteredEvent = NULL; + + _ASSERTE(NULL != pthr); + _ASSERTE(NULL != phEvent); + + ENTRY("InternalCreateEvent(pthr=%p, lpEventAttributes=%p, bManualReset=%i, " + "bInitialState=%i, lpName=%p, phEvent=%p)\n", + pthr, + lpEventAttributes, + bManualReset, + bInitialState, + lpName, + phEvent + ); + + if (lpName != nullptr) + { + ASSERT("lpName: Cross-process named objects are not supported in PAL"); + palError = ERROR_NOT_SUPPORTED; + goto InternalCreateEventExit; + } + + palError = g_pObjectManager->AllocateObject( + pthr, + bManualReset ? &otManualResetEvent : &otAutoResetEvent, + &oa, + &pobjEvent + ); + + if (NO_ERROR != palError) + { + goto InternalCreateEventExit; + } + + if (bInitialState) + { + ISynchStateController *pssc; + + palError = pobjEvent->GetSynchStateController( + pthr, + &pssc + ); + + if (NO_ERROR == palError) + { + palError = pssc->SetSignalCount(1); + pssc->ReleaseController(); + } + + if (NO_ERROR != palError) + { + ASSERT("Unable to set new event state (%d)\n", palError); + goto InternalCreateEventExit; + } + } + + palError = g_pObjectManager->RegisterObject( + pthr, + pobjEvent, + &aotEvent, + EVENT_ALL_ACCESS, // Currently ignored (no Win32 security) + phEvent, + &pobjRegisteredEvent + ); + + // + // pobjEvent is invalidated by the call to RegisterObject, so NULL it + // out here to ensure that we don't try to release a reference on + // it down the line. + // + + pobjEvent = NULL; + +InternalCreateEventExit: + + if (NULL != pobjEvent) + { + pobjEvent->ReleaseReference(pthr); + } + + if (NULL != pobjRegisteredEvent) + { + pobjRegisteredEvent->ReleaseReference(pthr); + } + + LOGEXIT("InternalCreateEvent returns %i\n", palError); + + return palError; +} + + +/*++ +Function: + SetEvent + +See MSDN doc. 
+--*/ + +BOOL +PALAPI +SetEvent( + IN HANDLE hEvent) +{ + PAL_ERROR palError = NO_ERROR; + CPalThread *pthr = NULL; + + PERF_ENTRY(SetEvent); + ENTRY("SetEvent(hEvent=%p)\n", hEvent); + + pthr = InternalGetCurrentThread(); + + palError = InternalSetEvent(pthr, hEvent, TRUE); + + if (NO_ERROR != palError) + { + pthr->SetLastError(palError); + } + + LOGEXIT("SetEvent returns BOOL %d\n", (NO_ERROR == palError)); + PERF_EXIT(SetEvent); + return (NO_ERROR == palError); +} + + +/*++ +Function: + ResetEvent + +See MSDN doc. +--*/ + +BOOL +PALAPI +ResetEvent( + IN HANDLE hEvent) +{ + PAL_ERROR palError = NO_ERROR; + CPalThread *pthr = NULL; + + PERF_ENTRY(ResetEvent); + ENTRY("ResetEvent(hEvent=%p)\n", hEvent); + + pthr = InternalGetCurrentThread(); + + palError = InternalSetEvent(pthr, hEvent, FALSE); + + if (NO_ERROR != palError) + { + pthr->SetLastError(palError); + } + + LOGEXIT("ResetEvent returns BOOL %d\n", (NO_ERROR == palError)); + PERF_EXIT(ResetEvent); + return (NO_ERROR == palError); +} + +/*++ +Function: + InternalCreateEvent + +Parameters: + pthr -- thread data for calling thread + hEvent -- handle to the event to set + fSetEvent -- if TRUE, set the event; if FALSE, reset it +--*/ + +PAL_ERROR +CorUnix::InternalSetEvent( + CPalThread *pthr, + HANDLE hEvent, + BOOL fSetEvent + ) +{ + PAL_ERROR palError = NO_ERROR; + IPalObject *pobjEvent = NULL; + ISynchStateController *pssc = NULL; + + _ASSERTE(NULL != pthr); + + ENTRY("InternalSetEvent(pthr=%p, hEvent=%p, fSetEvent=%i\n", + pthr, + hEvent, + fSetEvent + ); + + palError = g_pObjectManager->ReferenceObjectByHandle( + pthr, + hEvent, + &aotEvent, + 0, // Should be EVENT_MODIFY_STATE; currently ignored (no Win32 security) + &pobjEvent + ); + + if (NO_ERROR != palError) + { + ERROR("Unable to obtain object for handle %p (error %d)!\n", hEvent, palError); + goto InternalSetEventExit; + } + + palError = pobjEvent->GetSynchStateController( + pthr, + &pssc + ); + + if (NO_ERROR != palError) + { + ASSERT("Error %d obtaining synch state controller\n", palError); + goto InternalSetEventExit; + } + + palError = pssc->SetSignalCount(fSetEvent ? 1 : 0); + + if (NO_ERROR != palError) + { + ASSERT("Error %d setting event state\n", palError); + goto InternalSetEventExit; + } + +InternalSetEventExit: + + if (NULL != pssc) + { + pssc->ReleaseController(); + } + + if (NULL != pobjEvent) + { + pobjEvent->ReleaseReference(pthr); + } + + LOGEXIT("InternalSetEvent returns %d\n", palError); + + return palError; +} + +// TODO: Implementation of OpenEventA() doesn't exist, do we need it? More generally, do we need the A versions at all? + +/*++ +Function: + OpenEventW + +Note: + dwDesiredAccess is currently ignored (no Win32 object security support) + bInheritHandle is currently ignored (handles to events are not inheritable) + +Parameters: + See MSDN doc. 
+--*/ + +HANDLE +PALAPI +OpenEventW( + IN DWORD dwDesiredAccess, + IN BOOL bInheritHandle, + IN LPCWSTR lpName) +{ + HANDLE hEvent = NULL; + PAL_ERROR palError = NO_ERROR; + CPalThread *pthr = NULL; + + PERF_ENTRY(OpenEventW); + ENTRY("OpenEventW(dwDesiredAccess=%#x, bInheritHandle=%d, lpName=%p (%S))\n", + dwDesiredAccess, bInheritHandle, lpName, lpName?lpName:W16_NULLSTRING); + + pthr = InternalGetCurrentThread(); + + /* validate parameters */ + if (lpName == nullptr) + { + ERROR("name is NULL\n"); + palError = ERROR_INVALID_PARAMETER; + goto OpenEventWExit; + } + else + { + ASSERT("lpName: Cross-process named objects are not supported in PAL"); + palError = ERROR_NOT_SUPPORTED; + } + +OpenEventWExit: + + if (NO_ERROR != palError) + { + pthr->SetLastError(palError); + } + + LOGEXIT("OpenEventW returns HANDLE %p\n", hEvent); + PERF_EXIT(OpenEventW); + + return hEvent; +} + +/*++ +Function: + InternalOpenEvent + +Note: + dwDesiredAccess is currently ignored (no Win32 object security support) + bInheritHandle is currently ignored (handles to events are not inheritable) + +Parameters: + pthr -- thread data for calling thread + phEvent -- on success, receives the allocated event handle + + See MSDN docs on OpenEvent for all other parameters. +--*/ + +PAL_ERROR +CorUnix::InternalOpenEvent( + CPalThread *pthr, + DWORD dwDesiredAccess, + BOOL bInheritHandle, + LPCWSTR lpName, + HANDLE *phEvent + ) +{ + PAL_ERROR palError = NO_ERROR; + IPalObject *pobjEvent = NULL; + CPalString sObjectName(lpName); + + _ASSERTE(NULL != pthr); + _ASSERTE(NULL != lpName); + _ASSERTE(NULL != phEvent); + + ENTRY("InternalOpenEvent(pthr=%p, dwDesiredAccess=%#x, bInheritHandle=%d, " + "lpName=%p, phEvent=%p)\n", + pthr, + dwDesiredAccess, + bInheritHandle, + lpName, + phEvent + ); + + palError = g_pObjectManager->LocateObject( + pthr, + &sObjectName, + &aotEvent, + &pobjEvent + ); + + if (NO_ERROR != palError) + { + goto InternalOpenEventExit; + } + + palError = g_pObjectManager->ObtainHandleForObject( + pthr, + pobjEvent, + dwDesiredAccess, + bInheritHandle, + NULL, + phEvent + ); + + if (NO_ERROR != palError) + { + goto InternalOpenEventExit; + } + +InternalOpenEventExit: + + if (NULL != pobjEvent) + { + pobjEvent->ReleaseReference(pthr); + } + + LOGEXIT("InternalOpenEvent returns %d\n", palError); + + return palError; +} + diff --git a/test/AsmJs/rlexe.xml b/test/AsmJs/rlexe.xml index bfbf179e0ae..e105742cf4d 100644 --- a/test/AsmJs/rlexe.xml +++ b/test/AsmJs/rlexe.xml @@ -805,6 +805,7 @@ constloads.js constloads.baseline -testtrace:asmjs -maic:1 + exclude_xplat diff --git a/test/Date/rlexe.xml b/test/Date/rlexe.xml index 654a8d82fe9..b0b4c4fadec 100644 --- a/test/Date/rlexe.xml +++ b/test/Date/rlexe.xml @@ -93,7 +93,8 @@ - exclude_jenkins,exclude_mac + + exclude_jenkins,exclude_mac,exclude_xplat win8 diff --git a/test/Function/rlexe.xml b/test/Function/rlexe.xml index c5ce3a29b4a..9c31c7a52a3 100644 --- a/test/Function/rlexe.xml +++ b/test/Function/rlexe.xml @@ -432,7 +432,7 @@ failnativecodeinstall.js - /maxinterpretcount:2 /lic:1 /bgjit /off:simplejit /on:failnativecodeinstall + -maxinterpretcount:2 -lic:1 -bgjit -off:simplejit -on:failnativecodeinstall exclude_dynapogo,require_backend failnativecodeinstall.baseline diff --git a/test/Optimizer/Invariants.baseline b/test/Optimizer/Invariants.baseline index 3e0f53571ef..902af40f759 100644 --- a/test/Optimizer/Invariants.baseline +++ b/test/Optimizer/Invariants.baseline @@ -1,12 +1,12 @@ test0: 1 test1: 1 test2: undefined -6µ!%$caller -6µ!%$caller -6µ!%$caller 
-6µ!%$caller -6µ!%$caller -6µ!%$caller -6µ!%$caller -6µ!%$caller +6b!%$caller +6b!%$caller +6b!%$caller +6b!%$caller +6b!%$caller +6b!%$caller +6b!%$caller +6b!%$caller subset_of_ary = diff --git a/test/Optimizer/Invariants.js b/test/Optimizer/Invariants.js index c0235004847..c48f6aa5240 100644 --- a/test/Optimizer/Invariants.js +++ b/test/Optimizer/Invariants.js @@ -41,10 +41,10 @@ WScript.Echo("test2: " + test2()); // When hoisting an invariant with a new dst, value type of the old dst should be copied over to the new dst. function test3() { var func1 = function () { - return '6' + '\xb5!%$' + 'caller'; + return '6' + 'b!%$' + 'caller'; }; var func2 = function () { - return '6' + '\xb5!%$' + 'caller'; + return '6' + 'b!%$' + 'caller'; }; var ary = Array(); diff --git a/test/runtests.py b/test/runtests.py index 20e0ac3224a..d6f01f83842 100755 --- a/test/runtests.py +++ b/test/runtests.py @@ -10,6 +10,7 @@ from threading import Timer import sys import os +import glob import subprocess as SP import traceback import argparse @@ -43,6 +44,8 @@ parser.add_argument('-d', '--debug', action='store_true', help='use debug build'); parser.add_argument('-t', '--test', action='store_true', help='use test build') +parser.add_argument('--variants', metavar='variant', nargs='+', + help='run specified test variants') parser.add_argument('--include-slow', action='store_true', help='include slow tests (timeout ' + str(SLOW_TIMEOUT) + ' seconds)') parser.add_argument('--only-slow', action='store_true', @@ -81,6 +84,11 @@ sys.exit(1) flavor_alias = 'chk' if flavor == 'Debug' else 'fre' +# test variants +if not args.variants: + args.variants = ['interpreted', 'dynapogo'] if sys.platform != 'darwin' \ + else ['disable_jit'] # TODO: JIT for OSX + # binary: full ch path binary = args.binary if binary == None: @@ -114,7 +122,6 @@ if sys.platform != 'win32': not_tags.add('exclude_xplat') not_tags.add('Intl') - not_tags.add('require_backend') not_tags.add('require_debugger') if sys.platform == 'darwin': not_tags.add('exclude_mac') @@ -180,6 +187,7 @@ def normalize_new_line(text): # A test simply contains a collection of test attributes. 
# Misc attributes added by test run: +# id unique counter to identify a test # filename full path of test file # elapsed_time elapsed time when running the test # @@ -241,13 +249,14 @@ def log(self, filename, fail=False): # interpreted: -maxInterpretCount:1 -maxSimpleJitRunCount:1 -bgjit- # dynapogo: -forceNative -off:simpleJit -bgJitDelay:0 class TestVariant(object): - def __init__(self, name, compile_flags=[]): + def __init__(self, name, compile_flags=[], variant_not_tags=[]): self.name = name self.compile_flags = \ ['-WERExceptionSupport', '-ExtendedErrorStackForTestHost', '-BaselineMode'] + compile_flags + self._compile_flags_has_expansion = self._has_expansion(compile_flags) self.tags = tags.copy() - self.not_tags = not_tags.union( + self.not_tags = not_tags.union(variant_not_tags).union( ['{}_{}'.format(x, name) for x in ('fails','exclude')]) self.msg_queue = Manager().Queue() # messages from multi processes @@ -256,6 +265,19 @@ def __init__(self, name, compile_flags=[]): self._print_lines = [] # _print lines buffer self._last_len = 0 + @staticmethod + def _has_expansion(flags): + return any(re.match('.*\${.*}', f) for f in flags) + + @staticmethod + def _expand(flag, test): + return re.sub('\${id}', str(test.id), flag) + + def _expand_compile_flags(self, test): + if self._compile_flags_has_expansion: + return [self._expand(flag, test) for flag in self.compile_flags] + return self.compile_flags + # check if this test variant should run a given test def _should_test(self, test): tags = split_tags(test.get('tags')) @@ -372,7 +394,8 @@ def _run_one_test(self, test): working_path = os.path.dirname(js_file) flags = test.get('compile-flags') - flags = self.compile_flags + (flags.split() if flags else []) + flags = self._expand_compile_flags(test) + \ + (flags.split() if flags else []) cmd = [binary] + flags + [os.path.basename(js_file)] test.start() @@ -431,7 +454,7 @@ def timeout_func(timeout_data): self._log_result(test, fail=False) # run tests under this variant, using given multiprocessing Pool - def run(self, tests, pool): + def _run(self, tests, pool): print_and_log('\n############# Starting {} variant #############'\ .format(self.name)) if self.tags: @@ -458,6 +481,18 @@ def print_summary(self): print_and_log("----------------------------") print_and_log('Total: {}'.format(self.test_result)) + # run all tests from testLoader + def run(self, testLoader, pool, sequential_pool): + tests, sequential_tests = [], [] + for folder in testLoader.folders(): + if folder.tags.isdisjoint(self.not_tags): + dest = tests if not folder.is_sequential else sequential_tests + dest += folder.tests + if tests: + self._run(tests, pool) + if sequential_tests: + self._run(sequential_tests, sequential_pool) + # global run one test function for multiprocessing, used by TestVariant def run_one(data): try: @@ -467,9 +502,38 @@ def run_one(data): print('ERROR: Unhandled exception!!!') traceback.print_exc() -# record folder/tags info from test_root/rlexedirs.xml -class FolderTags(object): - def __init__(self): + +# A test folder contains a list of tests and maybe some tags. 
+class TestFolder(object): + def __init__(self, tests, tags=_empty_set): + self.tests = tests + self.tags = tags + self.is_sequential = 'sequential' in tags + +# TestLoader loads all tests +class TestLoader(object): + def __init__(self, paths): + self._folder_tags = self._load_folder_tags() + self._test_id = 0 + self._folders = [] + + for path in paths: + if os.path.isfile(path): + folder, file = os.path.dirname(path), os.path.basename(path) + else: + folder, file = path, None + + ftags = self._get_folder_tags(folder) + if ftags != None: # Only honor entries listed in rlexedirs.xml + tests = self._load_tests(folder, file) + self._folders.append(TestFolder(tests, ftags)) + + def folders(self): + return self._folders + + # load folder/tags info from test_root/rlexedirs.xml + @staticmethod + def _load_folder_tags(): xmlpath = os.path.join(test_root, 'rlexedirs.xml') try: xml = ET.parse(xmlpath).getroot() @@ -477,117 +541,117 @@ def __init__(self): print_and_log('ERROR: failed to read {}'.format(xmlpath)) exit(-1) - self._folder_tags = {} + folder_tags = {} for x in xml: d = x.find('default') key = d.find('files').text.lower() # avoid case mismatch tags = d.find('tags') - self._folder_tags[key] = \ + folder_tags[key] = \ split_tags(tags.text) if tags != None else _empty_set + return folder_tags # get folder tags if any - def _tags(self, folder): + def _get_folder_tags(self, folder): key = os.path.basename(os.path.normpath(folder)).lower() return self._folder_tags.get(key) - # check if should test a given folder - def should_test(self, folder): - ftags = self._tags(folder) + def _next_test_id(self): + self._test_id += 1 + return self._test_id - # folder listed in rlexedirs.xml and not exlucded by global not_tags - return ftags != None and ftags.isdisjoint(not_tags) + # load all tests in folder using rlexe.xml file + def _load_tests(self, folder, file): + try: + xmlpath = os.path.join(folder, 'rlexe.xml') + xml = ET.parse(xmlpath).getroot() + except IOError: + return [] - # check if a given folder is tagged sequential - def is_sequential(self, folder): - ftags = self._tags(folder) - return ftags and 'sequential' in ftags + def test_override(condition, check_tag, check_value, test): + target = condition.find(check_tag) + if target != None and target.text == check_value: + for override in condition.find('override'): + test[override.tag] = override.text -# load all tests in folder using rlexe.xml file -def load_tests(folder, file): - try: - xmlpath = os.path.join(folder, 'rlexe.xml') - xml = ET.parse(xmlpath).getroot() - except IOError: - return [] - - def test_override(condition, check_tag, check_value, test): - target = condition.find(check_tag) - if target != None and target.text == check_value: - for override in condition.find('override'): - test[override.tag] = override.text - - def load_test(testXml): - test = Test(folder=folder) - for c in testXml.find('default'): - if c.tag == 'timeout': # timeout seconds - test[c.tag] = int(c.text) - elif c.tag == 'tags' and c.tag in test: # merge multiple - test[c.tag] = test[c.tag] + ',' + c.text - else: - test[c.tag] = c.text + def load_test(testXml): + test = Test(folder=folder) + for c in testXml.find('default'): + if c.tag == 'timeout': # timeout seconds + test[c.tag] = int(c.text) + elif c.tag == 'tags' and c.tag in test: # merge multiple + test[c.tag] = test[c.tag] + ',' + c.text + else: + test[c.tag] = c.text - condition = testXml.find('condition') - if condition != None: - test_override(condition, 'target', arch_alias, test) + condition = 
testXml.find('condition') + if condition != None: + test_override(condition, 'target', arch_alias, test) - return test + return test - tests = [load_test(x) for x in xml] - if file != None: - tests = [x for x in tests if x.files == file] - if len(tests) == 0 and is_jsfile(file): - tests = [Test(folder=folder, files=file, baseline='')] - return tests + tests = [load_test(x) for x in xml] + if file != None: + tests = [x for x in tests if x.files == file] + if len(tests) == 0 and self.is_jsfile(file): + tests = [Test(folder=folder, files=file, baseline='')] -def is_jsfile(path): - return os.path.splitext(path)[1] == '.js' + for test in tests: # assign unique test.id + test.id = self._next_test_id() -def main(): - # By default run all tests - if len(args.folders) == 0: - files = (os.path.join(test_root, x) for x in os.listdir(test_root)) - args.folders = [f for f in sorted(files) if not os.path.isfile(f)] + return tests + + @staticmethod + def is_jsfile(path): + return os.path.splitext(path)[1] == '.js' +def main(): # Set the right timezone, the tests need Pacific Standard Time # TODO: Windows. time.tzset only supports Unix if hasattr(time, 'tzset'): os.environ['TZ'] = 'US/Pacific' time.tzset() + # By default run all tests + if len(args.folders) == 0: + files = (os.path.join(test_root, x) for x in os.listdir(test_root)) + args.folders = [f for f in sorted(files) if not os.path.isfile(f)] + # load all tests - tests, sequential_tests = [], [] - folder_tags = FolderTags() - for path in args.folders: - if os.path.isfile(path): - folder, file = os.path.dirname(path), os.path.basename(path) - else: - folder, file = path, None - if folder_tags.should_test(folder): - dest = sequential_tests if folder_tags.is_sequential(folder) \ - else tests - dest += load_tests(folder, file) + testLoader = TestLoader(args.folders) # test variants - variants = [ + variants = [x for x in [ TestVariant('interpreted', [ - '-maxInterpretCount:1', '-maxSimpleJitRunCount:1', '-bgjit-']) - ] + '-maxInterpretCount:1', '-maxSimpleJitRunCount:1', '-bgjit-', + '-dynamicprofilecache:profile.dpl.${id}' + ]), + TestVariant('dynapogo', [ + '-forceNative', '-off:simpleJit', '-bgJitDelay:0', + '-dynamicprofileinput:profile.dpl.${id}' + ]), + TestVariant('disable_jit', [ + '-nonative' + ], [ + 'exclude_interpreted', 'fails_interpreted', 'require_backend' + ]) + ] if x.name in args.variants] + + # rm profile.dpl.* + for f in glob.glob(test_root + '/*/profile.dpl.*'): + os.remove(f) # run each variant pool, sequential_pool = Pool(), Pool(1) start_time = datetime.now() for variant in variants: - if tests: - variant.run(tests, pool) - if sequential_tests: - variant.run(sequential_tests, sequential_pool) + variant.run(testLoader, pool, sequential_pool) elapsed_time = datetime.now() - start_time # print summary for variant in variants: variant.print_summary() - print() + failed = any(variant.test_result.fail_count > 0 for variant in variants) print('[{}] {}'.format( str(elapsed_time), 'Success!' if not failed else 'Failed!')) diff --git a/test/stackfunc/rlexe.xml b/test/stackfunc/rlexe.xml index f45cee4d80f..855f185d799 100644 --- a/test/stackfunc/rlexe.xml +++ b/test/stackfunc/rlexe.xml @@ -667,7 +667,7 @@ 622043.js - /force:deferparse /mic:1 /off:bailonnoprofile /force:inline /off:simplejit + -force:deferparse -mic:1 -off:bailonnoprofile -force:inline -off:simplejit exclude_dynapogo,exclude_fre
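On the runtests.py changes above: the runner now accepts --variants (for example "--variants interpreted dynapogo", or "--variants disable_jit" for builds without a JIT, which is also the default on macOS). Compile flags may contain ${id}, which TestVariant expands to the test's unique id; for the test whose id is 12, the interpreted variant's "-dynamicprofilecache:profile.dpl.${id}" becomes "-dynamicprofilecache:profile.dpl.12", and the dynapogo variant then replays that profile through "-dynamicprofileinput:profile.dpl.12". Stale profile.dpl.* files are removed before the run.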