diff --git a/Build/NuGet/.pack-version b/Build/NuGet/.pack-version
index 6a126f402d5..de28578affc 100644
--- a/Build/NuGet/.pack-version
+++ b/Build/NuGet/.pack-version
@@ -1 +1 @@
-1.7.5
+1.7.6
diff --git a/lib/Backend/BailOut.cpp b/lib/Backend/BailOut.cpp
index c026951859d..a784a37008a 100644
--- a/lib/Backend/BailOut.cpp
+++ b/lib/Backend/BailOut.cpp
@@ -1006,7 +1006,7 @@ BailOutRecord::RestoreValue(IR::BailOutKind bailOutKind, Js::JavascriptCallStack
         if (boxStackInstance)
         {
             Js::Var oldValue = value;
-            value = Js::JavascriptOperators::BoxStackInstance(oldValue, scriptContext, /* allowStackFunction */ true);
+            value = Js::JavascriptOperators::BoxStackInstance(oldValue, scriptContext, /* allowStackFunction */ true, /* deepCopy */ false);

             if (oldValue != value)
             {
@@ -1275,7 +1275,7 @@ BailOutRecord::BailOutInlinedHelper(Js::JavascriptCallStackLayout * layout, Bail
         if (inlineeFrameRecord)
         {
             InlinedFrameLayout* outerMostFrame = (InlinedFrameLayout *)(((uint8 *)Js::JavascriptCallStackLayout::ToFramePointer(layout)) - entryPointInfo->frameHeight);
-            inlineeFrameRecord->RestoreFrames(functionBody, outerMostFrame, layout);
+            inlineeFrameRecord->RestoreFrames(functionBody, outerMostFrame, layout, false /* deepCopy */);
         }
     }
@@ -1480,7 +1480,7 @@ BailOutRecord::BailOutHelper(Js::JavascriptCallStackLayout * layout, Js::ScriptF
         {
             const Js::Var arg = args.Values[i];
             BAILOUT_VERBOSE_TRACE(executeFunction, bailOutKind, _u("BailOut: Argument #%3u: value: 0x%p"), i, arg);
-            const Js::Var boxedArg = Js::JavascriptOperators::BoxStackInstance(arg, functionScriptContext, true);
+            const Js::Var boxedArg = Js::JavascriptOperators::BoxStackInstance(arg, functionScriptContext, /* allowStackFunction */ true, /* deepCopy */ false);
             if(boxedArg != arg)
             {
                 args.Values[i] = boxedArg;
@@ -1775,7 +1775,7 @@ BailOutRecord::BailOutHelper(Js::JavascriptCallStackLayout * layout, Js::ScriptF
         aReturn = Js::JavascriptFunction::FinishConstructor(aReturn, args.Values[0], function);

         Js::Var oldValue = aReturn;
-        aReturn = Js::JavascriptOperators::BoxStackInstance(oldValue, functionScriptContext, /* allowStackFunction */ true);
+        aReturn = Js::JavascriptOperators::BoxStackInstance(oldValue, functionScriptContext, /* allowStackFunction */ true, /* deepCopy */ false);
 #if ENABLE_DEBUG_CONFIG_OPTIONS
         if (oldValue != aReturn)
         {
diff --git a/lib/Backend/GlobOpt.cpp b/lib/Backend/GlobOpt.cpp
index f634c6b53b5..835d81e6f1c 100644
--- a/lib/Backend/GlobOpt.cpp
+++ b/lib/Backend/GlobOpt.cpp
@@ -2859,7 +2859,7 @@ GlobOpt::OptTagChecks(IR::Instr *instr)
     if (valueType.CanBeTaggedValue() &&
         !valueType.HasBeenNumber() &&
-        (this->IsLoopPrePass() || !this->currentBlock->loop))
+        !this->IsLoopPrePass())
     {
         ValueType newValueType = valueType.SetCanBeTaggedValue(false);
@@ -2883,7 +2883,16 @@ GlobOpt::OptTagChecks(IR::Instr *instr)
             bailOutInstr->SetSrc1(srcOpnd);
             bailOutInstr->GetSrc1()->SetValueType(valueType);
             instr->InsertBefore(bailOutInstr);
-
+            if (this->currentBlock->loop)
+            {
+                // Try hoisting the BailOnNotObject instr.
+                // But since this isn't the current instr being optimized, we need to play tricks with
+                // the byteCodeUse fields...
+                TrackByteCodeUsesForInstrAddedInOptInstr(bailOutInstr, [&]()
+                {
+                    TryHoistInvariant(bailOutInstr, this->currentBlock, nullptr, value, nullptr, true, false, false, IR::BailOutOnTaggedValue);
+                });
+            }
             if (symOpnd)
             {
                 symOpnd->SetPropertyOwnerValueType(newValueType);
@@ -3968,25 +3977,10 @@ GlobOpt::CopyPropReplaceOpnd(IR::Instr * instr, IR::Opnd * opnd, StackSym * copy
                 // Try hoisting this checkObjType.
                // But since this isn't the current instr being optimized, we need to play tricks with
                // the byteCodeUse fields...
-                BVSparse<JitArenaAllocator> *currentBytecodeUses = this->byteCodeUses;
-                PropertySym * currentPropertySymUse = this->propertySymUse;
-                PropertySym * tempPropertySymUse = NULL;
-                this->byteCodeUses = NULL;
-                BVSparse<JitArenaAllocator> *tempByteCodeUse = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
-#if DBG
-                BVSparse<JitArenaAllocator> *currentBytecodeUsesBeforeOpt = this->byteCodeUsesBeforeOpt;
-                this->byteCodeUsesBeforeOpt = tempByteCodeUse;
-#endif
-                this->propertySymUse = NULL;
-                GlobOpt::TrackByteCodeSymUsed(checkObjTypeInstr, tempByteCodeUse, &tempPropertySymUse);
-
-                TryHoistInvariant(checkObjTypeInstr, this->currentBlock, NULL, CurrentBlockData()->FindValue(copySym), NULL, true);
-
-                this->byteCodeUses = currentBytecodeUses;
-                this->propertySymUse = currentPropertySymUse;
-#if DBG
-                this->byteCodeUsesBeforeOpt = currentBytecodeUsesBeforeOpt;
-#endif
+                TrackByteCodeUsesForInstrAddedInOptInstr(checkObjTypeInstr, [&]()
+                {
+                    TryHoistInvariant(checkObjTypeInstr, this->currentBlock, NULL, CurrentBlockData()->FindValue(copySym), NULL, true);
+                });
             }
         }
     }
@@ -7072,6 +7066,18 @@ GlobOpt::OptConstFoldUnary(
             this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock);
         }
     }
+
+    // If this is an induction variable, then treat it the way the prepass would have if it had seen
+    // the assignment and the resulting change to the value number, and mark it as indeterminate.
+    for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent)
+    {
+        InductionVariable *iv = nullptr;
+        if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv))
+        {
+            iv->SetChangeIsIndeterminate();
+        }
+    }
+
     return true;
 }
@@ -12391,6 +12397,17 @@ GlobOpt::OptConstFoldBinary(
         this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock);
     }

+    // If this is an induction variable, then treat it the way the prepass would have if it had seen
+    // the assignment and the resulting change to the value number, and mark it as indeterminate.
+    for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent)
+    {
+        InductionVariable *iv = nullptr;
+        if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv))
+        {
+            iv->SetChangeIsIndeterminate();
+        }
+    }
+
     return true;
 }
diff --git a/lib/Backend/GlobOpt.h b/lib/Backend/GlobOpt.h
index 958007ea42f..01df8013108 100644
--- a/lib/Backend/GlobOpt.h
+++ b/lib/Backend/GlobOpt.h
@@ -798,6 +798,29 @@ class GlobOpt
     bool DoPowIntIntTypeSpec() const;
     bool DoTagChecks() const;

+    template <typename Fn>
+    void TrackByteCodeUsesForInstrAddedInOptInstr(IR::Instr * trackByteCodeUseOnInstr, Fn fn)
+    {
+        BVSparse<JitArenaAllocator> *currentBytecodeUses = this->byteCodeUses;
+        PropertySym * currentPropertySymUse = this->propertySymUse;
+        PropertySym * tempPropertySymUse = NULL;
+        this->byteCodeUses = NULL;
+        BVSparse<JitArenaAllocator> *tempByteCodeUse = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
+#if DBG
+        BVSparse<JitArenaAllocator> *currentBytecodeUsesBeforeOpt = this->byteCodeUsesBeforeOpt;
+        this->byteCodeUsesBeforeOpt = tempByteCodeUse;
+#endif
+        this->propertySymUse = NULL;
+        GlobOpt::TrackByteCodeSymUsed(trackByteCodeUseOnInstr, tempByteCodeUse, &tempPropertySymUse);
+
+        fn();
+
+        this->byteCodeUses = currentBytecodeUses;
+        this->propertySymUse = currentPropertySymUse;
+#if DBG
+        this->byteCodeUsesBeforeOpt = currentBytecodeUsesBeforeOpt;
+#endif
+    }
 private:
     // GlobOptBailout.cpp
     bool MayNeedBailOut(Loop * loop) const;
diff --git a/lib/Backend/GlobOptIntBounds.cpp b/lib/Backend/GlobOptIntBounds.cpp
index 2fadf68dcf4..19f2b200e5f 100644
--- a/lib/Backend/GlobOptIntBounds.cpp
+++ b/lib/Backend/GlobOptIntBounds.cpp
@@ -840,7 +840,7 @@ void GlobOpt::TrackIntSpecializedAddSubConstant(
         }
     } while(false);

-    if(updateSourceBounds && addSubConstantInfo->Offset() != IntConstMin)
+    if(!this->IsLoopPrePass() && updateSourceBounds && addSubConstantInfo->Offset() != IntConstMin)
     {
         // Track bounds for add or sub with a constant. For instance, consider (b = a + 2). The value of 'b' should track
         // that it is equal to (the value of 'a') + 2. That part has been done above. Similarly, the value of 'a' should
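The GlobOpt.h helper above factors a save/run/restore dance around a callback: stash the tracker's per-instruction state, give the added instruction fresh state, invoke the optimization, then put everything back. A minimal standalone sketch of that pattern (TrackingState and WithTemporaryTracking are illustrative stand-ins, not ChakraCore types):

```cpp
#include <cassert>

// Simplified stand-in for GlobOpt's per-instruction tracking state.
struct TrackingState
{
    int *byteCodeUses = nullptr;
    int *propertySymUse = nullptr;
};

// Save the tracker's state, run the callback against fresh temporary state,
// then restore the original state -- the same shape as
// TrackByteCodeUsesForInstrAddedInOptInstr above.
template <typename Fn>
void WithTemporaryTracking(TrackingState &state, Fn fn)
{
    int *savedUses = state.byteCodeUses;          // save
    int *savedPropertySym = state.propertySymUse;
    state.byteCodeUses = nullptr;                 // fresh state for the added instr
    state.propertySymUse = nullptr;

    fn();                                         // e.g. TryHoistInvariant(...)

    state.byteCodeUses = savedUses;               // restore
    state.propertySymUse = savedPropertySym;
}

int main()
{
    TrackingState state;
    int marker = 0;
    state.byteCodeUses = &marker;
    WithTemporaryTracking(state, [&]() { assert(state.byteCodeUses == nullptr); });
    assert(state.byteCodeUses == &marker);        // original state is back
}
```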
diff --git a/lib/Backend/InlineeFrameInfo.cpp b/lib/Backend/InlineeFrameInfo.cpp
index 97aa7bd41b6..b5e2d69c517 100644
--- a/lib/Backend/InlineeFrameInfo.cpp
+++ b/lib/Backend/InlineeFrameInfo.cpp
@@ -199,13 +199,14 @@ void InlineeFrameRecord::Finalize(Func* inlinee, uint32 currentOffset)
     Assert(this->inlineDepth != 0);
 }

-void InlineeFrameRecord::Restore(Js::FunctionBody* functionBody, InlinedFrameLayout *inlinedFrame, Js::JavascriptCallStackLayout * layout) const
+void InlineeFrameRecord::Restore(Js::FunctionBody* functionBody, InlinedFrameLayout *inlinedFrame, Js::JavascriptCallStackLayout * layout, bool deepCopy) const
 {
     Assert(this->inlineDepth != 0);
     Assert(inlineeStartOffset != 0);

     BAILOUT_VERBOSE_TRACE(functionBody, _u("Restore function object: "));
-    Js::Var varFunction = this->Restore(this->functionOffset, /*isFloat64*/ false, /*isInt32*/ false, layout, functionBody);
+    // No deepCopy needed for just the function
+    Js::Var varFunction = this->Restore(this->functionOffset, /*isFloat64*/ false, /*isInt32*/ false, layout, functionBody, /*deepCopy*/ false);

     Assert(Js::ScriptFunction::Is(varFunction));
     Js::ScriptFunction* function = Js::ScriptFunction::FromVar(varFunction);
@@ -219,7 +220,9 @@ void InlineeFrameRecord::Restore(Js::FunctionBody* functionBody, InlinedFrameLay
         bool isInt32 = losslessInt32Args.Test(i) != 0;
         BAILOUT_VERBOSE_TRACE(functionBody, _u("Restore argument %d: "), i);

-        Js::Var var = this->Restore(this->argOffsets[i], isFloat64, isInt32, layout, functionBody);
+        // Forward deepCopy flag for the arguments in case their data must be guaranteed
+        // to have its own lifetime
+        Js::Var var = this->Restore(this->argOffsets[i], isFloat64, isInt32, layout, functionBody, deepCopy);
 #if DBG
         if (!Js::TaggedNumber::Is(var))
         {
@@ -233,7 +236,7 @@ void InlineeFrameRecord::Restore(Js::FunctionBody* functionBody, InlinedFrameLay
     BAILOUT_FLUSH(functionBody);
 }

-void InlineeFrameRecord::RestoreFrames(Js::FunctionBody* functionBody, InlinedFrameLayout* outerMostFrame, Js::JavascriptCallStackLayout* callstack)
+void InlineeFrameRecord::RestoreFrames(Js::FunctionBody* functionBody, InlinedFrameLayout* outerMostFrame, Js::JavascriptCallStackLayout* callstack, bool deepCopy)
 {
     InlineeFrameRecord* innerMostRecord = this;
     class AutoReverse
@@ -271,7 +274,7 @@ void InlineeFrameRecord::RestoreFrames(Js::FunctionBody* functionBody, InlinedFr
     while (currentRecord)
     {
-        currentRecord->Restore(functionBody, currentFrame, callstack);
+        currentRecord->Restore(functionBody, currentFrame, callstack, deepCopy);
         currentRecord = currentRecord->parent;
         currentFrame = currentFrame->Next();
     }
@@ -280,7 +283,7 @@ void InlineeFrameRecord::RestoreFrames(Js::FunctionBody* functionBody, InlinedFr
     currentFrame->callInfo.Count = 0;
 }

-Js::Var InlineeFrameRecord::Restore(int offset, bool isFloat64, bool isInt32, Js::JavascriptCallStackLayout * layout, Js::FunctionBody* functionBody) const
+Js::Var InlineeFrameRecord::Restore(int offset, bool isFloat64, bool isInt32, Js::JavascriptCallStackLayout * layout, Js::FunctionBody* functionBody, bool deepCopy) const
 {
     Js::Var value;
     bool boxStackInstance = true;
@@ -322,7 +325,7 @@ Js::Var InlineeFrameRecord::Restore(int offset, bool isFloat64, bool isInt32, Js
     if (boxStackInstance)
     {
         Js::Var oldValue = value;
-        value = Js::JavascriptOperators::BoxStackInstance(oldValue, functionBody->GetScriptContext(), /* allowStackFunction */ true);
+        value = Js::JavascriptOperators::BoxStackInstance(oldValue, functionBody->GetScriptContext(), /* allowStackFunction */ true, deepCopy);
 #if ENABLE_DEBUG_CONFIG_OPTIONS
         if (oldValue != value)
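The reason Restore threads a deepCopy flag down to BoxStackInstance is that a shallowly boxed array still shares every segment after the head, so the boxed object's data does not have an independent lifetime. A toy illustration of why sharing the chain is observable (Segment, ShallowCopy, and DeepCopy are hypothetical stand-ins for the SparseArraySegment machinery):

```cpp
#include <cassert>

// Toy segment chain standing in for SparseArraySegment links.
struct Segment
{
    int value;
    Segment* next;
};

// Shallow box: the copy still shares every segment after the head, so writes
// through one object are visible through the other -- the hazard deepCopy avoids.
Segment ShallowCopy(const Segment& src) { return Segment{src.value, src.next}; }

// Deep copy: clone the whole chain so the copy has an independent lifetime.
Segment* DeepCopy(const Segment* src)
{
    if (src == nullptr) return nullptr;
    return new Segment{src->value, DeepCopy(src->next)};
}

int main()
{
    Segment tail{2, nullptr};
    Segment head{1, &tail};

    Segment shallow = ShallowCopy(head);
    tail.value = 99;                       // mutate through the shared tail
    assert(shallow.next->value == 99);     // shallow copy observed the change

    Segment* deep = DeepCopy(&head);
    tail.value = 7;
    assert(deep->next->value == 99);       // deep copy is unaffected
    delete deep->next; delete deep;
}
```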
diff --git a/lib/Backend/InlineeFrameInfo.h b/lib/Backend/InlineeFrameInfo.h
index 830551abf11..a5d4b10ac5a 100644
--- a/lib/Backend/InlineeFrameInfo.h
+++ b/lib/Backend/InlineeFrameInfo.h
@@ -108,7 +108,7 @@ struct InlineeFrameRecord
     }

     void PopulateParent(Func* func);
-    void RestoreFrames(Js::FunctionBody* functionBody, InlinedFrameLayout* outerMostInlinee, Js::JavascriptCallStackLayout* callstack);
+    void RestoreFrames(Js::FunctionBody* functionBody, InlinedFrameLayout* outerMostInlinee, Js::JavascriptCallStackLayout* callstack, bool deepCopy);
     void Finalize(Func* inlinee, uint currentOffset);
 #if DBG_DUMP
     void Dump() const;
@@ -123,8 +123,8 @@ struct InlineeFrameRecord
     }

 private:
-    void Restore(Js::FunctionBody* functionBody, InlinedFrameLayout *outerMostFrame, Js::JavascriptCallStackLayout * layout) const;
-    Js::Var Restore(int offset, bool isFloat64, bool isInt32, Js::JavascriptCallStackLayout * layout, Js::FunctionBody* functionBody) const;
+    void Restore(Js::FunctionBody* functionBody, InlinedFrameLayout *outerMostFrame, Js::JavascriptCallStackLayout * layout, bool deepCopy) const;
+    Js::Var Restore(int offset, bool isFloat64, bool isInt32, Js::JavascriptCallStackLayout * layout, Js::FunctionBody* functionBody, bool deepCopy) const;
     InlineeFrameRecord* Reverse();
 };
diff --git a/lib/Backend/JITThunkEmitter.cpp b/lib/Backend/JITThunkEmitter.cpp
index f72b6d83750..bdb1a5c8c71 100644
--- a/lib/Backend/JITThunkEmitter.cpp
+++ b/lib/Backend/JITThunkEmitter.cpp
@@ -77,7 +77,7 @@ JITThunkEmitter::CreateThunk(uintptr_t entryPoint)
     if (IsThunkPageEmpty(pageStartAddress))
     {
-        if (this->codeAllocator->Alloc((PVOID)pageStartAddress, AutoSystemInfo::PageSize, MEM_COMMIT, PAGE_EXECUTE, true) == nullptr)
+        if (this->codeAllocator->AllocPages((PVOID)pageStartAddress, 1, MEM_COMMIT, PAGE_EXECUTE, true) == nullptr)
         {
             this->codeAllocator->FreeLocal(localPageAddress);
             return NULL;
@@ -165,7 +165,7 @@ JITThunkEmitter::EnsureInitialized()
         // check again because we did the first one outside of lock
         if (this->baseAddress == NULL)
         {
-            this->baseAddress = (uintptr_t)this->codeAllocator->Alloc(nullptr, TotalThunkSize, MEM_RESERVE, PAGE_EXECUTE, true);
+            this->baseAddress = (uintptr_t)this->codeAllocator->AllocPages(nullptr, PageCount, MEM_RESERVE, PAGE_EXECUTE, true);
         }
     }
     return this->baseAddress;
diff --git a/lib/Backend/Lower.cpp b/lib/Backend/Lower.cpp
index 1f8f734cbf1..afa6413aade 100644
--- a/lib/Backend/Lower.cpp
+++ b/lib/Backend/Lower.cpp
@@ -8330,7 +8330,7 @@ Lowerer::LowerAddLeftDeadForString(IR::Instr *instr)
     InsertCompareBranch(
         regLeftCharLengthOpnd,
         IR::IntConstOpnd::New(Js::JavascriptString::MaxCharLength, TyUint32, m_func),
-        Js::OpCode::BrGt_A,
+        Js::OpCode::BrGe_A,
         labelHelper,
         insertBeforeInstr);

@@ -14702,6 +14702,29 @@ IR::BranchInstr *Lowerer::InsertTestBranch(
     return InsertBranch(branchOpCode, isUnsigned, target, insertBeforeInstr);
 }

+/* Inserts an add with an overflow check; if we overflow, throw OOM
+ *     add  dst, src
+ *     jno  $continueLabel
+ *     overflow code
+ * $continueLabel : fall through
+*/
+void Lowerer::InsertAddWithOverflowCheck(
+    const bool needFlags,
+    IR::Opnd *const dst,
+    IR::Opnd *src1,
+    IR::Opnd *src2,
+    IR::Instr *const insertBeforeInstr,
+    IR::Instr **const onOverflowInsertBeforeInstrRef)
+{
+    Func * func = insertBeforeInstr->m_func;
+    InsertAdd(needFlags, dst, src1, src2, insertBeforeInstr);
+
+    IR::LabelInstr *const continueLabel = IR::LabelInstr::New(Js::OpCode::Label, func, false);
false); + InsertBranch(LowererMD::MDNotOverflowBranchOpcode, continueLabel, insertBeforeInstr); + + *onOverflowInsertBeforeInstrRef = continueLabel; +} + IR::Instr *Lowerer::InsertAdd( const bool needFlags, IR::Opnd *const dst, @@ -23210,7 +23233,15 @@ Lowerer::LowerSetConcatStrMultiItem(IR::Instr * instr) srcLength = IR::RegOpnd::New(TyUint32, func); InsertMove(srcLength, IR::IndirOpnd::New(srcOpnd, Js::ConcatStringMulti::GetOffsetOfcharLength(), TyUint32, func), instr); } - InsertAdd(false, dstLength, dstLength, srcLength, instr); + + IR::Instr *onOverflowInsertBeforeInstr; + InsertAddWithOverflowCheck(false, dstLength, dstLength, srcLength, instr, &onOverflowInsertBeforeInstr); + IR::Instr* callInstr = IR::Instr::New(Js::OpCode::Call, func); + callInstr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperOp_OutOfMemoryError, func)); + + instr->InsertBefore(onOverflowInsertBeforeInstr); + onOverflowInsertBeforeInstr->InsertBefore(callInstr); + this->m_lowererMD.LowerCall(callInstr, 0); dstOpnd->SetOffset(dstOpnd->GetOffset() * sizeof(Js::JavascriptString *) + Js::ConcatStringMulti::GetOffsetOfSlots()); diff --git a/lib/Backend/Lower.h b/lib/Backend/Lower.h index ec09ac6fe0a..74bc61a76c3 100644 --- a/lib/Backend/Lower.h +++ b/lib/Backend/Lower.h @@ -359,6 +359,8 @@ class Lowerer public: static void InsertDecUInt32PreventOverflow(IR::Opnd *const dst, IR::Opnd *const src, IR::Instr *const insertBeforeInstr, IR::Instr * *const onOverflowInsertBeforeInstrRef = nullptr); + static void InsertAddWithOverflowCheck(const bool needFlags, IR::Opnd *const dst, IR::Opnd *src1, IR::Opnd *src2, IR::Instr *const insertBeforeInstr, IR::Instr **const onOverflowInsertBeforeInstrRef); + void InsertFloatCheckForZeroOrNanBranch(IR::Opnd *const src, const bool branchOnZeroOrNan, IR::LabelInstr *const target, IR::LabelInstr *const fallthroughLabel, IR::Instr *const insertBeforeInstr); public: diff --git a/lib/Common/ChakraCoreVersion.h b/lib/Common/ChakraCoreVersion.h index 48a81b2cec4..45a4b6e5e2c 100644 --- a/lib/Common/ChakraCoreVersion.h +++ b/lib/Common/ChakraCoreVersion.h @@ -17,7 +17,7 @@ // ChakraCore version number definitions (used in ChakraCore binary metadata) #define CHAKRA_CORE_MAJOR_VERSION 1 #define CHAKRA_CORE_MINOR_VERSION 7 -#define CHAKRA_CORE_PATCH_VERSION 5 +#define CHAKRA_CORE_PATCH_VERSION 6 #define CHAKRA_CORE_VERSION_RELEASE_QFE 0 // Redundant with PATCH_VERSION. Keep this value set to 0. 
diff --git a/lib/Common/ChakraCoreVersion.h b/lib/Common/ChakraCoreVersion.h
index 48a81b2cec4..45a4b6e5e2c 100644
--- a/lib/Common/ChakraCoreVersion.h
+++ b/lib/Common/ChakraCoreVersion.h
@@ -17,7 +17,7 @@
 // ChakraCore version number definitions (used in ChakraCore binary metadata)
 #define CHAKRA_CORE_MAJOR_VERSION 1
 #define CHAKRA_CORE_MINOR_VERSION 7
-#define CHAKRA_CORE_PATCH_VERSION 5
+#define CHAKRA_CORE_PATCH_VERSION 6
 #define CHAKRA_CORE_VERSION_RELEASE_QFE 0 // Redundant with PATCH_VERSION. Keep this value set to 0.

 // -------------
diff --git a/lib/Common/ConfigFlagsList.h b/lib/Common/ConfigFlagsList.h
index eff25065052..257f10115a6 100644
--- a/lib/Common/ConfigFlagsList.h
+++ b/lib/Common/ConfigFlagsList.h
@@ -601,7 +601,7 @@ PHASE(All)
 #define DEFAULT_CONFIG_ES7ValuesEntries (true)
 #define DEFAULT_CONFIG_ESObjectGetOwnPropertyDescriptors (true)
-#define DEFAULT_CONFIG_ESSharedArrayBuffer (true)
+#define DEFAULT_CONFIG_ESSharedArrayBuffer (false)
 #define DEFAULT_CONFIG_ES6Verbose (false)
 #define DEFAULT_CONFIG_ES6All (false)
@@ -611,6 +611,7 @@ PHASE(All)
 #define DEFAULT_CONFIG_TraceAsyncDebugCalls (false)
 #define DEFAULT_CONFIG_ForcePostLowerGlobOptInstrString (false)
 #define DEFAULT_CONFIG_EnumerateSpecialPropertiesInDebugger (true)
+#define DEFAULT_CONFIG_ESDynamicImport (false)
 #endif

 #define DEFAULT_CONFIG_MaxJITFunctionBytecodeByteLength (4800000)
@@ -1029,6 +1030,11 @@ FLAGPR (Boolean, ES6, ES7TrailingComma , "Enable ES7 trailing co
 FLAGPR           (Boolean, ES6, ES6IsConcatSpreadable , "Enable ES6 isConcatSpreadable Symbol" , DEFAULT_CONFIG_ES6IsConcatSpreadable)
 FLAGPR           (Boolean, ES6, ES6Math               , "Enable ES6 Math extensions"           , DEFAULT_CONFIG_ES6Math)

+#ifndef COMPILE_DISABLE_ESDynamicImport
+#define COMPILE_DISABLE_ESDynamicImport 0
+#endif
+FLAGPR_REGOVR_EXP(Boolean, ES6, ESDynamicImport       , "Enable dynamic import"                , DEFAULT_CONFIG_ESDynamicImport)
+
 FLAGPR           (Boolean, ES6, ES6Module             , "Enable ES6 Modules"                   , DEFAULT_CONFIG_ES6Module)
 FLAGPR           (Boolean, ES6, ES6Object             , "Enable ES6 Object extensions"         , DEFAULT_CONFIG_ES6Object)
 FLAGPR           (Boolean, ES6, ES6Number             , "Enable ES6 Number extensions"         , DEFAULT_CONFIG_ES6Number)
@@ -1078,7 +1084,7 @@ FLAGPR (Boolean, ES6, ESObjectGetOwnPropertyDescriptors, "Enable Objec
 #ifndef COMPILE_DISABLE_ESSharedArrayBuffer
 #define COMPILE_DISABLE_ESSharedArrayBuffer 0
 #endif
-FLAGPRA          (Boolean, ES6, ESSharedArrayBuffer   , sab , "Enable SharedArrayBuffer"       , DEFAULT_CONFIG_ESSharedArrayBuffer)
+FLAGPR_REGOVR_EXP(Boolean, ES6, ESSharedArrayBuffer   , "Enable SharedArrayBuffer"             , DEFAULT_CONFIG_ESSharedArrayBuffer)

 // /ES6 (BLUE+1) features/flags
diff --git a/lib/Common/Core/SysInfo.h b/lib/Common/Core/SysInfo.h
index d445fa7fe48..aee010be961 100644
--- a/lib/Common/Core/SysInfo.h
+++ b/lib/Common/Core/SysInfo.h
@@ -64,6 +64,8 @@ class AutoSystemInfo : public SYSTEM_INFO
 #endif
     static DWORD const PageSize = 4096;

+    static size_t const MaxPageCount = SIZE_MAX / PageSize;
+
 #ifdef STACK_ALIGN
     static DWORD const StackAlign = STACK_ALIGN;
 #else
diff --git a/lib/Common/Memory/HeapBlockMap.cpp b/lib/Common/Memory/HeapBlockMap.cpp
index 9b611814acd..5a6ab212e30 100644
--- a/lib/Common/Memory/HeapBlockMap.cpp
+++ b/lib/Common/Memory/HeapBlockMap.cpp
@@ -1174,7 +1174,7 @@ HeapBlockMap64::EnsureHeapBlock(void * address, size_t pageCount)
         {
             return true;
         }
-        address = (void *)((size_t)address + (nodePages * AutoSystemInfo::PageSize));
+        address = (void *)((size_t)address + ((size_t)nodePages * AutoSystemInfo::PageSize));
         nodePages = HeapBlockMap64::PagesPer4GB;
         if (pageCountLeft < HeapBlockMap64::PagesPer4GB)
         {
diff --git a/lib/Common/Memory/HeapConstants.h b/lib/Common/Memory/HeapConstants.h
index 4e6bc680cec..33137ed4310 100644
--- a/lib/Common/Memory/HeapConstants.h
+++ b/lib/Common/Memory/HeapConstants.h
@@ -19,6 +19,8 @@ class HeapConstants
     static const uint MaxMediumObjectSize = 9216;
 #endif

+    static const uint MaxLargeObjectSize = 1u << 31; // Maximum recycler object is 2GB
+
 #if defined(_M_IX86_OR_ARM32)
     // Only if a pointer points to first 8k region of a large object, it will set the mark bit in the chunk->MarkBits
     // If the pointer points outside of that region, no mark bit will be set
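The `(size_t)` cast in HeapBlockMap64 matters because `nodePages * AutoSystemInfo::PageSize` is otherwise a 32-bit multiply: with PagesPer4GB = 2^20 pages of 4KB each, the product is exactly 2^32 and wraps to zero, so the address would never advance. A small demonstration (assumes a 64-bit `size_t`):

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    const uint32_t PageSize = 4096;
    const uint32_t PagesPer4GB = 1u << 20;   // 4GB / 4KB pages

    // 32-bit multiply wraps: 2^20 * 2^12 == 2^32 == 0 modulo 2^32.
    uint32_t wrapped = PagesPer4GB * PageSize;
    printf("wrapped: %u\n", wrapped);            // prints 0

    // Promoting one operand to size_t (64-bit here) keeps the full product.
    size_t correct = (size_t)PagesPer4GB * PageSize;
    printf("correct: %zu\n", correct);           // prints 4294967296
}
```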
diff --git a/lib/Common/Memory/PageAllocator.cpp b/lib/Common/Memory/PageAllocator.cpp
index 5ccc4da2bf0..61a3772d25d 100644
--- a/lib/Common/Memory/PageAllocator.cpp
+++ b/lib/Common/Memory/PageAllocator.cpp
@@ -116,7 +116,7 @@ SegmentBase::Initialize(DWORD allocFlags, bool excludeGuardPages)
         return false;
     }

-    this->address = (char *)GetAllocator()->GetVirtualAllocator()->Alloc(NULL, totalPages * AutoSystemInfo::PageSize, MEM_RESERVE | allocFlags, PAGE_READWRITE, this->IsInCustomHeapAllocator());
+    this->address = (char *)GetAllocator()->GetVirtualAllocator()->AllocPages(NULL, totalPages, MEM_RESERVE | allocFlags, PAGE_READWRITE, this->IsInCustomHeapAllocator());

     if (this->address == nullptr)
     {
@@ -432,7 +432,7 @@ PageSegmentBase::AllocDecommitPages(uint pageCount, T freePages,
         }
     }

-    void * ret = this->GetAllocator()->GetVirtualAllocator()->Alloc(pages, pageCount * AutoSystemInfo::PageSize, MEM_COMMIT, PAGE_READWRITE, this->IsInCustomHeapAllocator());
+    void * ret = this->GetAllocator()->GetVirtualAllocator()->AllocPages(pages, pageCount, MEM_COMMIT, PAGE_READWRITE, this->IsInCustomHeapAllocator());
     if (ret != nullptr)
     {
         Assert(ret == pages);
@@ -2807,8 +2807,7 @@ bool HeapPageAllocator::CreateSecondaryAllocator(SegmentBase* segment, boo
     }

     if (!committed && segment->GetSecondaryAllocSize() != 0 &&
-        !this->GetVirtualAllocator()->Alloc(segment->GetSecondaryAllocStartAddress(), segment->GetSecondaryAllocSize(),
-            MEM_COMMIT, PAGE_READWRITE, true))
+        !this->GetVirtualAllocator()->AllocPages(segment->GetSecondaryAllocStartAddress(), segment->GetSecondaryAllocPageCount(), MEM_COMMIT, PAGE_READWRITE, true))
     {
         *allocator = nullptr;
         return false;
diff --git a/lib/Common/Memory/PageAllocator.h b/lib/Common/Memory/PageAllocator.h
index 4adc0eb0acd..1970e570d15 100644
--- a/lib/Common/Memory/PageAllocator.h
+++ b/lib/Common/Memory/PageAllocator.h
@@ -148,6 +148,7 @@ class SegmentBase: public SegmentBaseCommon
     char* GetSecondaryAllocStartAddress() const { return (this->address + GetAvailablePageCount() * AutoSystemInfo::PageSize); }
     uint GetSecondaryAllocSize() const { return this->secondaryAllocPageCount * AutoSystemInfo::PageSize; }
+    uint GetSecondaryAllocPageCount() const { return this->secondaryAllocPageCount; }

     char* GetAddress() const { return address; }
     char* GetEndAddress() const { return GetSecondaryAllocStartAddress(); }
diff --git a/lib/Common/Memory/Recycler.cpp b/lib/Common/Memory/Recycler.cpp
index a3368a73ea9..882f13a43d0 100644
--- a/lib/Common/Memory/Recycler.cpp
+++ b/lib/Common/Memory/Recycler.cpp
@@ -1433,6 +1433,18 @@ Recycler::LargeAlloc(HeapInfo* heap, size_t size, ObjectInfoBits attributes)
 {
     Assert((attributes & InternalObjectInfoBitMask) == attributes);

+    if (size >= HeapConstants::MaxLargeObjectSize)
+    {
+        if (nothrow == false)
+        {
+            this->OutOfMemory();
+        }
+        else
+        {
+            return nullptr;
+        }
+    }
+
     char * addr = TryLargeAlloc(heap, size, attributes, nothrow);
     if (addr == nullptr)
     {
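The Alloc-to-AllocPages renames across the allocators move the byte-size computation behind one guarded multiply: callers now pass a page count, and each entry point rejects counts whose byte size cannot be represented. A sketch of that guard in isolation (the constants mirror AutoSystemInfo; TryGetByteSize is an illustrative name, not the real API):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

static const size_t PageSize = 4096;
static const size_t MaxPageCount = SIZE_MAX / PageSize;  // mirrors AutoSystemInfo::MaxPageCount

// Compute pageCount * PageSize with an explicit overflow guard, the way the
// renamed AllocPages entry points do before calling the OS allocator.
static bool TryGetByteSize(size_t pageCount, size_t *byteSize)
{
    if (pageCount > MaxPageCount)
    {
        return false;                // pageCount * PageSize would wrap
    }
    *byteSize = pageCount * PageSize;
    return true;
}

int main()
{
    size_t size;
    printf("ok=%d\n", TryGetByteSize(4, &size));             // ok=1, size == 16384
    printf("ok=%d\n", TryGetByteSize(SIZE_MAX / 8, &size));  // ok=0: guarded
}
```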
diff --git a/lib/Common/Memory/SectionAllocWrapper.cpp b/lib/Common/Memory/SectionAllocWrapper.cpp
index d439c338a3b..3dc1a7bad8a 100644
--- a/lib/Common/Memory/SectionAllocWrapper.cpp
+++ b/lib/Common/Memory/SectionAllocWrapper.cpp
@@ -541,8 +541,14 @@ SectionAllocWrapper::SectionAllocWrapper(HANDLE process) :
 }

 LPVOID
-SectionAllocWrapper::Alloc(LPVOID requestAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
+SectionAllocWrapper::AllocPages(LPVOID requestAddress, size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
 {
+    if (pageCount > AutoSystemInfo::MaxPageCount)
+    {
+        return nullptr;
+    }
+    size_t dwSize = pageCount * AutoSystemInfo::PageSize;
+
     Assert(isCustomHeapAllocation);

     LPVOID address = nullptr;
@@ -853,8 +859,14 @@ LPVOID PreReservedSectionAllocWrapper::EnsurePreReservedRegionInternal()
     return startAddress;
 }

-LPVOID PreReservedSectionAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
+LPVOID PreReservedSectionAllocWrapper::AllocPages(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
 {
+    if (pageCount > AutoSystemInfo::MaxPageCount)
+    {
+        return nullptr;
+    }
+    size_t dwSize = pageCount * AutoSystemInfo::PageSize;
+
     AssertMsg(isCustomHeapAllocation, "PreReservation used for allocations other than CustomHeap?");
     AssertMsg(AutoSystemInfo::Data.IsCFGEnabled() || PHASE_FORCE1(Js::PreReservedHeapAllocPhase), "PreReservation without CFG ?");
     Assert(dwSize != 0);
diff --git a/lib/Common/Memory/SectionAllocWrapper.h b/lib/Common/Memory/SectionAllocWrapper.h
index 8a3086ea99c..0d19daa2a50 100644
--- a/lib/Common/Memory/SectionAllocWrapper.h
+++ b/lib/Common/Memory/SectionAllocWrapper.h
@@ -133,7 +133,7 @@ class SectionAllocWrapper
 public:
     SectionAllocWrapper(HANDLE process);

-    LPVOID Alloc(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation);
+    LPVOID AllocPages(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation);
     BOOL Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType);
     LPVOID AllocLocal(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t dwSize);
     BOOL FreeLocal(LPVOID lpAddress);
@@ -160,7 +160,7 @@ class PreReservedSectionAllocWrapper
     PreReservedSectionAllocWrapper(HANDLE process);
     ~PreReservedSectionAllocWrapper();

-    LPVOID Alloc(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation);
+    LPVOID AllocPages(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation);
     BOOL Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType);
     LPVOID AllocLocal(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t dwSize);
     BOOL FreeLocal(LPVOID lpAddress);
diff --git a/lib/Common/Memory/VirtualAllocWrapper.cpp b/lib/Common/Memory/VirtualAllocWrapper.cpp
index e10c7389eb7..ba0b049b090 100644
--- a/lib/Common/Memory/VirtualAllocWrapper.cpp
+++ b/lib/Common/Memory/VirtualAllocWrapper.cpp
@@ -10,8 +10,14 @@
 VirtualAllocWrapper VirtualAllocWrapper::Instance;  // single instance

-LPVOID VirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
+LPVOID VirtualAllocWrapper::AllocPages(LPVOID lpAddress, size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
 {
+    if (pageCount > AutoSystemInfo::MaxPageCount)
+    {
+        return nullptr;
+    }
+    size_t dwSize = pageCount * AutoSystemInfo::PageSize;
+
     LPVOID address = nullptr;

 #if defined(ENABLE_JIT_CLAMP)
@@ -264,8 +270,14 @@ LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegionInternal()
  * - Tracks the committed pages
 */

-LPVOID PreReservedVirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
+LPVOID PreReservedVirtualAllocWrapper::AllocPages(LPVOID lpAddress, size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
 {
+    if (pageCount > AutoSystemInfo::MaxPageCount)
+    {
+        return nullptr;
+    }
+    size_t dwSize = pageCount * AutoSystemInfo::PageSize;
+
     AssertMsg(isCustomHeapAllocation, "PreReservation used for allocations other than CustomHeap?");
     AssertMsg(AutoSystemInfo::Data.IsCFGEnabled() || PHASE_FORCE1(Js::PreReservedHeapAllocPhase), "PreReservation without CFG ?");
     Assert(dwSize != 0);
diff --git a/lib/Common/Memory/VirtualAllocWrapper.h b/lib/Common/Memory/VirtualAllocWrapper.h
index cd4c06e7458..54071c6d3d5 100644
--- a/lib/Common/Memory/VirtualAllocWrapper.h
+++ b/lib/Common/Memory/VirtualAllocWrapper.h
@@ -22,7 +22,7 @@ class PreReservedSectionAllocWrapper;
 class VirtualAllocWrapper
 {
 public:
-    LPVOID Alloc(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation);
+    LPVOID AllocPages(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation);
     BOOL Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType);
     LPVOID AllocLocal(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t dwSize) { return lpAddress; }
     BOOL FreeLocal(LPVOID lpAddress) { return true; }
@@ -54,7 +54,7 @@ class PreReservedVirtualAllocWrapper
 public:
     PreReservedVirtualAllocWrapper();
     ~PreReservedVirtualAllocWrapper();
-    LPVOID Alloc(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation);
+    LPVOID AllocPages(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation);
     BOOL Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType);
     LPVOID AllocLocal(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t dwSize) { return lpAddress; }
     BOOL FreeLocal(LPVOID lpAddress) { return true; }
diff --git a/lib/Parser/Parse.cpp b/lib/Parser/Parse.cpp
index 4df6b8b2f37..807bb4b4983 100644
--- a/lib/Parser/Parse.cpp
+++ b/lib/Parser/Parse.cpp
@@ -2657,6 +2657,11 @@ ParseNodePtr Parser::ParseImport()
     // import()
     if (m_token.tk == tkLParen)
     {
+        if (!m_scriptContext->GetConfig()->IsESDynamicImportEnabled())
+        {
+            Error(ERRsyntax);
+        }
+
         ParseNodePtr pnode = ParseImportCall();
         BOOL fCanAssign;
         IdentToken token;
@@ -3528,7 +3533,7 @@ LFunction :
         break;

     case tkIMPORT:
-        if (m_scriptContext->GetConfig()->IsES6ModuleEnabled())
+        if (m_scriptContext->GetConfig()->IsES6ModuleEnabled() && m_scriptContext->GetConfig()->IsESDynamicImportEnabled())
         {
             m_pscan->Scan();
             ChkCurTokNoScan(tkLParen, ERRnoLparen);
diff --git a/lib/Runtime/Base/ThreadConfigFlagsList.h b/lib/Runtime/Base/ThreadConfigFlagsList.h
index 3fe94205414..019bb2da82d 100644
--- a/lib/Runtime/Base/ThreadConfigFlagsList.h
+++ b/lib/Runtime/Base/ThreadConfigFlagsList.h
@@ -47,6 +47,7 @@ FLAG_RELEASE(IsES7AsyncAndAwaitEnabled, ES7AsyncAwait)
 FLAG_RELEASE(IsArrayBufferTransferEnabled, ArrayBufferTransfer)
 FLAG_RELEASE(IsESObjectGetOwnPropertyDescriptorsEnabled, ESObjectGetOwnPropertyDescriptors)
 FLAG_RELEASE(IsESSharedArrayBufferEnabled, ESSharedArrayBuffer)
+FLAG_RELEASE(IsESDynamicImportEnabled, ESDynamicImport)
 #ifdef ENABLE_PROJECTION
 FLAG(AreWinRTDelegatesInterfaces, WinRTDelegateInterfaces)
 FLAG_RELEASE(IsWinRTAdaptiveAppsEnabled, WinRTAdaptiveApps)
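Dynamic import is now double-gated: both ES6Module and the new ESDynamicImport flag must be on before the parser accepts `import(...)`, and ESDynamicImport defaults to off. A toy model of that gate (ScriptConfig and ParseImportCall here are illustrative stand-ins, not the real parser types):

```cpp
#include <cstdio>
#include <stdexcept>

// Simplified stand-in for ScriptConfiguration: both flags must be on before
// the parser will accept `import(...)`, mirroring the tkIMPORT change above.
struct ScriptConfig
{
    bool es6Module = true;          // ES6 modules already ship enabled
    bool esDynamicImport = false;   // new flag, default off (DEFAULT_CONFIG_ESDynamicImport)
};

static void ParseImportCall(const ScriptConfig &config)
{
    if (!(config.es6Module && config.esDynamicImport))
    {
        throw std::runtime_error("SyntaxError");   // analog of Error(ERRsyntax)
    }
    puts("import() parsed");
}

int main()
{
    ScriptConfig config;
    try { ParseImportCall(config); } catch (const std::exception &e) { puts(e.what()); }
    config.esDynamicImport = true;   // e.g. enabled via an experimental-features switch
    ParseImportCall(config);
}
```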
diff --git a/lib/Runtime/ByteCode/ByteCodeEmitter.cpp b/lib/Runtime/ByteCode/ByteCodeEmitter.cpp
index c20647f4780..047bf78fda7 100644
--- a/lib/Runtime/ByteCode/ByteCodeEmitter.cpp
+++ b/lib/Runtime/ByteCode/ByteCodeEmitter.cpp
@@ -3535,6 +3535,10 @@ void ByteCodeGenerator::StartEmitFunction(ParseNode *pnodeFnc)
                     funcInfo->paramScope->AddSymbol(sym);
                 }
                 sym->EnsureScopeSlot(funcInfo);
+                if (sym->GetHasNonLocalReference())
+                {
+                    sym->GetScope()->SetHasOwnLocalInClosure(true);
+                }
             }
         }

@@ -11447,6 +11451,7 @@ void Emit(ParseNode *pnode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *func
             }
         };

+        ByteCodeGenerator::TryScopeRecord tryRecForCatch(Js::OpCode::ResumeCatch, catchLabel);
         if (isPattern)
         {
             Parser::MapBindIdentifier(pnodeObj->sxParamPattern.pnode1, [&](ParseNodePtr item)
@@ -11467,7 +11472,6 @@ void Emit(ParseNode *pnode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *func
             ParseNodePtr pnode1 = pnodeObj->sxParamPattern.pnode1;
             Assert(pnode1->IsPattern());

-            ByteCodeGenerator::TryScopeRecord tryRecForCatch(Js::OpCode::ResumeCatch, catchLabel);
             if (funcInfo->byteCodeFunction->IsCoroutine())
             {
                 byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForCatch);
@@ -11490,7 +11494,6 @@ void Emit(ParseNode *pnode, ByteCodeGenerator *byteCodeGenerator, FuncInfo *func
             byteCodeGenerator->Writer()->Empty(Js::OpCode::Nop);
             byteCodeGenerator->EndStatement(pnodeCatch);

-            ByteCodeGenerator::TryScopeRecord tryRecForCatch(Js::OpCode::ResumeCatch, catchLabel);
             if (funcInfo->byteCodeFunction->IsCoroutine())
             {
                 byteCodeGenerator->tryScopeRecordsList.LinkToEnd(&tryRecForCatch);
diff --git a/lib/Runtime/ByteCode/ByteCodeGenerator.cpp b/lib/Runtime/ByteCode/ByteCodeGenerator.cpp
index 86304e5a601..078face02db 100644
--- a/lib/Runtime/ByteCode/ByteCodeGenerator.cpp
+++ b/lib/Runtime/ByteCode/ByteCodeGenerator.cpp
@@ -1423,6 +1423,11 @@ FuncInfo * ByteCodeGenerator::StartBindFunction(const char16 *name, uint nameLen
         sym->SetPosition(parseableFunctionInfo->GetOrAddPropertyIdTracked(sym->GetName()));
         pnode->sxFnc.SetFuncSymbol(sym);
+
+        if (funcExprScope->GetIsObject())
+        {
+            funcExprScope->SetMustInstantiate(true);
+        }
     }

     Scope *paramScope = pnode->sxFnc.pnodeScopes ? pnode->sxFnc.pnodeScopes->sxBlock.scope : nullptr;
diff --git a/lib/Runtime/Debug/DiagObjectModel.h b/lib/Runtime/Debug/DiagObjectModel.h
index 12f44069f2b..ee3b86dc061 100644
--- a/lib/Runtime/Debug/DiagObjectModel.h
+++ b/lib/Runtime/Debug/DiagObjectModel.h
@@ -370,7 +370,7 @@ namespace Js
         {
             activeScopeObject->SetPropertyWithAttributes(
                 resolveObject.propId,
-                JavascriptOperators::BoxStackInstance(resolveObject.obj, scriptContext), //The value escapes, box if necessary.
+                JavascriptOperators::BoxStackInstance(resolveObject.obj, scriptContext, /* allowStackFunction */ false, /* deepCopy */ false), //The value escapes, box if necessary.
                 resolveObject.isConst ? PropertyConstDefaults : PropertyDynamicTypeDefaults,
                 nullptr);
         }
diff --git a/lib/Runtime/Debug/DiagStackFrame.cpp b/lib/Runtime/Debug/DiagStackFrame.cpp
index 14611247fb8..0348c3a5c1b 100644
--- a/lib/Runtime/Debug/DiagStackFrame.cpp
+++ b/lib/Runtime/Debug/DiagStackFrame.cpp
@@ -348,7 +348,7 @@ namespace Js
         {
             activeScopeObject->SetPropertyWithAttributes(
                 Js::PropertyIds::_this,
-                JavascriptOperators::BoxStackInstance(varThis, scriptContext), //The value escapes, box if necessary.
+                JavascriptOperators::BoxStackInstance(varThis, scriptContext, /* allowStackFunction */ false, /* deepCopy */ false), //The value escapes, box if necessary.
                 PropertyConstDefaults,
                 nullptr);

 #if DBG
diff --git a/lib/Runtime/Language/AsmJsByteCodeGenerator.cpp b/lib/Runtime/Language/AsmJsByteCodeGenerator.cpp
index fee850cab31..c0a1be04589 100644
--- a/lib/Runtime/Language/AsmJsByteCodeGenerator.cpp
+++ b/lib/Runtime/Language/AsmJsByteCodeGenerator.cpp
@@ -1024,7 +1024,7 @@ namespace Js
         AsmJsFunctionDeclaration* sym = mCompiler->LookupFunction(funcName);
         if( !sym )
         {
-            throw AsmJsCompilationException( _u("Undefined function %s"), funcName );
+            throw AsmJsCompilationException( _u("Undefined function %s"), funcName->Psz() );
         }
@@ -1266,8 +1266,13 @@ namespace Js
             LoadModuleFFI( AsmJsFunctionMemory::FunctionRegister, sym->GetFunctionIndex() );
             break;
         case AsmJsSymbol::FuncPtrTable:
-            LoadModuleFunctionTable( AsmJsFunctionMemory::FunctionRegister, sym->GetFunctionIndex(), funcTableIndexRegister );
+            // Make sure the user is not trying to call the function table directly
+            if (funcTableIndexRegister == Constants::NoRegister)
+            {
+                throw AsmJsCompilationException(_u("Direct call to function table '%s' is not allowed"), funcName->Psz());
+            }
             mFunction->ReleaseTmpRegister( funcTableIndexRegister );
+            LoadModuleFunctionTable( AsmJsFunctionMemory::FunctionRegister, sym->GetFunctionIndex(), funcTableIndexRegister );
             break;
         default:
             Assert( false );
diff --git a/lib/Runtime/Language/JavascriptOperators.cpp b/lib/Runtime/Language/JavascriptOperators.cpp
index 4ed5cecaefa..3ca0681b8a9 100644
--- a/lib/Runtime/Language/JavascriptOperators.cpp
+++ b/lib/Runtime/Language/JavascriptOperators.cpp
@@ -9653,8 +9653,14 @@ namespace Js

         if (FAILED(hr))
         {
+            // We cannot just use the buffer in the specifier string - need to make a copy here.
+            size_t length = wcslen(moduleName);
+            char16* allocatedString = RecyclerNewArrayLeaf(scriptContext->GetRecycler(), char16, length + 1);
+            wmemcpy_s(allocatedString, length + 1, moduleName, length);
+            allocatedString[length] = _u('\0');
+
             Js::JavascriptError *error = scriptContext->GetLibrary()->CreateURIError();
-            JavascriptError::SetErrorMessageProperties(error, hr, moduleName, scriptContext);
+            JavascriptError::SetErrorMessageProperties(error, hr, allocatedString, scriptContext);
             return SourceTextModuleRecord::ResolveOrRejectDynamicImportPromise(false, error, scriptContext);
         }

@@ -9790,7 +9796,7 @@ namespace Js
     }

     Js::Var
-    JavascriptOperators::BoxStackInstance(Js::Var instance, ScriptContext * scriptContext, bool allowStackFunction)
+    JavascriptOperators::BoxStackInstance(Js::Var instance, ScriptContext * scriptContext, bool allowStackFunction, bool deepCopy)
     {
         if (!ThreadContext::IsOnStack(instance) || (allowStackFunction && !TaggedNumber::Is(instance) && (*(int*)instance & 1)))
         {
@@ -9812,11 +9818,11 @@ namespace Js
         case Js::TypeIds_Object:
             return DynamicObject::BoxStackInstance(DynamicObject::FromVar(instance));
         case Js::TypeIds_Array:
-            return JavascriptArray::BoxStackInstance(JavascriptArray::FromVar(instance));
+            return JavascriptArray::BoxStackInstance(JavascriptArray::FromVar(instance), deepCopy);
         case Js::TypeIds_NativeIntArray:
-            return JavascriptNativeIntArray::BoxStackInstance(JavascriptNativeIntArray::FromVar(instance));
+            return JavascriptNativeIntArray::BoxStackInstance(JavascriptNativeIntArray::FromVar(instance), deepCopy);
         case Js::TypeIds_NativeFloatArray:
-            return JavascriptNativeFloatArray::BoxStackInstance(JavascriptNativeFloatArray::FromVar(instance));
+            return JavascriptNativeFloatArray::BoxStackInstance(JavascriptNativeFloatArray::FromVar(instance), deepCopy);
         case Js::TypeIds_Function:
             Assert(allowStackFunction); // Stack functions are dealt with not by marking them, but by nested function escape analysis
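The URI-error path above copies the module specifier into recycler-owned memory before attaching it to the error object, and the same pattern recurs twice in SourceTextModuleRecord.cpp below. A standalone sketch of the copy (RecyclerNewArrayLeafChar16 is a hypothetical shim standing in for RecyclerNewArrayLeaf, and portable wmemcpy replaces wmemcpy_s):

```cpp
#include <cwchar>

typedef wchar_t char16;   // stand-in for ChakraCore's char16

// Hypothetical recycler shim for illustration; the real code allocates with
// RecyclerNewArrayLeaf(recycler, char16, length + 1).
static char16* RecyclerNewArrayLeafChar16(size_t count) { return new char16[count]; }

// Copy a specifier string into memory the GC owns before handing it to an
// error object, so the error never points into a buffer that may be freed.
static char16* CopySpecifierForError(const char16* moduleName)
{
    size_t length = wcslen(moduleName);
    char16* allocatedString = RecyclerNewArrayLeafChar16(length + 1);
    wmemcpy(allocatedString, moduleName, length);
    allocatedString[length] = L'\0';
    return allocatedString;
}

int main()
{
    const char16* specifier = L"./module.js";
    char16* copy = CopySpecifierForError(specifier);
    // `copy` now has its own lifetime, independent of `specifier`
    delete[] copy;
}
```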
diff --git a/lib/Runtime/Language/JavascriptOperators.h b/lib/Runtime/Language/JavascriptOperators.h
index fd9e912d4ef..e229173b2e9 100644
--- a/lib/Runtime/Language/JavascriptOperators.h
+++ b/lib/Runtime/Language/JavascriptOperators.h
@@ -611,7 +611,7 @@ namespace Js
 #endif
         static RecyclableObject *GetCallableObjectOrThrow(const Var callee, ScriptContext *const scriptContext);

-        static Js::Var BoxStackInstance(Js::Var value, ScriptContext * scriptContext, bool allowStackFunction = false);
+        static Js::Var BoxStackInstance(Js::Var value, ScriptContext * scriptContext, bool allowStackFunction, bool deepCopy);
         static BOOL PropertyReferenceWalkUnscopable(Var instance, RecyclableObject** propertyObject, PropertyId propertyId, Var* value, PropertyValueInfo* info, ScriptContext* requestContext);
         static BOOL PropertyReferenceWalk(Var instance, RecyclableObject** propertyObject, PropertyId propertyId, Var* value, PropertyValueInfo* info, ScriptContext* requestContext);
diff --git a/lib/Runtime/Language/JavascriptStackWalker.cpp b/lib/Runtime/Language/JavascriptStackWalker.cpp
index 99c768e4b55..1a6eb32f616 100644
--- a/lib/Runtime/Language/JavascriptStackWalker.cpp
+++ b/lib/Runtime/Language/JavascriptStackWalker.cpp
@@ -450,7 +450,7 @@ namespace Js
                 // are inlined frames on the stack the InlineeCallInfo of the first inlined frame
                 // has the native offset of the current physical frame.
                 Assert(!*inlinee);
-                InlinedFrameWalker::FromPhysicalFrame(tmpFrameWalker, currentFrame, ScriptFunction::FromVar(parentFunction), PreviousInterpreterFrameIsFromBailout(), loopNum, this, useInternalFrameInfo);
+                InlinedFrameWalker::FromPhysicalFrame(tmpFrameWalker, currentFrame, ScriptFunction::FromVar(parentFunction), PreviousInterpreterFrameIsFromBailout(), loopNum, this, useInternalFrameInfo, false /*noAlloc*/, false /*deepCopy*/);
                 inlineeOffset = tmpFrameWalker.GetBottomMostInlineeOffset();
                 tmpFrameWalker.Close();
             }
@@ -555,7 +555,8 @@ namespace Js
             }

             bool hasInlinedFramesOnStack = InlinedFrameWalker::FromPhysicalFrame(inlinedFrameWalker, currentFrame,
-                ScriptFunction::FromVar(function), true /*fromBailout*/, loopNum, this, false /*useInternalFrameInfo*/);
+                ScriptFunction::FromVar(function), true /*fromBailout*/, loopNum, this, false /*useInternalFrameInfo*/, false /*noAlloc*/, this->deepCopyForArgs);
+
             if (hasInlinedFramesOnStack)
             {
                 // We're now back in the state where currentFrame == physical frame of the inliner, but
@@ -602,7 +603,18 @@ namespace Js
             // Check whether there are inlined frames nested in this native frame. The corresponding check for
            // a jitted loop body frame should have been done in CheckJavascriptFrame
             Assert(lastInternalFrameInfo.codeAddress == nullptr);
-            if (InlinedFrameWalker::FromPhysicalFrame(inlinedFrameWalker, currentFrame, ScriptFunction::FromVar(function)))
+            bool inlinedFramesFound = InlinedFrameWalker::FromPhysicalFrame(
+                inlinedFrameWalker,
+                currentFrame,
+                ScriptFunction::FromVar(function),
+                false,   // fromBailout
+                -1,      // loopNum
+                nullptr, // walker
+                false,   // useInternalFrameInfo
+                false,   // noAlloc
+                this->deepCopyForArgs
+            );
+            if (inlinedFramesFound)
             {
                 this->inlinedFramesBeingWalked = inlinedFrameWalker.Next(inlinedFrameCallInfo);
                 this->hasInlinedFramesOnStack = true;
@@ -621,7 +633,8 @@ namespace Js
     _NOINLINE
     JavascriptStackWalker::JavascriptStackWalker(ScriptContext * scriptContext, bool useEERContext, PVOID returnAddress, bool _forceFullWalk /*=false*/) :
         inlinedFrameCallInfo(CallFlags_None, 0), shouldDetectPartiallyInitializedInterpreterFrame(true), forceFullWalk(_forceFullWalk),
-        previousInterpreterFrameIsFromBailout(false), previousInterpreterFrameIsForLoopBody(false), hasInlinedFramesOnStack(false)
+        previousInterpreterFrameIsFromBailout(false), previousInterpreterFrameIsForLoopBody(false), hasInlinedFramesOnStack(false),
+        deepCopyForArgs(false)
     {
         if (scriptContext == NULL)
         {
@@ -917,7 +930,7 @@ namespace Js
                     Assert((this->interpreterFrame->GetFlags() & Js::InterpreterStackFrameFlags_FromBailOut) != 0);
                     InlinedFrameWalker tmpFrameWalker;
                     Assert(InlinedFrameWalker::FromPhysicalFrame(tmpFrameWalker, currentFrame, Js::ScriptFunction::FromVar(argv[JavascriptFunctionArgIndex_Function]),
-                        true /*fromBailout*/, this->tempInterpreterFrame->GetCurrentLoopNum(), this, false /*useInternalFrameInfo*/, true /*noAlloc*/));
+                        true /*fromBailout*/, this->tempInterpreterFrame->GetCurrentLoopNum(), this, false /*useInternalFrameInfo*/, true /*noAlloc*/, false /*deepCopy*/));
                     tmpFrameWalker.Close();
                 }
 #endif //DBG
@@ -964,9 +977,10 @@ namespace Js
         {
             if (includeInlineFrames &&
                 InlinedFrameWalker::FromPhysicalFrame(inlinedFrameWalker, currentFrame, Js::ScriptFunction::FromVar(argv[JavascriptFunctionArgIndex_Function]),
-                    false /*fromBailout*/, this->tempInterpreterFrame->GetCurrentLoopNum(), this, false /*useInternalFrameInfo*/))
+                    false /*fromBailout*/, this->tempInterpreterFrame->GetCurrentLoopNum(), this, false /*useInternalFrameInfo*/, false /*noAlloc*/, this->deepCopyForArgs))
             {
                 // Found inlined frames in a jitted loop body. We don't want to skip the inlined frames; walk all of them before setting codeAddress on lastInternalFrameInfo.
+                // DeepCopy here because, if there is an inlinee in a loop body, FromPhysicalFrame won't be called from UpdateFrame
                 this->inlinedFramesBeingWalked = inlinedFrameWalker.Next(inlinedFrameCallInfo);
                 this->hasInlinedFramesOnStack = true;
                 Assert(inlinedFramesBeingWalked);
@@ -1208,7 +1222,7 @@ namespace Js
 #if ENABLE_NATIVE_CODEGEN
     bool InlinedFrameWalker::FromPhysicalFrame(InlinedFrameWalker& self, StackFrame& physicalFrame, Js::ScriptFunction *parent, bool fromBailout,
-        int loopNum, const JavascriptStackWalker * const stackWalker, bool useInternalFrameInfo, bool noAlloc)
+        int loopNum, const JavascriptStackWalker * const stackWalker, bool useInternalFrameInfo, bool noAlloc, bool deepCopy)
     {
         bool inlinedFramesFound = false;
         FunctionBody* parentFunctionBody = parent->GetFunctionBody();
@@ -1261,7 +1275,7 @@ namespace Js

             if (record)
             {
-                record->RestoreFrames(parent->GetFunctionBody(), outerMostFrame, JavascriptCallStackLayout::FromFramePointer(framePointer));
+                record->RestoreFrames(parent->GetFunctionBody(), outerMostFrame, JavascriptCallStackLayout::FromFramePointer(framePointer), deepCopy);
             }
         }

@@ -1347,7 +1361,7 @@ namespace Js

         for (size_t i = 0; i < argCount; i++)
         {
-            args[i] = Js::JavascriptOperators::BoxStackInstance(args[i], scriptContext);
+            args[i] = Js::JavascriptOperators::BoxStackInstance(args[i], scriptContext, false /*allowStackFunction*/, false /*deepCopy*/);
         }
     }
diff --git a/lib/Runtime/Language/JavascriptStackWalker.h b/lib/Runtime/Language/JavascriptStackWalker.h
index de3a5ee088b..9dd22540f98 100644
--- a/lib/Runtime/Language/JavascriptStackWalker.h
+++ b/lib/Runtime/Language/JavascriptStackWalker.h
@@ -96,8 +96,8 @@ namespace Js
             Assert(currentIndex == -1);
         }

-        static bool FromPhysicalFrame(InlinedFrameWalker& self, StackFrame& physicalFrame, Js::ScriptFunction *parent, bool fromBailout = false,
-            int loopNum = -1, const JavascriptStackWalker * const walker = nullptr, bool useInternalFrameInfo = false, bool noAlloc = false);
+        static bool FromPhysicalFrame(InlinedFrameWalker& self, StackFrame& physicalFrame, Js::ScriptFunction *parent, bool fromBailout,
+            int loopNum, const JavascriptStackWalker * const walker, bool useInternalFrameInfo, bool noAlloc, bool deepCopy);
         void Close();
         bool Next(CallInfo& callInfo);
         size_t GetArgc() const;
@@ -304,6 +304,8 @@ namespace Js
         {
             return previousInterpreterFrameIsFromBailout;
         }
+
+        void SetDeepCopyForArguments() { deepCopyForArgs = true; }
 #if DBG
         static bool ValidateTopJitFrame(Js::ScriptContext* scriptContext);
 #endif
@@ -328,6 +330,7 @@ namespace Js
         bool previousInterpreterFrameIsFromBailout : 1;
         bool previousInterpreterFrameIsForLoopBody : 1;
         bool forceFullWalk : 1; // ignoring hasCaller
+        bool deepCopyForArgs : 1; // indicates when Var's data should be deep-copied when gathering Arguments for the frame

         Var GetThisFromFrame() const;               // returns 'this' object from the physical frame
         Var GetCurrentArgumentsObject() const;      // returns arguments object from the current frame, which may be virtual (belonging to an inlinee)
diff --git a/lib/Runtime/Language/SourceTextModuleRecord.cpp b/lib/Runtime/Language/SourceTextModuleRecord.cpp
index 6fdc57df4e8..09a7333fdbd 100644
--- a/lib/Runtime/Language/SourceTextModuleRecord.cpp
+++ b/lib/Runtime/Language/SourceTextModuleRecord.cpp
@@ -113,8 +113,16 @@ namespace Js
                 Assert(sourceLength == 0);
                 OUTPUT_TRACE_DEBUGONLY(Js::ModulePhase, _u("Failed to load: %s\n"), this->GetSpecifierSz());
                 hr = E_FAIL;
+
+                // We cannot just use the buffer in the specifier string - need to make a copy here.
+                const char16* moduleName = this->GetSpecifierSz();
+                size_t length = wcslen(moduleName);
+                char16* allocatedString = RecyclerNewArrayLeaf(scriptContext->GetRecycler(), char16, length + 1);
+                wmemcpy_s(allocatedString, length + 1, moduleName, length);
+                allocatedString[length] = _u('\0');
+
                 JavascriptError *pError = scriptContext->GetLibrary()->CreateURIError();
-                JavascriptError::SetErrorMessageProperties(pError, hr, this->GetSpecifierSz(), scriptContext);
+                JavascriptError::SetErrorMessageProperties(pError, hr, allocatedString, scriptContext);
                 *exceptionVar = pError;
             }
             else
@@ -288,8 +296,15 @@ namespace Js

             if (FAILED(hr))
             {
+                // We cannot just use the buffer in the specifier string - need to make a copy here.
+                const char16* moduleName = this->GetSpecifierSz();
+                size_t length = wcslen(moduleName);
+                char16* allocatedString = RecyclerNewArrayLeaf(scriptContext->GetRecycler(), char16, length + 1);
+                wmemcpy_s(allocatedString, length + 1, moduleName, length);
+                allocatedString[length] = _u('\0');
+
                 Js::JavascriptError * error = scriptContext->GetLibrary()->CreateURIError();
-                JavascriptError::SetErrorMessageProperties(error, hr, this->GetSpecifierSz(), scriptContext);
+                JavascriptError::SetErrorMessageProperties(error, hr, allocatedString, scriptContext);
                 return SourceTextModuleRecord::ResolveOrRejectDynamicImportPromise(false, error, scriptContext, this);
             }
         }
diff --git a/lib/Runtime/Library/JavascriptArray.cpp b/lib/Runtime/Library/JavascriptArray.cpp
index f922d696549..d8a456511b8 100644
--- a/lib/Runtime/Library/JavascriptArray.cpp
+++ b/lib/Runtime/Library/JavascriptArray.cpp
@@ -11782,28 +11782,59 @@ namespace Js
 #endif

     template <typename T>
-    void JavascriptArray::InitBoxedInlineHeadSegment(SparseArraySegment<T> * dst, SparseArraySegment<T> * src)
+    void JavascriptArray::InitBoxedInlineSegments(SparseArraySegment<T> * dst, SparseArraySegment<T> * src, bool deepCopy)
     {
         // Don't copy the segment map, we will build it again
         SetFlags(GetFlags() & ~DynamicObjectFlags::HasSegmentMap);

         SetHeadAndLastUsedSegment(dst);

+        // Copy head segment data
         dst->left = src->left;
         dst->length = src->length;
         dst->size = src->size;
         dst->CheckLengthvsSize();
-        dst->next = src->next;

         CopyArray(dst->elements, dst->size, src->elements, src->size);
+
+        if (!deepCopy)
+        {
+            // Without a deep copy, point to the existing next segment
+            dst->next = src->next;
+        }
+        else
+        {
+            // When deepCopy is true, make a separate copy of each segment. While this involves data
+            // duplication, it allows the new object to have a separate lifetime without sharing data.
+            AutoDisableInterrupt failFastError(GetScriptContext()->GetThreadContext());
+            do
+            {
+                if (src->next != nullptr)
+                {
+                    // Allocate a new segment in the destination and copy from src
+                    src = SparseArraySegment<T>::From(src->next);
+
+                    dst->next = dst->AllocateSegment(GetRecycler(), src->left, src->length, src->size, src->next);
+                    dst = SparseArraySegment<T>::From(dst->next);
+
+                    CopyArray(dst->elements, dst->size, src->elements, src->size);
+                }
+                else
+                {
+                    // Terminate the loop
+                    dst->next = nullptr;
+                    dst = nullptr;
+                }
+            } while (dst != nullptr);
+            failFastError.Completed();
+        }
     }

-    JavascriptArray::JavascriptArray(JavascriptArray * instance, bool boxHead)
+    JavascriptArray::JavascriptArray(JavascriptArray * instance, bool boxHead, bool deepCopy)
         : ArrayObject(instance)
     {
         if (boxHead)
         {
-            InitBoxedInlineHeadSegment(DetermineInlineHeadSegmentPointer(this), SparseArraySegment::From(instance->head));
+            InitBoxedInlineSegments(DetermineInlineHeadSegmentPointer(this), SparseArraySegment::From(instance->head), false);
         }
         else
         {
@@ -11815,7 +11846,7 @@ namespace Js
     }

     template <typename T>
-    T * JavascriptArray::BoxStackInstance(T * instance)
+    T * JavascriptArray::BoxStackInstance(T * instance, bool deepCopy)
     {
         Assert(ThreadContext::IsOnStack(instance));
         // On the stack, we reserved a pointer before the object to store the boxed value
@@ -11831,15 +11862,15 @@ namespace Js
         {
             boxedInstance = RecyclerNewPlusZ(instance->GetRecycler(),
                 inlineSlotsSize + sizeof(Js::SparseArraySegmentBase) + instance->head->size * sizeof(typename T::TElement),
-                T, instance, true);
+                T, instance, true, deepCopy);
         }
         else if(inlineSlotsSize)
         {
-            boxedInstance = RecyclerNewPlusZ(instance->GetRecycler(), inlineSlotsSize, T, instance, false);
+            boxedInstance = RecyclerNewPlusZ(instance->GetRecycler(), inlineSlotsSize, T, instance, false, false);
         }
         else
         {
-            boxedInstance = RecyclerNew(instance->GetRecycler(), T, instance, false);
+            boxedInstance = RecyclerNew(instance->GetRecycler(), T, instance, false, false);
         }

         *boxedInstanceRef = boxedInstance;
@@ -11847,9 +11878,9 @@ namespace Js
     }

     JavascriptArray *
-    JavascriptArray::BoxStackInstance(JavascriptArray * instance)
+    JavascriptArray::BoxStackInstance(JavascriptArray * instance, bool deepCopy)
     {
-        return BoxStackInstance(instance);
+        return BoxStackInstance(instance, deepCopy);
     }

 #if ENABLE_TTD
@@ -11917,17 +11948,17 @@ namespace Js
 #endif

     JavascriptNativeArray::JavascriptNativeArray(JavascriptNativeArray * instance) :
-        JavascriptArray(instance, false),
+        JavascriptArray(instance, false, false),
         weakRefToFuncBody(instance->weakRefToFuncBody)
     {
     }

-    JavascriptNativeIntArray::JavascriptNativeIntArray(JavascriptNativeIntArray * instance, bool boxHead) :
+    JavascriptNativeIntArray::JavascriptNativeIntArray(JavascriptNativeIntArray * instance, bool boxHead, bool deepCopy) :
         JavascriptNativeArray(instance)
     {
         if (boxHead)
         {
-            InitBoxedInlineHeadSegment(DetermineInlineHeadSegmentPointer(this), SparseArraySegment::From(instance->head));
+            InitBoxedInlineSegments(DetermineInlineHeadSegmentPointer(this), SparseArraySegment::From(instance->head), deepCopy);
         }
         else
         {
@@ -11938,9 +11969,9 @@ namespace Js
     }

     JavascriptNativeIntArray *
-    JavascriptNativeIntArray::BoxStackInstance(JavascriptNativeIntArray * instance)
+    JavascriptNativeIntArray::BoxStackInstance(JavascriptNativeIntArray * instance, bool deepCopy)
     {
-        return JavascriptArray::BoxStackInstance(instance);
+        return JavascriptArray::BoxStackInstance(instance, deepCopy);
     }

 #if ENABLE_TTD
@@ -11968,12 +11999,12 @@ namespace Js
 #endif
 #endif

-    JavascriptNativeFloatArray::JavascriptNativeFloatArray(JavascriptNativeFloatArray * instance, bool boxHead) :
+    JavascriptNativeFloatArray::JavascriptNativeFloatArray(JavascriptNativeFloatArray * instance, bool boxHead, bool deepCopy) :
         JavascriptNativeArray(instance)
     {
         if (boxHead)
         {
-            InitBoxedInlineHeadSegment(DetermineInlineHeadSegmentPointer(this), SparseArraySegment::From(instance->head));
+            InitBoxedInlineSegments(DetermineInlineHeadSegmentPointer(this), SparseArraySegment::From(instance->head), deepCopy);
         }
         else
         {
@@ -11984,9 +12015,9 @@ namespace Js
     }

     JavascriptNativeFloatArray *
-    JavascriptNativeFloatArray::BoxStackInstance(JavascriptNativeFloatArray * instance)
+    JavascriptNativeFloatArray::BoxStackInstance(JavascriptNativeFloatArray * instance, bool deepCopy)
     {
-        return JavascriptArray::BoxStackInstance(instance);
+        return JavascriptArray::BoxStackInstance(instance, deepCopy);
     }

 #if ENABLE_TTD
diff --git a/lib/Runtime/Library/JavascriptArray.h b/lib/Runtime/Library/JavascriptArray.h
index 57197de4fe2..8b0632ffa70 100644
--- a/lib/Runtime/Library/JavascriptArray.h
+++ b/lib/Runtime/Library/JavascriptArray.h
@@ -425,7 +425,7 @@ namespace Js
         JavascriptArray(uint32 length, DynamicType * type);

         // For BoxStackInstance
-        JavascriptArray(JavascriptArray * instance, bool boxHead);
+        JavascriptArray(JavascriptArray * instance, bool boxHead, bool deepCopy);

         template <typename T> inline void LinkSegments(SparseArraySegment<T>* prev, SparseArraySegment<T>* current);
         template <typename T> inline SparseArraySegment<T>* ReallocNonLeafSegment(SparseArraySegment<T>* seg, SparseArraySegmentBase* nextSeg, bool forceNonLeaf = false);
@@ -897,11 +897,11 @@ namespace Js
         static Var SpreadArrayArgs(Var arrayToSpread, const Js::AuxArray *spreadIndices, ScriptContext *scriptContext);
         static uint32 GetSpreadArgLen(Var spreadArg, ScriptContext *scriptContext);

-        static JavascriptArray * BoxStackInstance(JavascriptArray * instance);
+        static JavascriptArray * BoxStackInstance(JavascriptArray * instance, bool deepCopy);
     protected:
-        template <typename T> void InitBoxedInlineHeadSegment(SparseArraySegment<T> * dst, SparseArraySegment<T> * src);
+        template <typename T> void InitBoxedInlineSegments(SparseArraySegment<T> * dst, SparseArraySegment<T> * src, bool deepCopy);

-        template <typename T> static T * BoxStackInstance(T * instance);
+        template <typename T> static T * BoxStackInstance(T * instance, bool deepCopy);
     public:
         template <class T, uint InlinePropertySlots> static size_t DetermineAllocationSize(const uint inlineElementSlots, size_t *const allocationPlusSizeRef = nullptr, uint *const alignedInlineElementSlotsRef = nullptr);
@@ -1019,7 +1019,7 @@ namespace Js
             JavascriptNativeArray(length, type) {}

         // For BoxStackInstance
-        JavascriptNativeIntArray(JavascriptNativeIntArray * instance, bool boxHead);
+        JavascriptNativeIntArray(JavascriptNativeIntArray * instance, bool boxHead, bool deepCopy);
     public:
         static Var NewInstance(RecyclableObject* function, CallInfo callInfo, ...);
         static Var NewInstance(RecyclableObject* function, Arguments args);
@@ -1077,7 +1084,7 @@ namespace Js
             return LibraryValue::ValueNativeIntArrayType;
         }
         static DynamicType * GetInitialType(ScriptContext * scriptContext);
-        static JavascriptNativeIntArray * BoxStackInstance(JavascriptNativeIntArray * instance);
+        static JavascriptNativeIntArray * BoxStackInstance(JavascriptNativeIntArray * instance, bool deepCopy);
     private:
         virtual int32 HeadSegmentIndexOfHelper(Var search, uint32 &fromIndex, uint32 toIndex, bool includesAlgorithm, ScriptContext * scriptContext) override;
@@ -1182,7 +1182,7 @@ namespace Js
             JavascriptNativeArray(length, type) {}

         // For BoxStackInstance
-        JavascriptNativeFloatArray(JavascriptNativeFloatArray * instance, bool boxHead);
+        JavascriptNativeFloatArray(JavascriptNativeFloatArray * instance, bool boxHead, bool deepCopy);

     public:
         static Var NewInstance(RecyclableObject* function, CallInfo callInfo, ...);
@@ -1244,7 +1244,7 @@ namespace Js
         static DynamicType * GetInitialType(ScriptContext * scriptContext);
         static Var Push(ScriptContext * scriptContext, Var * nativeFloatArray, double value);
-        static JavascriptNativeFloatArray * BoxStackInstance(JavascriptNativeFloatArray * instance);
+        static JavascriptNativeFloatArray * BoxStackInstance(JavascriptNativeFloatArray * instance, bool deepCopy);
         static double Pop(ScriptContext * scriptContext, Var nativeFloatArray);
     private:
         virtual int32 HeadSegmentIndexOfHelper(Var search, uint32 &fromIndex, uint32 toIndex, bool includesAlgorithm, ScriptContext * scriptContext) override;
diff --git a/lib/Runtime/Library/JavascriptArray.inl b/lib/Runtime/Library/JavascriptArray.inl
index 02738e37f14..e60cacec0b3 100644
--- a/lib/Runtime/Library/JavascriptArray.inl
+++ b/lib/Runtime/Library/JavascriptArray.inl
@@ -695,7 +695,7 @@ SECOND_PASS:
     SparseArraySegment<T>* JavascriptArray::PrepareSegmentForMemOp(uint32 startIndex, uint32 length)
     {
         uint32 endIndex;
-        if(UInt32Math::Add(startIndex, length - 1, &endIndex))
+        if (UInt32Math::Add(startIndex, length - 1, &endIndex))
         {
             return nullptr;
         }
@@ -711,29 +711,38 @@ SECOND_PASS:
             }
         }

+        // We are messing with the segments and the length of the array.
+        // We must be certain we reach the end of this function without
+        // any interruption to guarantee coherence of the array
+        AutoDisableInterrupt autoDisableInterrupt(this->GetScriptContext()->GetThreadContext());
+
         this->EnsureHead();
         Recycler* recycler = GetRecycler();

         //Find the segment where itemIndex is present or is at the boundary
-        SparseArraySegment<T>* current = (SparseArraySegment<T>*)this->GetBeginLookupSegment(startIndex, false);
+        SparseArraySegment<T>* current = (SparseArraySegment<T>*)this->GetLastUsedSegment();
         SparseArraySegmentBase* prev = nullptr;
         SparseArraySegmentBase* startSeg = nullptr;
         SparseArraySegmentBase* endSeg = nullptr;
         SparseArraySegmentBase* startPrev = nullptr;
         uint32 growby, startOffset, endOffset;
+        bool isAllocationSolelyInLastUsedSegment = false;

-        const auto FindStartAndEndSegment = [&]()
+        // FindStartAndEndSegment
         {
-            if (endIndex >= current->left + current->size)
+            if (current->left > startIndex || endIndex >= current->left + current->size)
             {
+                // The allocation may touch other segments, just start looking from head
                 current = SparseArraySegment<T>::From(head);
             }
             else
             {
+                // We are allocating solely in the last used segment
                 startSeg = endSeg = current;
                 current = nullptr;
+                isAllocationSolelyInLastUsedSegment = true;
             }

             while (current != nullptr)
@@ -779,10 +788,12 @@ SECOND_PASS:
             {
                 startPrev = prev;
             }
-        };
+        }

-        const auto ResizeArrayIfStartIsOutsideArrayLength = [&]()
+        if (startSeg == nullptr)
         {
+            // if start index is greater than array length then we can add a new segment (or extend the last segment based on some heuristics)
+            // ResizeArrayIfStartIsOutsideArrayLength;
             Assert(endSeg == nullptr);
             Assert(startIndex >= head->size);
             // Reallocate head if it meets a heuristics
@@ -818,10 +829,14 @@ SECOND_PASS:
                 SetHasNoMissingValues();
             }
         }
-        };
-
-        const auto ExtendStartSegmentForMemOp = [&]()
+        }
+        else
         {
+            // once we found the start segment we extend the start segment until startIndex+length . We don't care about what was there
+            // as they will be overwritten by the memset/memcopy. Then we need to append items from the (startIndex+length) to array.length
@@ -711,29 +711,38 @@ SECOND_PASS:
        }
    }

+   // We are messing with the segments and the length of the array.
+   // We must be certain we reach the end of this function without
+   // any interruption, to guarantee the coherence of the array.
+   AutoDisableInterrupt autoDisableInterrupt(this->GetScriptContext()->GetThreadContext());
+
    this->EnsureHead<T>();
    Recycler* recycler = GetRecycler();

    //Find the segment where itemIndex is present or is at the boundary
-   SparseArraySegment<T>* current = (SparseArraySegment<T>*)this->GetBeginLookupSegment(startIndex, false);
+   SparseArraySegment<T>* current = (SparseArraySegment<T>*)this->GetLastUsedSegment();
    SparseArraySegmentBase* prev = nullptr;
    SparseArraySegmentBase* startSeg = nullptr;
    SparseArraySegmentBase* endSeg = nullptr;
    SparseArraySegmentBase* startPrev = nullptr;
    uint32 growby, startOffset, endOffset;
+   bool isAllocationSolelyInLastUsedSegment = false;

-   const auto FindStartAndEndSegment = [&]()
+   // FindStartAndEndSegment
    {
-       if (endIndex >= current->left + current->size)
+       if (current->left > startIndex || endIndex >= current->left + current->size)
        {
+           // The allocation may touch other segments, so just start looking from the head segment
            current = SparseArraySegment<T>::From(head);
        }
        else
        {
+           // We are allocating solely in the last used segment
            startSeg = endSeg = current;
            current = nullptr;
+           isAllocationSolelyInLastUsedSegment = true;
        }

        while (current != nullptr)
@@ -779,10 +788,12 @@ SECOND_PASS:
        {
            startPrev = prev;
        }
-   };
+   }

-   const auto ResizeArrayIfStartIsOutsideArrayLength = [&]()
+   if (startSeg == nullptr)
    {
+       // If the start index is greater than the array length, we can add a new segment (or extend the last segment, based on heuristics)
+       // ResizeArrayIfStartIsOutsideArrayLength
        Assert(endSeg == nullptr);
        Assert(startIndex >= head->size);
        // Reallocate head if it meets a heuristics
@@ -818,10 +829,14 @@ SECOND_PASS:
                SetHasNoMissingValues();
            }
        }
-   };
-
-   const auto ExtendStartSegmentForMemOp = [&]()
+   }
+   else
    {
+       // Once we find the start segment, we extend it up to startIndex + length; we don't care about what was there,
+       // as it will be overwritten by the memset/memcpy. Then we need to append the items from (startIndex + length)
+       // up to array.length, from the end segment to the new array.
+
+       // ExtendStartSegmentForMemOp
        SparseArraySegmentBase *oldStartSeg = startSeg;
        bool isInlineSegment = false;
        startOffset = startIndex - startSeg->left;
@@ -874,7 +889,7 @@ SECOND_PASS:
                }
            }
        }
-           current->length = current->length > (startOffset + length) ? current->length : (startOffset + length);
+       current->length = current->length > (startOffset + length) ? current->length : (startOffset + length);
        current->CheckLengthvsSize();
        Assert(current == oldStartSeg);
    }
-   }
    else if ((startIndex + 1) <= startSeg->left)
    {
        isInlineSegment = JavascriptArray::IsInlineSegment(startSeg, this);
-       if (startIndex + 1 == startSeg->left && startPrev == head)
-       {
-           current = SparseArraySegment<T>::From(head)->GrowByMin(recycler, startIndex + length - head->size);
-           current->length = endIndex + 1;
-           current->CheckLengthvsSize();
-           head = current;
-       }
-       else
+       // startIndex is in between startPrev and startSeg
+       current = SparseArraySegment<T>::template AllocateSegmentImpl<false>(recycler, startIndex, length, (SparseArraySegmentBase*)nullptr);
+       LinkSegments((Js::SparseArraySegment<T>*)startPrev, current);
+       if (current == head)
        {
-           // startIndex is in between prev and startIndex
-           current = SparseArraySegment<T>::AllocateSegment(recycler, startIndex, length, (SparseArraySegment<T> *)nullptr);
-           LinkSegments((Js::SparseArraySegment<T>*)startPrev, current);
-           if (current == head)
-           {
-               SetHasNoMissingValues();
-           }
-           current->length = length;
-           current->CheckLengthvsSize();
+           SetHasNoMissingValues();
        }
    }
    else
    {
@@ -929,12 +932,11 @@ SECOND_PASS:
        {
            this->ClearElements(oldStartSeg, 0);
        }
-   };

-   const auto AppendLeftOverItemsFromEndSegment = [&]()
-   {
+       // AppendLeftOverItemsFromEndSegment
+       SparseArraySegmentBase *oldCurrent = current;
-       bool isInlineSegment = false;
+       isInlineSegment = false;
        if (!endSeg)
        {
            // end is beyond the length of the array
@@ -1018,25 +1020,14 @@ SECOND_PASS:
        {
            this->ClearElements(oldCurrent, 0);
        }
-   };

-   FindStartAndEndSegment();
-   if (startSeg == nullptr)
-   {
-       // if start index is greater than array length then we can add a new segment (or extend the last segment based on some heuristics)
-       ResizeArrayIfStartIsOutsideArrayLength();
-   }
-   else
-   {
-       // once we found the start segment we extend the start segment until startIndex+length . We don't care about what was there
-       // as they will be overwritten by the memset/ memcopy. Then we need to append items from the (startIndex+length) to array.length
-       // from the end segment to the new array
-       ExtendStartSegmentForMemOp();
-       AppendLeftOverItemsFromEndSegment();
    }

    Assert(current);
    Assert(current->left <= startIndex);
    Assert((startIndex - current->left) < current->size);
+   // If we are solely using the last used segment, make sure no allocation was done
+   Assert(!isAllocationSolelyInLastUsedSegment || (current == startSeg && current == endSeg));
+   autoDisableInterrupt.Completed();
    return current;
 }

@@ -1189,7 +1180,7 @@ SECOND_PASS:
    }
    else
    {
-       DirectSetItemAtRangeFull(startIndex, length, newValue);
+       return DirectSetItemAtRangeFull(startIndex, length, newValue);
    }
    return true;
 }
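The new comment at the top of PrepareSegmentForMemOp states the invariant plainly: once the segment surgery starts, nothing may interrupt until the array is coherent again, and every successful path must reach Completed(). A compilable sketch of that RAII shape (ScopedNoInterrupt is invented here; the real AutoDisableInterrupt and its ThreadContext wiring are not quoted):

    #include <cstdio>
    #include <cstdlib>

    // Guard pattern: interruption stays disabled for the scope; reaching
    // Completed() proves the invariant-restoring work finished.
    class ScopedNoInterrupt
    {
        bool completed = false;
    public:
        ScopedNoInterrupt()  { /* e.g. threadContext->DisableInterrupt(); */ }
        void Completed()     { completed = true; }
        ~ScopedNoInterrupt()
        {
            /* e.g. threadContext->EnableInterrupt(); */
            if (!completed)
            {
                // Unwinding mid-update would leave half-linked segments behind.
                fprintf(stderr, "array left incoherent\n");
                abort();
            }
        }
    };

    void UpdateSegments()
    {
        ScopedNoInterrupt guard;
        // ... relink segments, grow storage, fix up length ...
        guard.Completed();   // mirrors autoDisableInterrupt.Completed() above
    }

    int main() { UpdateSegments(); return 0; }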
diff --git a/lib/Runtime/Library/JavascriptFunction.cpp b/lib/Runtime/Library/JavascriptFunction.cpp
index 23f883981f3..07db7063918 100644
--- a/lib/Runtime/Library/JavascriptFunction.cpp
+++ b/lib/Runtime/Library/JavascriptFunction.cpp
@@ -869,6 +869,8 @@ namespace Js
         // JavascriptOperators::NewScObject should have thrown if 'v' is not a constructor
         RecyclableObject* functionObj = RecyclableObject::FromVar(v);

+        const unsigned STACK_ARGS_ALLOCA_THRESHOLD = 8; // Number of stack args we allow before using _alloca
+        Var stackArgs[STACK_ARGS_ALLOCA_THRESHOLD];
         Var* newValues = args.Values;
         CallFlags newFlags = args.Info.Flags;

@@ -886,8 +888,6 @@ namespace Js
         {
             newCount++;
             newFlags = (CallFlags)(newFlags | CallFlags_NewTarget | CallFlags_ExtraArg);
-            const unsigned STACK_ARGS_ALLOCA_THRESHOLD = 8; // Number of stack args we allow before using _alloca
-            Var stackArgs[STACK_ARGS_ALLOCA_THRESHOLD];
             if (newCount > STACK_ARGS_ALLOCA_THRESHOLD)
             {
                 PROBE_STACK(scriptContext, newCount * sizeof(Var) + Js::Constants::MinStackDefault); // args + function call
@@ -2885,6 +2885,7 @@ void __cdecl _alloca_probe_16()
         // and foo.arguments[n] will be maintained after this object is returned.
         JavascriptStackWalker walker(scriptContext);
+        walker.SetDeepCopyForArguments();

         if (walker.WalkToTarget(this))
         {
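The first two hunks fix a lifetime bug: stackArgs was declared inside the branch, while newValues, which may point at it, is used after the branch exits. Hoisting the buffer to the enclosing scope makes the pointer's lifetime cover every use. A self-contained illustration with invented names (kThreshold stands in for STACK_ARGS_ALLOCA_THRESHOLD; this is a sketch of the bug shape, not the engine's code):

    #include <cstring>
    #include <cstdio>

    static const unsigned kThreshold = 8;

    // Hypothetical pre-fix shape: the buffer is scoped to the branch,
    // but a pointer to it escapes.
    void CallWithExtraArgBroken(int* args, unsigned count)
    {
        int* newValues = args;
        if (count + 1 <= kThreshold)
        {
            int stackArgs[kThreshold];
            memcpy(stackArgs, args, count * sizeof(int));
            stackArgs[count] = 42;        // the extra arg
            newValues = stackArgs;        // BUG: stackArgs dies at the brace
        }
        printf("%d\n", newValues[count]); // stack-use-after-scope when branch taken
    }

    // Post-fix shape: the buffer is hoisted, outliving every use below.
    void CallWithExtraArgFixed(int* args, unsigned count)
    {
        int stackArgs[kThreshold];
        int* newValues = args;
        if (count + 1 <= kThreshold)
        {
            memcpy(stackArgs, args, count * sizeof(int));
            stackArgs[count] = 42;
            newValues = stackArgs;        // fine: same lifetime as the function
        }
        printf("%d\n", newValues[count]);
    }

    int main()
    {
        int args[3] = { 1, 2, 3 };
        CallWithExtraArgFixed(args, 3);   // prints 42
        return 0;
    }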
diff --git a/lib/Runtime/Library/WebAssemblyModule.cpp b/lib/Runtime/Library/WebAssemblyModule.cpp
index 1fbea12d2cf..0468f658542 100644
--- a/lib/Runtime/Library/WebAssemblyModule.cpp
+++ b/lib/Runtime/Library/WebAssemblyModule.cpp
@@ -175,8 +175,14 @@ Var WebAssemblyModule::EntryCustomSections(RecyclableObject* function, CallInfo
     WebAssemblyModule * module = WebAssemblyModule::FromVar(args[1]);

     Var customSections = nullptr;
+    // C++ compiler optimizations can optimize away the sectionName variable while still keeping a reference to the underlying
+    // character buffer sectionNameBuf. The character buffer itself is managed by the recycler; however, we may move past the
+    // start of the character buffer while doing the comparison in memcmp. If a GC happens during CreateArrayBuffer, the character
+    // buffer can get collected, as we no longer have a reference to the start of the buffer on the stack. To avoid this, we need
+    // to pin sectionName.
+    ENTER_PINNED_SCOPE(JavascriptString, sectionName);
     sectionName = JavascriptConversion::ToString(sectionNameVar, scriptContext);
+
     const char16* sectionNameBuf = sectionName->GetString();
     charcount_t sectionNameLength = sectionName->GetLength();
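The pinning comment describes an interior-pointer hazard: sectionNameBuf points into the string's buffer, but only sectionName roots it, and the optimizer may drop that root after its last syntactic use. A toy, compilable model of the hazard (GcString, ToToyString, and AllocateToyBuffer are invented; no real GC runs here, so this only shows the shape of the bug):

    #include <cstdint>
    #include <cstdio>

    struct GcString { const char16_t* chars; uint32_t length; };

    // Stand-ins so the sketch compiles; a real engine's versions can trigger GC.
    static GcString g_str{ u"custom-section", 14 };
    static GcString* ToToyString(void*) { return &g_str; }
    static void* AllocateToyBuffer(uint32_t) { /* imagine a GC here */ return nullptr; }

    int main()
    {
        GcString* name = ToToyString(nullptr);   // the only root for the string
        const char16_t* p = name->chars;         // interior pointer, not a root
        uint32_t len = name->length;             // after this read, the optimizer
                                                 // may treat 'name' as dead

        AllocateToyBuffer(len);                  // in a GC'd engine, a collection
                                                 // here could free the chars while
                                                 // 'p' still points into them

        // ...which is why the real code pins the string for the whole scope,
        // in the spirit of: ENTER_PINNED_SCOPE(JavascriptString, sectionName);
        printf("first unit: %u\n", (unsigned)p[0]);
        return 0;
    }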
diff --git a/lib/Runtime/Types/DynamicObject.cpp b/lib/Runtime/Types/DynamicObject.cpp
index 20f0c79cfb4..91166beb52a 100644
--- a/lib/Runtime/Types/DynamicObject.cpp
+++ b/lib/Runtime/Types/DynamicObject.cpp
@@ -524,8 +524,12 @@ namespace Js
     void DynamicObject::ChangeType()
     {
+        AutoDisableInterrupt autoDisableInterrupt(this->GetScriptContext()->GetThreadContext());
+
         Assert(!GetDynamicType()->GetIsShared() || GetTypeHandler()->GetIsShared());
         this->type = this->DuplicateType();
+
+        autoDisableInterrupt.Completed();
     }

     void DynamicObject::ChangeTypeIf(const Type* oldType)
diff --git a/test/Optimizer/bug41530.js b/test/Optimizer/bug41530.js
new file mode 100644
index 00000000000..0ecafa4ff61
--- /dev/null
+++ b/test/Optimizer/bug41530.js
@@ -0,0 +1,26 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+function opt(arr) {
+    if (arr.length <= 15)
+        return;
+
+    let j = 0;
+    for (let i = 0; i < 2; i++) {
+        arr[j] = 0x1234; // (a)
+        j += 0x100000;
+        j + 0x7ffffff0;
+    }
+}
+
+function main() {
+    for (let i = 0; i < 0x10000; i++) {
+        opt(new Uint32Array(100));
+    }
+}
+
+main();
+
+WScript.Echo('pass');
diff --git a/test/Optimizer/bug42111.js b/test/Optimizer/bug42111.js
new file mode 100644
index 00000000000..08b868585a0
--- /dev/null
+++ b/test/Optimizer/bug42111.js
@@ -0,0 +1,28 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+function opt(arr, start, end) {
+    for (let i = start; i < end; i++) {
+        if (i === 10) {
+            i += 0;
+        }
+        arr[i] = 2.3023e-320;
+    }
+}
+
+let arr = new Array(100);
+
+function main() {
+    arr.fill(1.1);
+
+    for (let i = 0; i < 1000; i++)
+        opt(arr, 0, 3);
+
+    opt(arr, 0, 100000);
+}
+
+main();
+
+WScript.Echo(arr[0] === 2.3023e-320 ? 'pass' : 'fail');
diff --git a/test/Optimizer/rlexe.xml b/test/Optimizer/rlexe.xml
index 8bf31fef4fa..495aa3ddabd 100644
--- a/test/Optimizer/rlexe.xml
+++ b/test/Optimizer/rlexe.xml
@@ -5,6 +5,16 @@
       <files>bug318.js</files>
     </default>
   </test>
+  <test>
+    <default>
+      <files>bug41530.js</files>
+    </default>
+  </test>
+  <test>
+    <default>
+      <files>bug42111.js</files>
+    </default>
+  </test>
   <test>
     <default>
       <files>bug70.js</files>
diff --git a/test/es6/function-expr-capture.js b/test/es6/function-expr-capture.js
new file mode 100644
index 00000000000..9ce2c4865c6
--- /dev/null
+++ b/test/es6/function-expr-capture.js
@@ -0,0 +1,13 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+(function func(arg = function () {
+        return func;
+    }()) {
+    return func;
+    function func() {}
+})();
+
+WScript.Echo('pass');
diff --git a/test/es6/function-expr-capture2.js b/test/es6/function-expr-capture2.js
new file mode 100644
index 00000000000..8eab05c7be0
--- /dev/null
+++ b/test/es6/function-expr-capture2.js
@@ -0,0 +1,16 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+eval(
+    '(function f() {' +
+    '    with({}) {' +
+    '        (function () {' +
+    '            return f;' +
+    '        })();' +
+    '    }' +
+    '}());'
+);
+
+WScript.Echo('pass');
diff --git a/test/es6/rlexe.xml b/test/es6/rlexe.xml
index 6975d8d6514..31ec0c16734 100644
--- a/test/es6/rlexe.xml
+++ b/test/es6/rlexe.xml
@@ -1350,14 +1350,14 @@
       <files>dynamic-module-functionality.js</files>
-      <compile-flags>-ES6Module -args summary -endargs</compile-flags>
+      <compile-flags>-ES6Module -ESDynamicImport -args summary -endargs</compile-flags>
       <tags>exclude_sanitize_address</tags>
     </default>
   </test>
   <test>
     <default>
       <files>dynamic-module-import-specifier.js</files>
-      <compile-flags>-MuteHostErrorMsg -ES6Module -args summary -endargs</compile-flags>
+      <compile-flags>-MuteHostErrorMsg -ES6Module -ESDynamicImport -args summary -endargs</compile-flags>
       <tags>exclude_sanitize_address</tags>
     </default>
   </test>
@@ -1393,7 +1393,7 @@
       <files>bug_OS12095746.js</files>
-      <compile-flags>-MuteHostErrorMsg -IgnoreScriptErrorCode -TraceHostCallback -ES6Module</compile-flags>
+      <compile-flags>-MuteHostErrorMsg -IgnoreScriptErrorCode -TraceHostCallback -ES6Module -ESDynamicImport</compile-flags>
       <tags>exclude_dynapogo,exclude_sanitize_address,bugfix,exclude_drt</tags>
       <baseline>bug_OS12095746.baseline</baseline>
@@ -1442,7 +1442,7 @@
       <files>bug_issue_3076.js</files>
-      <compile-flags>-force:deferparse</compile-flags>
+      <compile-flags>-force:deferparse -ESDynamicImport</compile-flags>
       <tags>BugFix,exclude_sanitize_address</tags>
@@ -1456,6 +1456,7 @@
       <files>bug_issue_3257.js</files>
+      <compile-flags>-ESDynamicImport</compile-flags>
       <baseline>bug_issue_3257.baseline</baseline>
       <tags>BugFix,exclude_sanitize_address</tags>
@@ -1516,4 +1517,15 @@
       <compile-flags>-off:deferparse -args summary -endargs -deferparse -forceundodefer</compile-flags>
     </default>
   </test>
+  <test>
+    <default>
+      <files>function-expr-capture.js</files>
+    </default>
+  </test>
+  <test>
+    <default>
+      <files>function-expr-capture2.js</files>
+      <compile-flags>-force:deferparse</compile-flags>
+    </default>
+  </test>