diff --git a/src/coreclr/jit/CMakeLists.txt b/src/coreclr/jit/CMakeLists.txt
index b0db4a4b67a208..104a7be7f18202 100644
--- a/src/coreclr/jit/CMakeLists.txt
+++ b/src/coreclr/jit/CMakeLists.txt
@@ -299,7 +299,6 @@ set( JIT_HEADERS
     bitsetops.h
     bitvec.h
     block.h
-    blockset.h
     codegen.h
     codegeninterface.h
     compiler.h
diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp
index 69017ea2dfb544..2ecf31eb23c7b7 100644
--- a/src/coreclr/jit/block.cpp
+++ b/src/coreclr/jit/block.cpp
@@ -617,8 +617,7 @@ void BasicBlock::dspSuccs(Compiler* compiler)
     // compute it ourselves here.
     if (bbKind == BBJ_SWITCH)
     {
-        // Create a set with all the successors. Don't use BlockSet, so we don't need to worry
-        // about the BlockSet epoch.
+        // Create a set with all the successors.
         unsigned     bbNumMax = compiler->fgBBNumMax;
         BitVecTraits bitVecTraits(bbNumMax + 1, compiler);
         BitVec       uniqueSuccBlocks(BitVecOps::MakeEmpty(&bitVecTraits));
@@ -1036,10 +1035,10 @@ unsigned JitPtrKeyFuncs<BasicBlock>::GetHashCode(const BasicBlock* ptr)
     unsigned hash = SsaStressHashHelper();
     if (hash != 0)
     {
-        return (hash ^ (ptr->bbNum << 16) ^ ptr->bbNum);
+        return (hash ^ (ptr->bbID << 16) ^ ptr->bbID);
     }
 #endif
-    return ptr->bbNum;
+    return ptr->bbID;
 }
 
 //------------------------------------------------------------------------
diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h
index 9766fd53ae5945..43c909045db132 100644
--- a/src/coreclr/jit/block.h
+++ b/src/coreclr/jit/block.h
@@ -23,7 +23,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 
 // Defines VARSET_TP
 #include "varset.h"
-#include "blockset.h"
 #include "jitstd.h"
 #include "bitvec.h"
 #include "jithashtable.h"
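Aside, not part of the patch: the hash change above matters for pointer-keyed maps. bbNum is reassigned whenever fgRenumberBlocks() runs, while bbID is assigned once when a block is created and never changes, so hashing on bbID keeps lookups valid across renumbering. A minimal sketch under those assumptions (the map name, value type, and memory kind are made up for illustration; comp and block are placeholders):

    // Hypothetical map from blocks to visit counts. Because
    // JitPtrKeyFuncs<BasicBlock>::GetHashCode() now hashes bbID, an entry
    // inserted before fgRenumberBlocks() is still found afterwards.
    typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, unsigned> BlockCountMap;
    BlockCountMap counts(comp->getAllocator(CMK_DebugOnly));
    counts.Set(block, 1);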
diff --git a/src/coreclr/jit/blockset.h b/src/coreclr/jit/blockset.h
deleted file mode 100644
index f69e1e59ace324..00000000000000
--- a/src/coreclr/jit/blockset.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-//
-// This include file determines how BlockSet is implemented.
-//
-#ifndef _BLOCKSET_INCLUDED_
-#define _BLOCKSET_INCLUDED_ 1
-
-// A BlockSet is a set of BasicBlocks, represented by the BasicBlock number (bbNum).
-// Unlike VARSET_TP, we only support a single implementation: the bitset "shortlong"
-// implementation.
-//
-// Note that BasicBlocks in the JIT are numbered starting at 1. We always just waste the
-// 0th bit to avoid having to do "bbNum - 1" calculations everywhere (at the BlockSet call
-// sites). This makes reading the code easier, and avoids potential problems of forgetting
-// to do a "- 1" somewhere.
-//
-// Basic blocks can be renumbered during compilation, so it is important to not mix
-// BlockSets created before and after a renumbering. Every time the blocks are renumbered
-// creates a different "epoch", during which the basic block numbers are stable.
-
-#include "bitset.h"
-#include "compilerbitsettraits.h"
-#include "bitsetasshortlong.h"
-
-class BlockSetOps
-    : public BitSetOps</*BitSetType*/ BitSetShortLongRep,
-                       /*Brand*/ BSShortLong,
-                       /*Env*/ Compiler*,
-                       /*BitSetTraits*/ BasicBlockBitSetTraits>
-{
-public:
-    // Specialize BlockSetOps::MakeFull(). Since we number basic blocks from one, we remove bit zero from
-    // the block set. Otherwise, IsEmpty() would never return true.
-    static BitSetShortLongRep MakeFull(Compiler* env)
-    {
-        BitSetShortLongRep retval;
-
-        // First, make a full set using the BitSetOps::MakeFull
-
-        retval = BitSetOps</*BitSetType*/ BitSetShortLongRep,
-                           /*Brand*/ BSShortLong,
-                           /*Env*/ Compiler*,
-                           /*BitSetTraits*/ BasicBlockBitSetTraits>::MakeFull(env);
-
-        // Now, remove element zero, since we number basic blocks starting at one, and index the set with the
-        // basic block number. If we left this, then IsEmpty() would never return true.
-        BlockSetOps::RemoveElemD(env, retval, 0);
-
-        return retval;
-    }
-};
-
-typedef BitSetShortLongRep BlockSet;
-
-// These types should be used as the types for BlockSet arguments and return values, respectively.
-typedef BlockSetOps::ValArgType BlockSet_ValArg_T;
-typedef BlockSetOps::RetValType BlockSet_ValRet_T;
-
-#endif // _BLOCKSET_INCLUDED_
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 740182cdf2b7e5..91daacdb3837ca 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -36,7 +36,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 #include "regalloc.h"
 #include "sm.h"
 #include "cycletimer.h"
-#include "blockset.h"
 #include "arraystack.h"
 #include "priorityqueue.h"
 #include "hashbv.h"
@@ -5225,65 +5224,6 @@ class Compiler
         return getAllocator(cmk).allocate<T>(fgBBNumMax + 1);
     }
 
-    // BlockSets are relative to a specific set of BasicBlock numbers. If that changes
-    // (if the blocks are renumbered), this changes. BlockSets from different epochs
-    // cannot be meaningfully combined. Note that new blocks can be created with higher
-    // block numbers without changing the basic block epoch. These blocks *cannot*
-    // participate in a block set until the blocks are all renumbered, causing the epoch
-    // to change. This is useful if continuing to use previous block sets is valuable.
-    // If the epoch is zero, then it is uninitialized, and block sets can't be used.
-    unsigned fgCurBBEpoch = 0;
-
-    unsigned GetCurBasicBlockEpoch()
-    {
-        return fgCurBBEpoch;
-    }
-
-    // The number of basic blocks in the current epoch. When the blocks are renumbered,
-    // this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains
-    // the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered.
-    unsigned fgCurBBEpochSize = 0;
-
-    // The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize
-    // bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called.
-    unsigned fgBBSetCountInSizeTUnits = 0;
-
-    void NewBasicBlockEpoch()
-    {
-        INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits);
-
-        // We have a new epoch. Compute and cache the size needed for new BlockSets.
-        fgCurBBEpoch++;
-        fgCurBBEpochSize = fgBBNumMax + 1;
-        fgBBSetCountInSizeTUnits =
-            roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);
-
-#ifdef DEBUG
-        if (verbose)
-        {
-            unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this);
-            printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)",
-                   fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long");
-            if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1)))
-            {
-                // If we're not just establishing the first epoch, and the epoch array size has changed such that we're
-                // going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an
-                // array of size_t bitsets), then print that out.
-                printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long");
-            }
-            printf("\n");
-        }
-#endif // DEBUG
-    }
-
-    void EnsureBasicBlockEpoch()
-    {
-        if (fgCurBBEpochSize != fgBBNumMax + 1)
-        {
-            NewBasicBlockEpoch();
-        }
-    }
-
     bool fgEnsureFirstBBisScratch();
     bool fgFirstBBisScratch();
     bool fgBBisScratch(BasicBlock* block);
@@ -6305,7 +6245,7 @@ class Compiler
     };
 
     template <bool hasEH>
-    void fgMoveHotJumps();
+    void fgMoveHotJumps(FlowGraphDfsTree* dfsTree);
 
     bool fgFuncletsAreCold();
 
diff --git a/src/coreclr/jit/compilerbitsettraits.h b/src/coreclr/jit/compilerbitsettraits.h
index 965ffac55465e1..ffdb19c501b205 100644
--- a/src/coreclr/jit/compilerbitsettraits.h
+++ b/src/coreclr/jit/compilerbitsettraits.h
@@ -69,30 +69,6 @@ class AllVarBitSetTraits : public CompAllocBitSetTraits
     static inline BitSetSupport::BitSetOpCounter* GetOpCounter(Compiler* comp);
 };
 
-///////////////////////////////////////////////////////////////////////////////
-//
-// BasicBlockBitSetTraits
-//
-// This class is customizes the bit set to represent sets of BasicBlocks.
-// The size of the bitset is determined by maximum assigned BasicBlock number
-// (Compiler::fgBBNumMax) (Note that fgBBcount is not equal to this during inlining,
-// when fgBBcount is the number of blocks in the inlined function, but the assigned
-// block numbers are higher than the inliner function. fgBBNumMax counts both.
-// Thus, if you only care about the inlinee, during inlining, this bit set will waste
-// the lower numbered block bits.) The Compiler* tracks the BasicBlock epochs.
-//
-class BasicBlockBitSetTraits : public CompAllocBitSetTraits
-{
-public:
-    static inline unsigned GetSize(Compiler* comp);
-
-    static inline unsigned GetArrSize(Compiler* comp);
-
-    static inline unsigned GetEpoch(class Compiler* comp);
-
-    static inline BitSetSupport::BitSetOpCounter* GetOpCounter(Compiler* comp);
-};
-
 ///////////////////////////////////////////////////////////////////////////////
 //
 // BitVecTraits
diff --git a/src/coreclr/jit/compilerbitsettraits.hpp b/src/coreclr/jit/compilerbitsettraits.hpp
index cb209ba22993ad..c405de9f589362 100644
--- a/src/coreclr/jit/compilerbitsettraits.hpp
+++ b/src/coreclr/jit/compilerbitsettraits.hpp
@@ -96,40 +96,6 @@ BitSetSupport::BitSetOpCounter* AllVarBitSetTraits::GetOpCounter(Compiler* comp)
 #endif
 }
 
-///////////////////////////////////////////////////////////////////////////////
-//
-// BasicBlockBitSetTraits
-//
-///////////////////////////////////////////////////////////////////////////////
-
-// static
-unsigned BasicBlockBitSetTraits::GetSize(Compiler* comp)
-{
-    return comp->fgCurBBEpochSize;
-}
-
-// static
-unsigned BasicBlockBitSetTraits::GetArrSize(Compiler* comp)
-{
-    // Assert that the epoch has been initialized. This is a convenient place to assert this because
-    // GetArrSize() is called for every function, via IsShort().
-    assert(GetEpoch(comp) != 0);
-
-    return comp->fgBBSetCountInSizeTUnits; // This is precomputed to avoid doing math every time this function is called
-}
-
-// static
-unsigned BasicBlockBitSetTraits::GetEpoch(Compiler* comp)
-{
-    return comp->GetCurBasicBlockEpoch();
-}
-
-// static
-BitSetSupport::BitSetOpCounter* BasicBlockBitSetTraits::GetOpCounter(Compiler* comp)
-{
-    return nullptr;
-}
-
 ///////////////////////////////////////////////////////////////////////////////
 //
 // BitVecTraits
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index be84f883a6426b..d27fc704eca583 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -5400,30 +5400,6 @@ bool Compiler::fgRenumberBlocks()
         JITDUMP("=============== No blocks renumbered!\n");
     }
 
-    // Now update the BlockSet epoch, which depends on the block numbers.
-    // If any blocks have been renumbered then create a new BlockSet epoch.
-    // Even if we have not renumbered any blocks, we might still need to force
-    // a new BlockSet epoch, for one of several reasons. If there are any new
-    // blocks with higher numbers than the former maximum numbered block, then we
-    // need a new epoch with a new size matching the new largest numbered block.
-    // Also, if the number of blocks is different from the last time we set the
-    // BlockSet epoch, then we need a new epoch. This wouldn't happen if we
-    // renumbered blocks after every block addition/deletion, but it might be
-    // the case that we can change the number of blocks, then set the BlockSet
-    // epoch without renumbering, then change the number of blocks again, then
-    // renumber.
-    if (renumbered || newMaxBBNum)
-    {
-        NewBasicBlockEpoch();
-
-        // The key in the unique switch successor map is dependent on the block number, so invalidate that cache.
-        InvalidateUniqueSwitchSuccMap();
-    }
-    else
-    {
-        EnsureBasicBlockEpoch();
-    }
-
     // Tell our caller if any blocks actually were renumbered.
     return renumbered || newMaxBBNum;
 }
diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp
index 5c91836b28f3d1..85c3c560b6ba58 100644
--- a/src/coreclr/jit/fgflow.cpp
+++ b/src/coreclr/jit/fgflow.cpp
@@ -530,11 +530,7 @@ Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switc
     {
         // We must compute the descriptor. Find which are dups, by creating a bit set with the unique successors.
         // We create a temporary bitset of blocks to compute the unique set of successor blocks,
-        // since adding a block's number twice leaves just one "copy" in the bitset. Note that
-        // we specifically don't use the BlockSet type, because doing so would require making a
-        // call to EnsureBasicBlockEpoch() to make sure the epoch is up-to-date. However, that
-        // can create a new epoch, thus invalidating all existing BlockSet objects, such as
-        // reachability information stored in the blocks. To avoid that, we just use a local BitVec.
+        // since adding a block's number twice leaves just one "copy" in the bitset.
         BitVecTraits blockVecTraits(fgBBNumMax + 1, this);
         BitVec       uniqueSuccBlocks(BitVecOps::MakeEmpty(&blockVecTraits));
 
diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp
index 231eeb22c48248..978555e03684b1 100644
--- a/src/coreclr/jit/fgopt.cpp
+++ b/src/coreclr/jit/fgopt.cpp
@@ -4463,8 +4463,11 @@ bool Compiler::fgReorderBlocks(bool useProfile)
 // Template parameters:
 //    hasEH - If true, method has EH regions, so check that we don't try to move blocks in different regions
 //
+// Parameters:
+//    dfsTree - The depth-first traversal of the flowgraph
+//
 template <bool hasEH>
-void Compiler::fgMoveHotJumps()
+void Compiler::fgMoveHotJumps(FlowGraphDfsTree* dfsTree)
 {
 #ifdef DEBUG
     if (verbose)
     {
@@ -4477,9 +4480,9 @@
     }
 #endif // DEBUG
 
-    EnsureBasicBlockEpoch();
-    BlockSet visitedBlocks(BlockSetOps::MakeEmpty(this));
-    BlockSetOps::AddElemD(this, visitedBlocks, fgFirstBB->bbNum);
+    assert(dfsTree != nullptr);
+    BitVecTraits traits(dfsTree->PostOrderTraits());
+    BitVec       visitedBlocks = BitVecOps::MakeEmpty(&traits);
 
     // If we have a funclet region, don't bother reordering anything in it.
     //
@@ -4487,7 +4490,12 @@
     for (BasicBlock* block = fgFirstBB; block != fgFirstFuncletBB; block = next)
     {
         next = block->Next();
-        BlockSetOps::AddElemD(this, visitedBlocks, block->bbNum);
+        if (!dfsTree->Contains(block))
+        {
+            continue;
+        }
+
+        BitVecOps::AddElemD(&traits, visitedBlocks, block->bbPostorderNum);
 
         // Don't bother trying to move cold blocks
         //
@@ -4534,7 +4542,8 @@
         }
 
         BasicBlock* target = targetEdge->getDestinationBlock();
-        bool isBackwardJump = BlockSetOps::IsMember(this, visitedBlocks, target->bbNum);
+        bool        isBackwardJump = BitVecOps::IsMember(&traits, visitedBlocks, target->bbPostorderNum);
+        assert(dfsTree->Contains(target));
 
         if (isBackwardJump)
         {
@@ -4553,7 +4562,8 @@
             //
             targetEdge     = unlikelyEdge;
             target         = targetEdge->getDestinationBlock();
-            isBackwardJump = BlockSetOps::IsMember(this, visitedBlocks, target->bbNum);
+            isBackwardJump = BitVecOps::IsMember(&traits, visitedBlocks, target->bbPostorderNum);
+            assert(dfsTree->Contains(target));
 
             if (isBackwardJump)
             {
@@ -4696,7 +4706,7 @@
             }
         }
 
        fgMoveHotJumps(dfsTree);
 
        return;
    }
@@ -4769,7 +4779,7 @@
        fgInsertBBafter(pair.callFinally, pair.callFinallyRet);
    }
 
-    fgMoveHotJumps();
+    fgMoveHotJumps(dfsTree);
 }
 
 //-----------------------------------------------------------------------------
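Illustration, not part of the patch: fgMoveHotJumps now keys its visited set by bbPostorderNum and sizes it from the DFS tree it is handed. Blocks that are not in the DFS tree (unreachable blocks) have no valid post-order number, which is what the new Contains() checks above guard against. A sketch under those assumptions, with comp and dfsTree as placeholders:

    BitVecTraits traits(dfsTree->PostOrderTraits());
    BitVec       visited = BitVecOps::MakeEmpty(&traits);

    for (BasicBlock* const block : comp->Blocks())
    {
        if (!dfsTree->Contains(block))
        {
            continue; // no post-order number to index with
        }
        BitVecOps::AddElemD(&traits, visited, block->bbPostorderNum);
    }

The set is only meaningful while the DFS tree is current, which is why the callers now pass the tree they just computed.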
diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp
index 7c32cc66f10d81..4bbcb6ea9b7aa4 100644
--- a/src/coreclr/jit/fgprofile.cpp
+++ b/src/coreclr/jit/fgprofile.cpp
@@ -821,8 +821,8 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor)
 {
     // We will track visited or queued nodes with a bit vector.
     //
-    EnsureBasicBlockEpoch();
-    BlockSet marked = BlockSetOps::MakeEmpty(this);
+    BitVecTraits traits(compBasicBlockID, this);
+    BitVec       marked = BitVecOps::MakeEmpty(&traits);
 
     // And nodes to visit with a bit vector and stack.
     //
@@ -834,7 +834,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor)
     // Bit vector to track progress through those successors.
     //
     ArrayStack<BasicBlock*> scratch(getAllocator(CMK_Pgo));
-    BlockSet processed = BlockSetOps::MakeEmpty(this);
+    BitVec processed = BitVecOps::MakeEmpty(&traits);
 
     // Push the method entry and all EH handler region entries on the stack.
     // (push method entry last so it's visited first).
@@ -852,18 +852,18 @@
     {
         BasicBlock* hndBegBB = HBtab->ebdHndBeg;
         stack.Push(hndBegBB);
-        BlockSetOps::AddElemD(this, marked, hndBegBB->bbNum);
+        BitVecOps::AddElemD(&traits, marked, hndBegBB->bbID);
         if (HBtab->HasFilter())
         {
            BasicBlock* filterBB = HBtab->ebdFilter;
            stack.Push(filterBB);
-            BlockSetOps::AddElemD(this, marked, filterBB->bbNum);
+            BitVecOps::AddElemD(&traits, marked, filterBB->bbID);
         }
     }
 
     stack.Push(fgFirstBB);
-    BlockSetOps::AddElemD(this, marked, fgFirstBB->bbNum);
+    BitVecOps::AddElemD(&traits, marked, fgFirstBB->bbID);
 
     unsigned nBlocks = 0;
 
@@ -873,7 +873,7 @@
 
         // Visit the block.
         //
-        assert(BlockSetOps::IsMember(this, marked, block->bbNum));
+        assert(BitVecOps::IsMember(&traits, marked, block->bbID));
         visitor->VisitBlock(block);
         nBlocks++;
 
@@ -896,10 +896,10 @@
                     // This block should be the only pred of the continuation.
                     //
                     BasicBlock* const target = block->Next();
-                    assert(!BlockSetOps::IsMember(this, marked, target->bbNum));
+                    assert(!BitVecOps::IsMember(&traits, marked, target->bbID));
                     visitor->VisitTreeEdge(block, target);
                     stack.Push(target);
-                    BlockSetOps::AddElemD(this, marked, target->bbNum);
+                    BitVecOps::AddElemD(&traits, marked, target->bbID);
                 }
             }
             break;
@@ -928,7 +928,7 @@
                 // profiles for methods that throw lots of exceptions.
                 //
                 BasicBlock* const target = fgFirstBB;
-                assert(BlockSetOps::IsMember(this, marked, target->bbNum));
+                assert(BitVecOps::IsMember(&traits, marked, target->bbID));
                 visitor->VisitNonTreeEdge(block, target, SpanningTreeVisitor::EdgeKind::Pseudo);
             }
             break;
@@ -967,7 +967,7 @@
                 }
                 else
                 {
-                    if (BlockSetOps::IsMember(this, marked, target->bbNum))
+                    if (BitVecOps::IsMember(&traits, marked, target->bbID))
                     {
                         visitor->VisitNonTreeEdge(block, target,
                                                   SpanningTreeVisitor::EdgeKind::PostdominatesSource);
@@ -976,7 +976,7 @@
                     {
                         visitor->VisitTreeEdge(block, target);
                         stack.Push(target);
-                        BlockSetOps::AddElemD(this, marked, target->bbNum);
+                        BitVecOps::AddElemD(&traits, marked, target->bbID);
                     }
                 }
             }
@@ -985,7 +985,7 @@
                 // Pseudo-edge back to handler entry.
                 //
                 BasicBlock* const target = dsc->ebdHndBeg;
-                assert(BlockSetOps::IsMember(this, marked, target->bbNum));
+                assert(BitVecOps::IsMember(&traits, marked, target->bbID));
                 visitor->VisitNonTreeEdge(block, target, SpanningTreeVisitor::EdgeKind::Pseudo);
             }
         }
@@ -1016,7 +1016,7 @@
                 // Not a fork. Just visit the sole successor.
                 //
                 BasicBlock* const target = block->GetSucc(0, this);
-                if (BlockSetOps::IsMember(this, marked, target->bbNum))
+                if (BitVecOps::IsMember(&traits, marked, target->bbID))
                 {
                     // We can't instrument in the call finally pair tail block
                     // so treat this as a critical edge.
@@ -1030,7 +1030,7 @@
                 {
                     visitor->VisitTreeEdge(block, target);
                     stack.Push(target);
-                    BlockSetOps::AddElemD(this, marked, target->bbNum);
+                    BitVecOps::AddElemD(&traits, marked, target->bbID);
                 }
             }
             else
@@ -1046,7 +1046,7 @@
                 // edges from non-rare to rare be non-tree edges.
                 //
                 scratch.Reset();
-                BlockSetOps::ClearD(this, processed);
+                BitVecOps::ClearD(&traits, processed);
 
                 for (unsigned i = 0; i < numSucc; i++)
                 {
@@ -1060,7 +1060,7 @@
                 {
                     BasicBlock* const target = scratch.Top(i);
 
-                    if (BlockSetOps::IsMember(this, processed, i))
+                    if (BitVecOps::IsMember(&traits, processed, i))
                     {
                         continue;
                     }
@@ -1070,9 +1070,9 @@
                         continue;
                     }
 
-                    BlockSetOps::AddElemD(this, processed, i);
+                    BitVecOps::AddElemD(&traits, processed, i);
 
-                    if (BlockSetOps::IsMember(this, marked, target->bbNum))
+                    if (BitVecOps::IsMember(&traits, marked, target->bbID))
                     {
                         visitor->VisitNonTreeEdge(block, target,
                                                   target->bbRefs > 1
@@ -1083,7 +1083,7 @@
                     {
                         visitor->VisitTreeEdge(block, target);
                         stack.Push(target);
-                        BlockSetOps::AddElemD(this, marked, target->bbNum);
+                        BitVecOps::AddElemD(&traits, marked, target->bbID);
                     }
                 }
 
@@ -1093,7 +1093,7 @@
                 {
                     BasicBlock* const target = scratch.Top(i);
 
-                    if (BlockSetOps::IsMember(this, processed, i))
+                    if (BitVecOps::IsMember(&traits, processed, i))
                     {
                         continue;
                     }
@@ -1103,9 +1103,9 @@
                         continue;
                     }
 
-                    BlockSetOps::AddElemD(this, processed, i);
+                    BitVecOps::AddElemD(&traits, processed, i);
 
-                    if (BlockSetOps::IsMember(this, marked, target->bbNum))
+                    if (BitVecOps::IsMember(&traits, marked, target->bbID))
                     {
                         visitor->VisitNonTreeEdge(block, target, SpanningTreeVisitor::EdgeKind::DominatesTarget);
                     }
@@ -1113,7 +1113,7 @@
                     {
                         visitor->VisitTreeEdge(block, target);
                         stack.Push(target);
-                        BlockSetOps::AddElemD(this, marked, target->bbNum);
+                        BitVecOps::AddElemD(&traits, marked, target->bbID);
                     }
                 }
 
@@ -1123,14 +1123,14 @@
                 {
                     BasicBlock* const target = scratch.Top(i);
 
-                    if (BlockSetOps::IsMember(this, processed, i))
+                    if (BitVecOps::IsMember(&traits, processed, i))
                     {
                         continue;
                     }
 
-                    BlockSetOps::AddElemD(this, processed, i);
+                    BitVecOps::AddElemD(&traits, processed, i);
 
-                    if (BlockSetOps::IsMember(this, marked, target->bbNum))
+                    if (BitVecOps::IsMember(&traits, marked, target->bbID))
                     {
                         visitor->VisitNonTreeEdge(block, target, SpanningTreeVisitor::EdgeKind::CriticalEdge);
                     }
@@ -1138,13 +1138,13 @@
                     {
                         visitor->VisitTreeEdge(block, target);
                         stack.Push(target);
-                        BlockSetOps::AddElemD(this, marked, target->bbNum);
+                        BitVecOps::AddElemD(&traits, marked, target->bbID);
                     }
                 }
 
                 // Verify we processed each successor.
                 //
-                assert(numSucc == BlockSetOps::Count(this, processed));
+                assert(numSucc == BitVecOps::Count(&traits, processed));
             }
         }
         break;
@@ -1155,7 +1155,7 @@
     //
     for (BasicBlock* const block : Blocks())
     {
-        if (!BlockSetOps::IsMember(this, marked, block->bbNum))
+        if (!BitVecOps::IsMember(&traits, marked, block->bbID))
         {
             visitor->VisitBlock(block);
         }
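Note, not in the diff: WalkSpanningTree now sizes its bit vectors by compBasicBlockID, the number of block IDs handed out when the traits are created, and indexes them by bbID. Unlike bbNum, bbID never changes once assigned, so the sets stay valid across renumbering without any epoch bookkeeping. Minimal sketch, assuming a Compiler* named comp and a BasicBlock* named block:

    BitVecTraits traits(comp->compBasicBlockID, comp);
    BitVec       marked = BitVecOps::MakeEmpty(&traits);
    BitVecOps::AddElemD(&traits, marked, block->bbID);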
diff --git a/src/coreclr/jit/jitpch.h b/src/coreclr/jit/jitpch.h
index 6e9a0a6f800230..008eaf634f2557 100644
--- a/src/coreclr/jit/jitpch.h
+++ b/src/coreclr/jit/jitpch.h
@@ -48,7 +48,6 @@ using std::min;
 #include "rationalize.h"
 #include "jitstd.h"
 #include "ssaconfig.h"
-#include "blockset.h"
 #include "bitvec.h"
 #include "inline.h"
 #include "objectalloc.h"
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index c8f26bec4b679d..ebd7850948482e 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -43,9 +43,6 @@ PhaseStatus Compiler::fgMorphInit()
         //
         compLocallocUsed = true;
     }
-    // Initialize the BlockSet epoch
-    NewBasicBlockEpoch();
-
     fgAvailableOutgoingArgTemps = hashBv::Create(this);
 
     // Insert call to class constructor as the first basic block if
// JITDUMP(FMT_BB " is an ambiguous pred\n", predBlock->bbNum); - BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); + BitVecOps::AddElemD(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum); jti.m_numAmbiguousPreds++; continue; } @@ -1318,12 +1315,12 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl { JITDUMP(FMT_BB " is an eh constrained pred\n", predBlock->bbNum); jti.m_numAmbiguousPreds++; - BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); + BitVecOps::AddElemD(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum); continue; } jti.m_numTruePreds++; - BlockSetOps::AddElemD(this, jti.m_truePreds, predBlock->bbNum); + BitVecOps::AddElemD(&jti.traits, jti.m_truePreds, predBlock->bbPostorderNum); JITDUMP(FMT_BB " is a true pred\n", predBlock->bbNum); } else @@ -1333,7 +1330,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl if (!BasicBlock::sameEHRegion(predBlock, jti.m_falseTarget)) { JITDUMP(FMT_BB " is an eh constrained pred\n", predBlock->bbNum); - BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); + BitVecOps::AddElemD(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum); jti.m_numAmbiguousPreds++; continue; } @@ -1515,7 +1512,7 @@ bool Compiler::optJumpThreadPhi(BasicBlock* block, GenTree* tree, ValueNum treeN { JITDUMP("Could not map phi inputs from pred " FMT_BB "\n", predBlock->bbNum); JITDUMP(FMT_BB " is an ambiguous pred\n", predBlock->bbNum); - BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); + BitVecOps::AddElemD(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum); jti.m_numAmbiguousPreds++; continue; } @@ -1547,12 +1544,12 @@ bool Compiler::optJumpThreadPhi(BasicBlock* block, GenTree* tree, ValueNum treeN { JITDUMP(FMT_BB " is an eh constrained pred\n", predBlock->bbNum); jti.m_numAmbiguousPreds++; - BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); + BitVecOps::AddElemD(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum); continue; } jti.m_numTruePreds++; - BlockSetOps::AddElemD(this, jti.m_truePreds, predBlock->bbNum); + BitVecOps::AddElemD(&jti.traits, jti.m_truePreds, predBlock->bbPostorderNum); JITDUMP(FMT_BB " is a true pred\n", predBlock->bbNum); } else @@ -1560,7 +1557,7 @@ bool Compiler::optJumpThreadPhi(BasicBlock* block, GenTree* tree, ValueNum treeN if (!BasicBlock::sameEHRegion(predBlock, jti.m_falseTarget)) { JITDUMP(FMT_BB " is an eh constrained pred\n", predBlock->bbNum); - BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); + BitVecOps::AddElemD(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum); jti.m_numAmbiguousPreds++; continue; } @@ -1572,7 +1569,7 @@ bool Compiler::optJumpThreadPhi(BasicBlock* block, GenTree* tree, ValueNum treeN else { JITDUMP(FMT_BB " is an ambiguous pred\n", predBlock->bbNum); - BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); + BitVecOps::AddElemD(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum); jti.m_numAmbiguousPreds++; // If this was the first ambiguous pred, remember the substVN @@ -1659,7 +1656,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) { // If this was an ambiguous pred, skip. 
@@ -1515,7 +1512,7 @@
         {
             JITDUMP("Could not map phi inputs from pred " FMT_BB "\n", predBlock->bbNum);
             JITDUMP(FMT_BB " is an ambiguous pred\n", predBlock->bbNum);
-            BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum);
+            BitVecOps::AddElemD(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum);
             jti.m_numAmbiguousPreds++;
             continue;
         }
@@ -1547,12 +1544,12 @@
             {
                 JITDUMP(FMT_BB " is an eh constrained pred\n", predBlock->bbNum);
                 jti.m_numAmbiguousPreds++;
-                BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum);
+                BitVecOps::AddElemD(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum);
                 continue;
             }
 
             jti.m_numTruePreds++;
-            BlockSetOps::AddElemD(this, jti.m_truePreds, predBlock->bbNum);
+            BitVecOps::AddElemD(&jti.traits, jti.m_truePreds, predBlock->bbPostorderNum);
             JITDUMP(FMT_BB " is a true pred\n", predBlock->bbNum);
         }
         else
@@ -1560,7 +1557,7 @@
             if (!BasicBlock::sameEHRegion(predBlock, jti.m_falseTarget))
             {
                 JITDUMP(FMT_BB " is an eh constrained pred\n", predBlock->bbNum);
-                BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum);
+                BitVecOps::AddElemD(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum);
                 jti.m_numAmbiguousPreds++;
                 continue;
             }
@@ -1572,7 +1569,7 @@
         else
         {
             JITDUMP(FMT_BB " is an ambiguous pred\n", predBlock->bbNum);
-            BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum);
+            BitVecOps::AddElemD(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum);
             jti.m_numAmbiguousPreds++;
 
             // If this was the first ambiguous pred, remember the substVN
@@ -1659,7 +1656,7 @@
     {
         // If this was an ambiguous pred, skip.
         //
-        if (BlockSetOps::IsMember(this, jti.m_ambiguousPreds, predBlock->bbNum))
+        if (BitVecOps::IsMember(&jti.traits, jti.m_ambiguousPreds, predBlock->bbPostorderNum))
        {
            if (setNoCseIn && !jti.m_block->HasFlag(BBF_NO_CSE_IN))
            {
@@ -1669,7 +1666,7 @@
             continue;
         }
 
-        const bool isTruePred = BlockSetOps::IsMember(this, jti.m_truePreds, predBlock->bbNum);
+        const bool isTruePred = BitVecOps::IsMember(&jti.traits, jti.m_truePreds, predBlock->bbPostorderNum);
 
         // Jump to the appropriate successor.
         //
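Closing note, not part of the patch: JumpThreadInfo now carries its own BitVecTraits built from m_dfsTree->PostOrderTraits(), and the pred sets are keyed by bbPostorderNum rather than bbNum, which is why the fgCurBBEpochSize guard in optJumpThreadCheck could be dropped in the same change. A sketch of a membership test in that style, with jti and predBlock as in the code above:

    if (BitVecOps::IsMember(&jti.traits, jti.m_truePreds, predBlock->bbPostorderNum))
    {
        // predBlock was recorded as a true pred
    }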