diff --git a/include/swift/SIL/Projection.h b/include/swift/SIL/Projection.h
index b8d66d7983634..510a0cf78d9d0 100644
--- a/include/swift/SIL/Projection.h
+++ b/include/swift/SIL/Projection.h
@@ -106,6 +106,7 @@ enum class ProjectionKind : unsigned {
   Class = PointerIntEnumIndexKindValue<3, ProjectionKind>::value,
   Enum = PointerIntEnumIndexKindValue<4, ProjectionKind>::value,
   Box = PointerIntEnumIndexKindValue<5, ProjectionKind>::value,
+  Access = PointerIntEnumIndexKindValue<6, ProjectionKind>::value,
   LastIndexKind = Enum,
 };
 
@@ -129,6 +130,7 @@ static inline bool isCastProjectionKind(ProjectionKind Kind) {
   case ProjectionKind::Enum:
   case ProjectionKind::Box:
   case ProjectionKind::TailElems:
+  case ProjectionKind::Access:
     return false;
   }
 }
@@ -428,6 +430,7 @@ class Projection {
     case ProjectionKind::TailElems:
     case ProjectionKind::Enum:
     case ProjectionKind::Box:
+    case ProjectionKind::Access:
      return false;
    }
 
@@ -439,6 +442,7 @@ class Projection {
     case ProjectionKind::Class:
     case ProjectionKind::Enum:
     case ProjectionKind::Struct:
+    case ProjectionKind::Access:
       return true;
     case ProjectionKind::BitwiseCast:
     case ProjectionKind::Index:
diff --git a/lib/SIL/Utils/InstructionUtils.cpp b/lib/SIL/Utils/InstructionUtils.cpp
index b51e15605b172..a745f28b8b401 100644
--- a/lib/SIL/Utils/InstructionUtils.cpp
+++ b/lib/SIL/Utils/InstructionUtils.cpp
@@ -31,6 +31,7 @@ SILValue swift::stripOwnershipInsts(SILValue v) {
       return v;
     case ValueKind::CopyValueInst:
     case ValueKind::BeginBorrowInst:
+    case ValueKind::BeginAccessInst:
       v = cast<SingleValueInstruction>(v)->getOperand(0);
     }
   }
diff --git a/lib/SIL/Utils/Projection.cpp b/lib/SIL/Utils/Projection.cpp
index 2881469102ea1..d531b13cea89d 100644
--- a/lib/SIL/Utils/Projection.cpp
+++ b/lib/SIL/Utils/Projection.cpp
@@ -169,6 +169,11 @@ Projection::Projection(SingleValueInstruction *I) : Value() {
     assert(getKind() == ProjectionKind::BitwiseCast);
     break;
   }
+  case SILInstructionKind::BeginAccessInst: {
+    Value = ValueTy(ProjectionKind::Access, uintptr_t(0));
+    assert(getKind() == ProjectionKind::Access);
+    break;
+  }
   }
 }
 
@@ -196,6 +201,7 @@ SILType Projection::getType(SILType BaseType, SILModule &M,
   case ProjectionKind::BitwiseCast:
   case ProjectionKind::TailElems:
     return getCastType(BaseType);
+  case ProjectionKind::Access:
   case ProjectionKind::Index:
     // Index types do not change the underlying type.
     return BaseType;
@@ -237,6 +243,8 @@ Projection::createObjectProjection(SILBuilder &B, SILLocation Loc,
     return B.createUncheckedRefCast(Loc, Base, getCastType(BaseTy));
   case ProjectionKind::BitwiseCast:
     return B.createUncheckedBitwiseCast(Loc, Base, getCastType(BaseTy));
+  case ProjectionKind::Access:
+    return nullptr;
   }
 
   llvm_unreachable("Unhandled ProjectionKind in switch.");
@@ -281,6 +289,8 @@ Projection::createAddressProjection(SILBuilder &B, SILLocation Loc,
   case ProjectionKind::RefCast:
   case ProjectionKind::BitwiseCast:
     return B.createUncheckedAddrCast(Loc, Base, getCastType(BaseTy));
+  case ProjectionKind::Access:
+    return nullptr;
   }
 
   llvm_unreachable("Unhandled ProjectionKind in switch.");
@@ -835,6 +845,10 @@ SILValue Projection::getOperandForAggregate(SILInstruction *I) const {
       }
     }
     break;
+  case ProjectionKind::Access:
+    if (auto access = dyn_cast<BeginAccessInst>(I))
+      return access->getOperand();
+    break;
   case ProjectionKind::Class:
   case ProjectionKind::TailElems:
   case ProjectionKind::Box:
@@ -892,6 +906,7 @@ static bool isSupportedProjection(const Projection &p) {
   switch (p.getKind()) {
   case ProjectionKind::Struct:
   case ProjectionKind::Tuple:
+  case ProjectionKind::Access:
     return true;
   case ProjectionKind::Class:
   case ProjectionKind::Enum:
diff --git a/lib/SILOptimizer/PassManager/PassPipeline.cpp b/lib/SILOptimizer/PassManager/PassPipeline.cpp
index f2d717f4f4f72..2e1f4e29839c2 100644
--- a/lib/SILOptimizer/PassManager/PassPipeline.cpp
+++ b/lib/SILOptimizer/PassManager/PassPipeline.cpp
@@ -528,6 +528,8 @@ static void addLowLevelPassPipeline(SILPassPipelinePlan &P) {
   P.addDeadObjectElimination();
   P.addObjectOutliner();
   P.addDeadStoreElimination();
+  P.addDCE();
+  P.addDeadObjectElimination();
 
   // We've done a lot of optimizations on this function, attempt to FSO.
   P.addFunctionSignatureOpts();
diff --git a/lib/SILOptimizer/Transforms/DeadCodeElimination.cpp b/lib/SILOptimizer/Transforms/DeadCodeElimination.cpp
index 263af770e1443..9c3276bca929e 100644
--- a/lib/SILOptimizer/Transforms/DeadCodeElimination.cpp
+++ b/lib/SILOptimizer/Transforms/DeadCodeElimination.cpp
@@ -40,6 +40,12 @@ namespace {
 // FIXME: Reconcile the similarities between this and
 // isInstructionTriviallyDead.
 static bool seemsUseful(SILInstruction *I) {
+  if (auto access = dyn_cast<BeginAccessInst>(I))
+    return access->getSingleUse() == nullptr && !access->use_empty();
+
+  if (isa<EndAccessInst>(I))
+    return I->getOperand(0)->getSingleUse() == nullptr;
+
   if (I->mayHaveSideEffects())
     return true;
 
diff --git a/lib/SILOptimizer/Transforms/DeadStoreElimination.cpp b/lib/SILOptimizer/Transforms/DeadStoreElimination.cpp
index 99c4ba46cf169..b298ffe17b711 100644
--- a/lib/SILOptimizer/Transforms/DeadStoreElimination.cpp
+++ b/lib/SILOptimizer/Transforms/DeadStoreElimination.cpp
@@ -161,6 +161,8 @@ static bool isDeadStoreInertInstruction(SILInstruction *Inst) {
   case SILInstructionKind::DeallocRefInst:
   case SILInstructionKind::CondFailInst:
   case SILInstructionKind::FixLifetimeInst:
+  case SILInstructionKind::EndAccessInst:
+  case SILInstructionKind::SetDeallocatingInst:
     return true;
   default:
     return false;
@@ -888,8 +890,13 @@ bool DSEContext::processWriteForDSE(BlockState *S, unsigned bit) {
       continue;
     // If 2 locations may alias, we can still keep both stores.
     LSLocation &L = LocationVault[i];
-    if (!L.isMustAliasLSLocation(R, AA))
+    if (!L.isMustAliasLSLocation(R, AA)) {
+      // If this is a reference type, then the *live* store counts as an
+      // unknown read.
+      if (L.getBase()->getType().isAnyClassReferenceType())
+        S->stopTrackingLocation(S->BBWriteSetMid, i);
       continue;
+    }
     // There is a must alias store. No need to check further.
     StoreDead = true;
     break;
@@ -1087,7 +1094,12 @@ void DSEContext::processUnknownReadInstForGenKillSet(SILInstruction *I) {
   for (unsigned i = 0; i < S->LocationNum; ++i) {
     if (!S->BBMaxStoreSet.test(i))
       continue;
-    if (!AA->mayReadFromMemory(I, LocationVault[i].getBase()))
+    auto val = LocationVault[i].getBase();
+    if (!AA->mayReadFromMemory(I, val))
+      continue;
+    if (llvm::all_of(I->getAllOperands(), [&AA = AA, &val](Operand &op) {
+          return AA->isNoAlias(op.get(), val);
+        }))
       continue;
     // Update the genset and kill set.
     S->startTrackingLocation(S->BBKillSet, i);
@@ -1100,7 +1112,12 @@ void DSEContext::processUnknownReadInstForDSE(SILInstruction *I) {
   for (unsigned i = 0; i < S->LocationNum; ++i) {
     if (!S->isTrackingLocation(S->BBWriteSetMid, i))
       continue;
-    if (!AA->mayReadFromMemory(I, LocationVault[i].getBase()))
+    auto val = LocationVault[i].getBase();
+    if (!AA->mayReadFromMemory(I, val))
+      continue;
+    if (llvm::all_of(I->getAllOperands(), [&AA = AA, &val](Operand &op) {
+          return AA->isNoAlias(op.get(), val);
+        }))
       continue;
     S->stopTrackingLocation(S->BBWriteSetMid, i);
   }
@@ -1140,6 +1157,28 @@ void DSEContext::processInstruction(SILInstruction *I, DSEKind Kind) {
     processStoreInst(I, Kind);
   } else if (isa<DebugValueAddrInst>(I)) {
     processDebugValueAddrInst(I, Kind);
+  } else if (isa<BeginAccessInst>(I)) {
+    // Do the same thing here as in RLE. A begin_access reads or writes memory
+    // only if one of its users does. Because every user can be projected to
+    // the source memory location, any actual reads or writes are caught when
+    // those users are visited, so the begin_access itself can be ignored
+    // here.
+    return;
+  } else if (auto *release = dyn_cast<StrongReleaseInst>(I)) {
+    // If the type of the strong_release operand cannot have a custom
+    // destructor, we can safely ignore the release.
+    if (release->getOperand()->getType().getClassOrBoundGenericClass() ==
+        nullptr)
+      return;
+    // For strong releases, we have to prove that the strong release won't
+    // invoke the destructor. For now, we just try to find a set_deallocating
+    // instruction that indicates the life-ending strong_release has been
+    // devirtualized. TODO: there should be a better way to do this.
+    for (auto *use : release->getOperand()->getUses()) {
+      if (isa<SetDeallocatingInst>(use->getUser()))
+        return;
+    }
+    processUnknownReadInst(I, Kind);
   } else if (I->mayReadFromMemory()) {
     processUnknownReadInst(I, Kind);
   }
diff --git a/lib/SILOptimizer/Transforms/RedundantLoadElimination.cpp b/lib/SILOptimizer/Transforms/RedundantLoadElimination.cpp
index e446ac4a53ec2..fa71ea6838c8f 100644
--- a/lib/SILOptimizer/Transforms/RedundantLoadElimination.cpp
+++ b/lib/SILOptimizer/Transforms/RedundantLoadElimination.cpp
@@ -160,6 +160,9 @@ static bool isRLEInertInstruction(SILInstruction *Inst) {
   case SILInstructionKind::IsEscapingClosureInst:
   case SILInstructionKind::IsUniqueInst:
   case SILInstructionKind::FixLifetimeInst:
+  case SILInstructionKind::EndAccessInst:
+  case SILInstructionKind::SetDeallocatingInst:
+  case SILInstructionKind::DeallocRefInst:
     return true;
   default:
     return false;
@@ -977,6 +980,10 @@ void BlockState::processUnknownWriteInstForGenKillSet(RLEContext &Ctx,
     LSLocation &R = Ctx.getLocation(i);
     if (!AA->mayWriteToMemory(I, R.getBase()))
       continue;
+    if (llvm::all_of(I->getAllOperands(), [&AA, &R](Operand &op) {
+          return AA->isNoAlias(op.get(), R.getBase());
+        }))
+      continue;
     // MayAlias.
     stopTrackingLocation(BBGenSet, i);
     startTrackingLocation(BBKillSet, i);
@@ -996,6 +1003,10 @@ void BlockState::processUnknownWriteInstForRLE(RLEContext &Ctx,
     LSLocation &R = Ctx.getLocation(i);
     if (!AA->mayWriteToMemory(I, R.getBase()))
       continue;
+    if (llvm::all_of(I->getAllOperands(), [&AA, &R](Operand &op) {
+          return AA->isNoAlias(op.get(), R.getBase());
+        }))
+      continue;
     // MayAlias.
     stopTrackingLocation(ForwardSetIn, i);
     stopTrackingValue(ForwardValIn, i);
@@ -1089,6 +1100,18 @@ void BlockState::processInstructionWithKind(RLEContext &Ctx,
     return;
   }
 
+  // A begin_access writes to memory only if one of its users does. Because
+  // every user can be projected to the source memory location, any actual
+  // writes are caught when those users are visited, so the begin_access
+  // itself can be ignored here.
+  if (isa<BeginAccessInst>(Inst))
+    return;
+
+  // If the load is valid, then this strong_release won't invoke the
+  // destructor, so we can ignore it.
+  if (isa<StrongReleaseInst>(Inst))
+    return;
+
   // If this instruction has side effects, but is inert from a load store
   // perspective, skip it.
   if (isRLEInertInstruction(Inst))
diff --git a/lib/SILOptimizer/Transforms/ReleaseDevirtualizer.cpp b/lib/SILOptimizer/Transforms/ReleaseDevirtualizer.cpp
index 3642e337053fc..0f96c6764bfd7 100644
--- a/lib/SILOptimizer/Transforms/ReleaseDevirtualizer.cpp
+++ b/lib/SILOptimizer/Transforms/ReleaseDevirtualizer.cpp
@@ -70,13 +70,11 @@ void ReleaseDevirtualizer::run() {
   SILFunction *F = getFunction();
   RCIA = PM->getAnalysis<RCIdentityAnalysis>()->get(F);
 
+  // The last release_value or strong_release instruction before the
+  // deallocation.
+  SILInstruction *LastRelease = nullptr;
   bool Changed = false;
   for (SILBasicBlock &BB : *F) {
-
-    // The last release_value or strong_release instruction before the
-    // deallocation.
-    SILInstruction *LastRelease = nullptr;
-
     for (SILInstruction &I : BB) {
       if (LastRelease) {
         if (auto *DRI = dyn_cast<DeallocRefInst>(&I)) {
diff --git a/lib/SILOptimizer/Transforms/StackPromotion.cpp b/lib/SILOptimizer/Transforms/StackPromotion.cpp
index a7579edc39094..e86ca2b5ce500 100644
--- a/lib/SILOptimizer/Transforms/StackPromotion.cpp
+++ b/lib/SILOptimizer/Transforms/StackPromotion.cpp
@@ -140,6 +140,12 @@ bool StackPromotion::tryPromoteAlloc(AllocRefInst *ARI, EscapeAnalysis *EA,
     LLVM_DEBUG(llvm::dbgs() << " uses don't post-dom allocation -> don't promote");
     return false;
   }
+
+  if (Frontier.empty()) {
+    // TODO: assert(false);
+    return false;
+  }
+
   NumStackPromoted++;
 
   // We set the [stack] attribute in the alloc_ref.
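
For context, here is a minimal SIL sketch of the patterns the DCE and DSE changes above target. It is not taken from this patch or its tests; the function names and trivial Builtin.Int64 types are invented for illustration, and the exact enforcement markers ([static]) are just one valid choice.

sil_stage canonical

import Builtin

// An access scope whose only use is its own end_access. With the seemsUseful
// change, neither instruction counts as useful, so DCE can remove the pair.
sil @dead_access_scope : $@convention(thin) (@inout Builtin.Int64) -> () {
bb0(%0 : $*Builtin.Int64):
  %1 = begin_access [read] [static] %0 : $*Builtin.Int64
  end_access %1 : $*Builtin.Int64
  %2 = tuple ()
  return %2 : $()
}

// The first store is overwritten inside the same scope before any read. With
// begin_access modeled as ProjectionKind::Access and end_access treated as
// dead-store inert, DSE should now be able to delete the first store.
sil @dead_store_in_scope : $@convention(thin) (@inout Builtin.Int64, Builtin.Int64, Builtin.Int64) -> () {
bb0(%0 : $*Builtin.Int64, %1 : $Builtin.Int64, %2 : $Builtin.Int64):
  %3 = begin_access [modify] [static] %0 : $*Builtin.Int64
  store %1 to %3 : $*Builtin.Int64
  store %2 to %3 : $*Builtin.Int64
  end_access %3 : $*Builtin.Int64
  %4 = tuple ()
  return %4 : $()
}

Under the updated low-level pipeline (dead store elimination followed by the added DCE and dead object elimination runs), both sketched functions are expected to reduce to empty bodies.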