diff --git a/actors/builtin/cbor_gen.go b/actors/builtin/cbor_gen.go index bac9729a3..844aa9a7b 100644 --- a/actors/builtin/cbor_gen.go +++ b/actors/builtin/cbor_gen.go @@ -158,8 +158,8 @@ func (t *ConfirmSectorProofsParams) MarshalCBOR(w io.Writer) error { return err } - // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) - if err := t.QualityAdjPowerSmoothed.MarshalCBOR(w); err != nil { + // t.RawBytePowerSmoothed (smoothing.FilterEstimate) (struct) + if err := t.RawBytePowerSmoothed.MarshalCBOR(w); err != nil { return err } return nil @@ -234,12 +234,12 @@ func (t *ConfirmSectorProofsParams) UnmarshalCBOR(r io.Reader) error { } } - // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) + // t.RawBytePowerSmoothed (smoothing.FilterEstimate) (struct) { - if err := t.QualityAdjPowerSmoothed.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjPowerSmoothed: %w", err) + if err := t.RawBytePowerSmoothed.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.RawBytePowerSmoothed: %w", err) } } @@ -277,8 +277,8 @@ func (t *DeferredCronEventParams) MarshalCBOR(w io.Writer) error { return err } - // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) - if err := t.QualityAdjPowerSmoothed.MarshalCBOR(w); err != nil { + // t.RawBytePowerSmoothed (smoothing.FilterEstimate) (struct) + if err := t.RawBytePowerSmoothed.MarshalCBOR(w); err != nil { return err } return nil @@ -332,12 +332,12 @@ func (t *DeferredCronEventParams) UnmarshalCBOR(r io.Reader) error { } } - // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) + // t.RawBytePowerSmoothed (smoothing.FilterEstimate) (struct) { - if err := t.QualityAdjPowerSmoothed.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjPowerSmoothed: %w", err) + if err := t.RawBytePowerSmoothed.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.RawBytePowerSmoothed: %w", err) } } diff --git a/actors/builtin/market/market_actor.go b/actors/builtin/market/market_actor.go index cb6ff9e61..4c7f21c64 100644 --- a/actors/builtin/market/market_actor.go +++ b/actors/builtin/market/market_actor.go @@ -33,7 +33,7 @@ func (a Actor) Exports() []interface{} { 2: a.AddBalance, 3: a.WithdrawBalance, 4: a.PublishStorageDeals, - 5: a.VerifyDealsForActivation, + 5: nil, // Deprecated 6: a.ActivateDeals, 7: a.OnMinerSectorsTerminate, 8: a.ComputeDataCommitment, @@ -295,6 +295,8 @@ func (a Actor) PublishStorageDeals(rt Runtime, params *PublishStorageDealsParams "%d valid deals but validDealCount=%d", len(validDeals), validDealCount) builtin.RequireParam(rt, validDealCount > 0, "All deal proposals invalid") + // FIXME add verified deal pledge + var newDealIds []abi.DealID rt.StateTransaction(&st, func() { msm, err := st.mutator(adt.AsStore(rt)).withPendingProposals(WritePermission). @@ -337,68 +339,6 @@ func (a Actor) PublishStorageDeals(rt Runtime, params *PublishStorageDealsParams } } -// Changed since v2: -// - Array of sectors rather than just one -// - Removed SectorStart (which is unknown at call time) -type VerifyDealsForActivationParams struct { - Sectors []SectorDeals -} - -type SectorDeals struct { - SectorExpiry abi.ChainEpoch - DealIDs []abi.DealID -} - -// Changed since v2: -// - Array of sectors weights -type VerifyDealsForActivationReturn struct { - Sectors []SectorWeights -} - -type SectorWeights struct { - DealSpace uint64 // Total space in bytes of submitted deals. 
- DealWeight abi.DealWeight // Total space*time of submitted deals. - VerifiedDealWeight abi.DealWeight // Total space*time of submitted verified deals. -} - -// Computes the weight of deals proposed for inclusion in a number of sectors. -// Deal weight is defined as the sum, over all deals in the set, of the product of deal size and duration. -// -// This method performs some light validation on the way in order to fail early if deals can be -// determined to be invalid for the proposed sector properties. -// Full deal validation is deferred to deal activation since it depends on the activation epoch. -func (a Actor) VerifyDealsForActivation(rt Runtime, params *VerifyDealsForActivationParams) *VerifyDealsForActivationReturn { - rt.ValidateImmediateCallerType(builtin.StorageMinerActorCodeID) - minerAddr := rt.Caller() - currEpoch := rt.CurrEpoch() - - var st State - rt.StateReadonly(&st) - store := adt.AsStore(rt) - - proposals, err := AsDealProposalArray(store, st.Proposals) - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deal proposals") - - weights := make([]SectorWeights, len(params.Sectors)) - for i, sector := range params.Sectors { - // Pass the current epoch as the activation epoch for validation. - // The sector activation epoch isn't yet known, but it's still more helpful to fail now if the deal - // is so late that a sector activating now couldn't include it. - dealWeight, verifiedWeight, dealSpace, err := validateAndComputeDealWeight(proposals, sector.DealIDs, minerAddr, sector.SectorExpiry, currEpoch) - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to validate deal proposals for activation") - - weights[i] = SectorWeights{ - DealSpace: dealSpace, - DealWeight: dealWeight, - VerifiedDealWeight: verifiedWeight, - } - } - - return &VerifyDealsForActivationReturn{ - Sectors: weights, - } -} - //type ActivateDealsParams struct { // DealIDs []abi.DealID // SectorExpiry abi.ChainEpoch @@ -417,11 +357,12 @@ func (a Actor) ActivateDeals(rt Runtime, params *ActivateDealsParams) *abi.Empty // Update deal dealStates. rt.StateTransaction(&st, func() { - _, _, _, err := ValidateDealsForActivation(&st, store, params.DealIDs, minerAddr, params.SectorExpiry, currEpoch) + err := ValidateDealsForActivation(&st, store, params.DealIDs, minerAddr, params.SectorExpiry, currEpoch) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to validate dealProposals for activation") msm, err := st.mutator(adt.AsStore(rt)).withDealStates(WritePermission). - withPendingProposals(ReadOnlyPermission).withDealProposals(ReadOnlyPermission).build() + withPendingProposals(ReadOnlyPermission).withDealProposals(ReadOnlyPermission). + withVerifiedRewards(WritePermission).build() builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state") for _, dealID := range params.DealIDs { @@ -452,6 +393,24 @@ func (a Actor) ActivateDeals(rt Runtime, params *ActivateDealsParams) *abi.Empty SlashEpoch: epochUndefined, }) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to set deal state %d", dealID) + + // Schedule verified space activation/deactivation events for the deal's nominal lifespan. + if proposal.VerifiedDeal { + verifiedSpace := big.NewIntUnsigned(uint64(proposal.PieceSize)) + // Schedule delta to total verified space at deal activation. 
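+			// (The inverse delta scheduled below at the deal's end epoch cancels this one, so each deal's
+			// scheduled deltas sum to zero over its lifetime.)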
+			err = msm.totalVerifiedSpaceDeltas.Add(proposal.StartEpoch, verifiedSpace)
+			builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to add verified space delta %v at %d", verifiedSpace, proposal.StartEpoch)
+			// Schedule inverse delta to total verified space at deal expiration.
+			err = msm.totalVerifiedSpaceDeltas.Add(proposal.EndEpoch, verifiedSpace.Neg())
+			builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to add verified space delta %v at %d", verifiedSpace.Neg(), proposal.EndEpoch)
+
+			// Schedule deltas to the provider's claim.
+			// TODO: consider a method that adds both of these in one go, to avoid a storage write
+			err = msm.providerVerifiedClaims.Add(store, proposal.Provider, proposal.StartEpoch, verifiedSpace)
+			builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to enqueue verified claim activation for %d", dealID)
+			err = msm.providerVerifiedClaims.Add(store, proposal.Provider, proposal.EndEpoch, verifiedSpace.Neg())
+			builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to enqueue verified claim termination for %d", dealID)
+			}
		}

		err = msm.commitState()
@@ -516,9 +475,10 @@ func (a Actor) OnMinerSectorsTerminate(rt Runtime, params *OnMinerSectorsTermina
	minerAddr := rt.Caller()

	var st State
+	store := adt.AsStore(rt)
	rt.StateTransaction(&st, func() {
-		msm, err := st.mutator(adt.AsStore(rt)).withDealStates(WritePermission).
-			withDealProposals(ReadOnlyPermission).build()
+		msm, err := st.mutator(store).withDealStates(WritePermission).
+			withDealProposals(ReadOnlyPermission).withVerifiedRewards(WritePermission).build()
		builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deal state")

		for _, dealID := range params.DealIDs {
@@ -557,6 +517,28 @@ func (a Actor) OnMinerSectorsTerminate(rt Runtime, params *OnMinerSectorsTermina
			// actual releasing of locked funds for the client and slashing of provider collateral happens in CronTick.
			state.SlashEpoch = params.Epoch

+			// Bring forward the termination of verified power.
+			// FIXME: apply a penalty to verified deal pledge
+			if deal.VerifiedDeal {
+				// The provided nominal termination epoch is ignored because that verified space total has already been
+				// calculated and used for reward distribution. The best we can do is subtract the power from the
+				// current epoch.
+				currentEpoch := rt.CurrEpoch()
+				verifiedSpace := big.NewIntUnsigned(uint64(deal.PieceSize))
+				// Subtract the verified space from the current epoch.
+				err = msm.totalVerifiedSpaceDeltas.Add(currentEpoch, verifiedSpace.Neg())
+				builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to subtract verified space delta %v at %d", verifiedSpace.Neg(), currentEpoch)
+				// Add back the verified space at the epoch where the deal expiration was originally scheduled.
+				err = msm.totalVerifiedSpaceDeltas.Add(deal.EndEpoch, verifiedSpace)
+				builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to add verified space delta %v at %d", verifiedSpace, deal.EndEpoch)
+
+				// Similarly, bring forward termination of the provider's verified claim.
+ err = msm.providerVerifiedClaims.Add(store, deal.Provider, currentEpoch, verifiedSpace.Neg()) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to enqueue verified claim activation for %d", dealID) + err = msm.providerVerifiedClaims.Add(store, deal.Provider, deal.EndEpoch, verifiedSpace) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to enqueue verified claim termination for %d", dealID) + } + err = msm.dealStates.Set(dealID, state) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to set deal state %v", dealID) } @@ -573,16 +555,21 @@ func (a Actor) CronTick(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue { var timedOutVerifiedDeals []*DealProposal + currEpoch := rt.CurrEpoch() + prevTotalVerifiedSpace := big.Zero() + newTotalVerifiedSpace := big.Zero() var st State rt.StateTransaction(&st, func() { updatesNeeded := make(map[abi.ChainEpoch][]abi.DealID) msm, err := st.mutator(adt.AsStore(rt)).withDealStates(WritePermission). withLockedTable(WritePermission).withEscrowTable(WritePermission).withDealsByEpoch(WritePermission). - withDealProposals(WritePermission).withPendingProposals(WritePermission).build() + withDealProposals(WritePermission).withPendingProposals(WritePermission). + withVerifiedRewards(WritePermission).build() builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state") - for i := st.LastCron + 1; i <= rt.CurrEpoch(); i++ { + for i := st.LastCron + 1; i <= currEpoch; i++ { + // Process incremental deal payments. err = msm.dealsByEpoch.ForEach(i, func(dealID abi.DealID) error { deal, err := getDealProposal(msm.dealProposals, dealID) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get dealId %d", dealID) @@ -596,7 +583,7 @@ func (a Actor) CronTick(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue { // deal has been published but not activated yet -> terminate it as it has timed out if !found { // Not yet appeared in proven sector; check for timeout. 
-				builtin.RequireState(rt, rt.CurrEpoch() >= deal.StartEpoch, "deal %d processed before start epoch %d",
+				builtin.RequireState(rt, currEpoch >= deal.StartEpoch, "deal %d processed before start epoch %d",
					dealID, deal.StartEpoch)

				slashed := msm.processDealInitTimedOut(rt, deal)
@@ -622,7 +609,7 @@ func (a Actor) CronTick(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue {
					builtin.RequireNoErr(rt, pdErr, exitcode.ErrIllegalState, "failed to delete pending proposal %v", dcid)
				}

-				slashAmount, nextEpoch, removeDeal := msm.updatePendingDealState(rt, state, deal, rt.CurrEpoch())
+				slashAmount, nextEpoch, removeDeal := msm.updatePendingDealState(rt, state, deal, currEpoch)
				builtin.RequireState(rt, slashAmount.GreaterThanEqual(big.Zero()), "computed negative slash amount %v for deal %d", slashAmount, dealID)

				if removeDeal {
@@ -635,11 +622,11 @@ func (a Actor) CronTick(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue {
					err = msm.dealProposals.Delete(dealID)
					builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to delete deal proposal %d", dealID)
				} else {
-					builtin.RequireState(rt, nextEpoch > rt.CurrEpoch(), "continuing deal %d next epoch %d should be in future", dealID, nextEpoch)
+					builtin.RequireState(rt, nextEpoch > currEpoch, "continuing deal %d next epoch %d should be in future", dealID, nextEpoch)
					builtin.RequireState(rt, slashAmount.IsZero(), "continuing deal %d should not be slashed", dealID)

					// Update deal's LastUpdatedEpoch in DealStates
-					state.LastUpdatedEpoch = rt.CurrEpoch()
+					state.LastUpdatedEpoch = currEpoch
					err = msm.dealStates.Set(dealID, state)
					builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to set deal state")

@@ -669,7 +656,45 @@ func (a Actor) CronTick(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue {
			builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to reinsert deal IDs for epoch %v", epoch)
		}

-		st.LastCron = rt.CurrEpoch()
+		// Compute total verified space for this epoch by popping the queue entry for this epoch and adding to
+		// the running total.
+		// NOTE: this assumes that st.LastCron == current epoch. The code above, which does not assume that, is
+		// a hangover from when cron was not executed for empty tipsets.
+		prevTotalVerifiedSpace = msm.totalVerifiedSpace
+		verifiedSpaceDelta := big.Zero()
+		_, err = msm.totalVerifiedSpaceDeltas.Pop(uint64(currEpoch), &verifiedSpaceDelta)
+		builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load verified space deltas at %d", currEpoch)
+		newTotalVerifiedSpace = big.Add(msm.totalVerifiedSpace, verifiedSpaceDelta)
+		builtin.RequireState(rt, newTotalVerifiedSpace.GreaterThanEqual(big.Zero()),
+			"total verified space went negative %v at %d", newTotalVerifiedSpace, currEpoch)
+		msm.totalVerifiedSpace = newTotalVerifiedSpace
+
+		st.LastCron = currEpoch
+		err = msm.commitState()
+		builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush state")
+	})
+
+	// Claim verified deal reward for this past epoch (which is based on the total verified space at the
+	// end of the prior one), and advise the reward actor of the new total verified space for the next
+	// epoch's reward.
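+	// Note that the send happens outside the state transaction above, after the updated totals have been committed.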
+ var thisEpochReward big.Int + code := rt.Send( + builtin.RewardActorAddr, + builtin.MethodsReward.ClaimVerifiedDealReward, + &newTotalVerifiedSpace, + abi.NewTokenAmount(0), + &thisEpochReward) + if !code.IsSuccess() { + rt.Log(rtt.ERROR, "failed to claim verified deal reward, got code %v", code) + } + + rt.StateTransaction(&st, func() { + msm, err := st.mutator(adt.AsStore(rt)).withVerifiedRewards(WritePermission).build() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state") + + // FIXME purge history beyond some window + err = msm.verifiedRewardHistory.Set(currEpoch, prevTotalVerifiedSpace, thisEpochReward) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to set verified reward history for %d as space %v, reward %v", currEpoch, newTotalVerifiedSpace, thisEpochReward) err = msm.commitState() builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush state") @@ -701,6 +726,22 @@ func (a Actor) CronTick(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue { return nil } +//// Computes and returns the total verified space at the end of this epoch. +//// This is expected to be called exactly once each epoch +//func (a Actor) TotalVerifiedSpace(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue { +// rt.ValidateImmediateCallerIs(builtin.RewardActorAddr) +// +// msm, err := st.mutator(adt.AsStore(rt)).withDealStates(WritePermission). +// withLockedTable(WritePermission).withEscrowTable(WritePermission).withDealsByEpoch(WritePermission). +// withDealProposals(WritePermission).withPendingProposals(WritePermission).build() +// builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state") +// +//} + +// +// Exported functions +// + func GenRandNextEpoch(startEpoch abi.ChainEpoch, dealID abi.DealID) abi.ChainEpoch { offset := abi.ChainEpoch(uint64(dealID) % uint64(DealUpdatesInterval)) q := builtin.NewQuantSpec(DealUpdatesInterval, 0) @@ -712,77 +753,45 @@ func GenRandNextEpoch(startEpoch abi.ChainEpoch, dealID abi.DealID) abi.ChainEpo return nextDay + offset } -// -// Exported functions -// - -// Validates a collection of deal dealProposals for activation, and returns their combined weight, -// split into regular deal weight and verified deal weight. -func ValidateDealsForActivation( - st *State, store adt.Store, dealIDs []abi.DealID, minerAddr addr.Address, sectorExpiry, currEpoch abi.ChainEpoch, -) (big.Int, big.Int, uint64, error) { +// Validates a collection of deal proposals for activation. 
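+// Rejects duplicate deal IDs and checks that each proposal is owned by the given provider, has not yet
+// started, and does not expire after the sector.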
+func ValidateDealsForActivation(st *State, store adt.Store, dealIDs []abi.DealID, minerAddr addr.Address, sectorExpiration, currEpoch abi.ChainEpoch) error {
	proposals, err := AsDealProposalArray(store, st.Proposals)
	if err != nil {
-		return big.Int{}, big.Int{}, 0, xerrors.Errorf("failed to load dealProposals: %w", err)
+		return xerrors.Errorf("failed to load dealProposals: %w", err)
	}

-	return validateAndComputeDealWeight(proposals, dealIDs, minerAddr, sectorExpiry, currEpoch)
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Checks
-////////////////////////////////////////////////////////////////////////////////
-
-func validateAndComputeDealWeight(proposals *DealArray, dealIDs []abi.DealID, minerAddr addr.Address,
-	sectorExpiry abi.ChainEpoch, sectorActivation abi.ChainEpoch) (big.Int, big.Int, uint64, error) {
-	seenDealIDs := make(map[abi.DealID]struct{}, len(dealIDs))
-	totalDealSpace := uint64(0)
-	totalDealSpaceTime := big.Zero()
-	totalVerifiedSpaceTime := big.Zero()
+	seenDealIDs := make(map[abi.DealID]struct{}, len(dealIDs))
	for _, dealID := range dealIDs {
		// Make sure we don't double-count deals.
		if _, seen := seenDealIDs[dealID]; seen {
-			return big.Int{}, big.Int{}, 0, exitcode.ErrIllegalArgument.Wrapf("deal ID %d present multiple times", dealID)
+			return exitcode.ErrIllegalArgument.Wrapf("deal ID %d present multiple times", dealID)
		}
		seenDealIDs[dealID] = struct{}{}

		proposal, found, err := proposals.Get(dealID)
		if err != nil {
-			return big.Int{}, big.Int{}, 0, xerrors.Errorf("failed to load deal %d: %w", dealID, err)
+			return xerrors.Errorf("failed to load deal %d: %w", dealID, err)
		}
		if !found {
-			return big.Int{}, big.Int{}, 0, exitcode.ErrNotFound.Wrapf("no such deal %d", dealID)
+			return exitcode.ErrNotFound.Wrapf("no such deal %d", dealID)
		}
-		if err = validateDealCanActivate(proposal, minerAddr, sectorExpiry, sectorActivation); err != nil {
-			return big.Int{}, big.Int{}, 0, xerrors.Errorf("cannot activate deal %d: %w", dealID, err)
+		if proposal.Provider != minerAddr {
+			return exitcode.ErrForbidden.Wrapf("proposal has provider %v, must be %v", proposal.Provider, minerAddr)
		}
-
-		// Compute deal weight
-		totalDealSpace += uint64(proposal.PieceSize)
-		dealSpaceTime := DealWeight(proposal)
-		if proposal.VerifiedDeal {
-			totalVerifiedSpaceTime = big.Add(totalVerifiedSpaceTime, dealSpaceTime)
-		} else {
-			totalDealSpaceTime = big.Add(totalDealSpaceTime, dealSpaceTime)
+		if currEpoch > proposal.StartEpoch {
+			return exitcode.ErrIllegalArgument.Wrapf("proposal start epoch %d has already elapsed at %d", proposal.StartEpoch, currEpoch)
+		}
+		if proposal.EndEpoch > sectorExpiration {
+			return exitcode.ErrIllegalArgument.Wrapf("proposal expiration %d exceeds sector expiration %d", proposal.EndEpoch, sectorExpiration)
		}
-	}
-	return totalDealSpaceTime, totalVerifiedSpaceTime, totalDealSpace, nil
-}
-
-func validateDealCanActivate(proposal *DealProposal, minerAddr addr.Address, sectorExpiration, sectorActivation abi.ChainEpoch) error {
-	if proposal.Provider != minerAddr {
-		return exitcode.ErrForbidden.Wrapf("proposal has provider %v, must be %v", proposal.Provider, minerAddr)
-	}
-	if sectorActivation > proposal.StartEpoch {
-		return exitcode.ErrIllegalArgument.Wrapf("proposal start epoch %d has already elapsed at %d", proposal.StartEpoch, sectorActivation)
-	}
-	if proposal.EndEpoch > sectorExpiration {
-		return exitcode.ErrIllegalArgument.Wrapf("proposal expiration %d exceeds sector expiration %d", proposal.EndEpoch, sectorExpiration)
-	}
+	}
	return nil
}
+//////////////////////////////////////////////////////////////////////////////// +// Checks +//////////////////////////////////////////////////////////////////////////////// + func validateDeal(rt Runtime, deal ClientDealProposal, networkRawPower, networkQAPower, baselinePower abi.StoragePower) error { if err := dealProposalIsInternallyValid(rt, deal); err != nil { return xerrors.Errorf("Invalid deal proposal %w", err) @@ -889,5 +898,5 @@ func requestCurrentNetworkPower(rt Runtime) (rawPower, qaPower abi.StoragePower) var pwr power.CurrentTotalPowerReturn code := rt.Send(builtin.StoragePowerActorAddr, builtin.MethodsPower.CurrentTotalPower, nil, big.Zero(), &pwr) builtin.RequireSuccess(rt, code, "failed to check current power") - return pwr.RawBytePower, pwr.QualityAdjPower + return pwr.RawBytePower, big.Zero() } diff --git a/actors/builtin/market/market_state.go b/actors/builtin/market/market_state.go index e0a16c804..ecde9c9df 100644 --- a/actors/builtin/market/market_state.go +++ b/actors/builtin/market/market_state.go @@ -27,6 +27,9 @@ const ( // Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data. const ProposalsAmtBitwidth = 5 const StatesAmtBitwidth = 6 +const VerifiedRewardsHistoryAmtBitwidth = 5 // FIXME calibrate this +const ProviderVerifiedClaimsHamtBitwidth = 5 // FIXME calibrate this +const StoragePowerDeltaQueueAmtBitwidth = 5 // FIXME calibrate this type State struct { // Proposals are deals that have been proposed and not yet cleaned up after expiry or termination. @@ -60,6 +63,16 @@ type State struct { TotalProviderLockedCollateral abi.TokenAmount // Total storage fee that is locked in escrow -> unlocked when payments are made TotalClientStorageFee abi.TokenAmount + + // Sum of space occupied by active verified deals at end of last epoch. + TotalVerifiedSpace abi.StoragePower + // Queue of deltas to total verified space in the current or future epochs. + // The sum of TotalVerifiedSpace plus all deltas should equal zero, never dipping below zero in the interim. 
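+	// (A deal contributes +PieceSize at its start epoch and -PieceSize at its end epoch.)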
+ TotalVerifiedSpaceDeltas cid.Cid // AMT[epoch]big.Int + // Recent history of past total verified space and verified reward received + VerifiedRewardsHistory cid.Cid // AMT[epoch]VerifiedDealTotals + // Provider verified deal claims + ProviderVerifiedClaims cid.Cid // HAMT[actorID]{...} } func ConstructState(store adt.Store) (*State, error) { @@ -85,6 +98,19 @@ func ConstructState(store adt.Store) (*State, error) { return nil, xerrors.Errorf("failed to create empty balance table: %w", err) } + emptyStoragePowerDeltaQueueCid, err := adt.StoreEmptyArray(store, StoragePowerDeltaQueueAmtBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to create empty array: %w", err) + } + emptyVerifiedRewardsHistoryCid, err := adt.StoreEmptyArray(store, VerifiedRewardsHistoryAmtBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to create empty array: %w", err) + } + emptyProviderVerifiedClaimsCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to create empty map: %w", err) + } + return &State{ Proposals: emptyProposalsArrayCid, States: emptyStatesArrayCid, @@ -98,6 +124,11 @@ func ConstructState(store adt.Store) (*State, error) { TotalClientLockedCollateral: abi.NewTokenAmount(0), TotalProviderLockedCollateral: abi.NewTokenAmount(0), TotalClientStorageFee: abi.NewTokenAmount(0), + + TotalVerifiedSpace: big.Zero(), + TotalVerifiedSpaceDeltas: emptyStoragePowerDeltaQueueCid, + VerifiedRewardsHistory: emptyVerifiedRewardsHistoryCid, + ProviderVerifiedClaims: emptyProviderVerifiedClaimsCid, }, nil } @@ -297,6 +328,12 @@ type marketStateMutation struct { totalClientStorageFee abi.TokenAmount nextDealId abi.DealID + + verifiedPermit MarketStateMutationPermission + totalVerifiedSpace abi.StoragePower + totalVerifiedSpaceDeltas *StoragePowerDeltaQueue + verifiedRewardHistory *VerifiedRewardHistory + providerVerifiedClaims *ProviderVerifiedClaims } func (s *State) mutator(store adt.Store) *marketStateMutation { @@ -355,6 +392,27 @@ func (m *marketStateMutation) build() (*marketStateMutation, error) { m.dealsByEpoch = dbe } + if m.verifiedPermit != Invalid { + m.totalVerifiedSpace = m.st.TotalVerifiedSpace + tvsq, err := AsStoragePowerDeltaQueue(m.store, m.st.TotalVerifiedSpaceDeltas) + if err != nil { + return nil, xerrors.Errorf("failed to load verified space deltas: %w", err) + } + m.totalVerifiedSpaceDeltas = tvsq + + hvr, err := AsVerifiedRewardHistory(m.store, m.st.VerifiedRewardsHistory) + if err != nil { + return nil, xerrors.Errorf("failed to load historical verified rewards: %w", err) + } + m.verifiedRewardHistory = hvr + + pvc, err := AsProviderVerifiedClaims(m.store, m.st.ProviderVerifiedClaims) + if err != nil { + return nil, xerrors.Errorf("failed to load provider verified claims: %w", err) + } + m.providerVerifiedClaims = pvc + } + m.nextDealId = m.st.NextID return m, nil @@ -390,6 +448,11 @@ func (m *marketStateMutation) withDealsByEpoch(permit MarketStateMutationPermiss return m } +func (m *marketStateMutation) withVerifiedRewards(permit MarketStateMutationPermission) *marketStateMutation { + m.verifiedPermit = permit + return m +} + func (m *marketStateMutation) commitState() error { var err error if m.proposalPermit == WritePermission { @@ -431,6 +494,19 @@ func (m *marketStateMutation) commitState() error { } } + if m.verifiedPermit == WritePermission { + m.st.TotalVerifiedSpace = m.totalVerifiedSpace + if m.st.TotalVerifiedSpaceDeltas, err = m.totalVerifiedSpaceDeltas.Root(); err != nil { + return 
xerrors.Errorf("failed to flush verified space deltas: %w", err) + } + if m.st.VerifiedRewardsHistory, err = m.verifiedRewardHistory.Root(); err != nil { + return xerrors.Errorf("failed to flush historical verified rewards: %w", err) + } + if m.st.ProviderVerifiedClaims, err = m.providerVerifiedClaims.Root(); err != nil { + return xerrors.Errorf("failed to flush provider verified claims: %w", err) + } + } + m.st.NextID = m.nextDealId return nil } diff --git a/actors/builtin/market/types.go b/actors/builtin/market/types.go index a191f6c40..a23da5df2 100644 --- a/actors/builtin/market/types.go +++ b/actors/builtin/market/types.go @@ -1,12 +1,19 @@ package market import ( + "io" + + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" . "github.com/filecoin-project/specs-actors/v8/actors/util/adt" + "golang.org/x/xerrors" "github.com/ipfs/go-cid" ) +///// Deal proposal array ///// + // A specialization of a array to deals. // It is an error to query for a key that doesn't exist. type DealArray struct { @@ -42,6 +49,8 @@ func (t *DealArray) Delete(id abi.DealID) error { return t.Array.Delete(uint64(id)) } +///// Deal state array ///// + // A specialization of a array to deals. // It is an error to query for a key that doesn't exist. type DealMetaArray struct { @@ -93,3 +102,176 @@ func (t *DealMetaArray) Set(k abi.DealID, value *DealState) error { func (t *DealMetaArray) Delete(id abi.DealID) error { return t.Array.Delete(uint64(id)) } + +///// Provider verified claims map ///// + +// A specialization of a map to provider verified claims. +// It is an error to query for a key that doesn't exist. +type ProviderVerifiedClaims struct { + *Map +} + +type ProviderVerifiedClaim struct { + // The epoch until which rewards were last processed for the provider. + LastClaimEpoch abi.ChainEpoch + // The provider's active verified deal space at the end of LastClaimEpoch. + VerifiedDealSpace abi.StoragePower + // Events since LastClaimEpoch, keyed by epoch. + DeltaQueue cid.Cid // AMT[epoch]big.Int +} + +// Interprets a store as a provider verified claims map with root `r`. +func AsProviderVerifiedClaims(s Store, r cid.Cid) (*ProviderVerifiedClaims, error) { + pvc, err := AsMap(s, r, ProviderVerifiedClaimsHamtBitwidth) + if err != nil { + return nil, err + } + return &ProviderVerifiedClaims{pvc}, nil +} + +func (pvc *ProviderVerifiedClaims) Add(s Store, provider address.Address, epoch abi.ChainEpoch, size abi.StoragePower) error { + var providerClaim ProviderVerifiedClaim + if found, err := pvc.Get(abi.AddrKey(provider), &providerClaim); err != nil { + return err + } else if !found { + return xerrors.Errorf("no verified claim entry for %v", provider) + } + deltaQueue, err := AsStoragePowerDeltaQueue(s, providerClaim.DeltaQueue) + if err != nil { + return err + } + + if err = deltaQueue.Add(epoch, size); err != nil { + return err + } + if providerClaim.DeltaQueue, err = deltaQueue.Root(); err != nil { + return err + } + if err = pvc.Put(abi.AddrKey(provider), &providerClaim); err != nil { + return err + } + return nil +} + +///// Provider event queue array ///// + +// A provider event queue is an ordered collection of events keyed by epoch, implemented as an AMT with +// array values (supporting multiple events at one epoch). +// An array value will be efficient while the number of events per epoch is small, or written in batch. 
+type ProviderEventQueue struct { + *Array +} + +//type ProviderEventQueueEntry struct { +// Events []VerifiedDealEvent +//} +// +//type VerifiedDealEvent struct { +// EventType VerifiedDealEventType +// VerifiedSpace abi.StoragePower +//} +// +//// Interprets a store as a provider event queue with root `r`. +//func AsProviderEventQueue(s Store, r cid.Cid) (*ProviderEventQueue, error) { +// array, err := AsArray(s, r, ProviderEventQueueAmtBitwidth) +// if err != nil { +// return nil, err +// } +// return &ProviderEventQueue{array}, nil +//} +// +//func (q *ProviderEventQueue) Enqueue(epoch abi.ChainEpoch, eventType VerifiedDealEventType, size abi.StoragePower) error { +// var queueEntry ProviderEventQueueEntry +// _, err := q.Get(uint64(epoch), &queueEntry) +// if err != nil { +// return err +// } +// +// queueEntry.Events = append(queueEntry.Events, VerifiedDealEvent{ +// EventType: eventType, +// VerifiedSpace: size, +// }) +// +// if err := q.Set(uint64(epoch), &queueEntry); err != nil { +// return err +// } +// return nil +//} + +///// Storage power delta queue array ///// + +type StoragePowerDeltaQueue struct { + *Array +} + +// Interprets a store as a queue with root `r`. +func AsStoragePowerDeltaQueue(s Store, r cid.Cid) (*StoragePowerDeltaQueue, error) { + array, err := AsArray(s, r, StoragePowerDeltaQueueAmtBitwidth) + if err != nil { + return nil, err + } + return &StoragePowerDeltaQueue{array}, nil +} + +// Adds `size` to the entry at `epoch`. The entry is taken to be zero if absent. +func (q *StoragePowerDeltaQueue) Add(epoch abi.ChainEpoch, size abi.StoragePower) error { + queueEntry := big.Zero() + _, err := q.Get(uint64(epoch), &queueEntry) + if err != nil { + return err + } + + queueEntry = big.Add(queueEntry, size) + + if err := q.Set(uint64(epoch), &queueEntry); err != nil { + return err + } + return nil +} + +///// Verified reward history array /// + +type VerifiedRewardHistory struct { + *Array +} + +type VerifiedRewardHistoryEntry struct { + TotalVerifiedSpace abi.StoragePower + TotalReward abi.TokenAmount +} + +func AsVerifiedRewardHistory(s Store, r cid.Cid) (*VerifiedRewardHistory, error) { + array, err := AsArray(s, r, VerifiedRewardsHistoryAmtBitwidth) + if err != nil { + return nil, err + } + return &VerifiedRewardHistory{array}, nil +} + +func (a *VerifiedRewardHistory) Set(epoch abi.ChainEpoch, totalVerifiedSpace abi.StoragePower, totalReward abi.TokenAmount) error { + value := VerifiedRewardHistoryEntry{ + TotalVerifiedSpace: totalVerifiedSpace, + TotalReward: totalReward, + } + return a.Array.Set(uint64(epoch), &value) +} + +//func (p ProviderEventQueueEntry) MarshalCBOR(w io.Writer) error { +// panic("implement me") // FIXME +//} +// +//func (p ProviderEventQueueEntry) UnmarshalCBOR(r io.Reader) error { +// panic("implement me") // FIXME +//} + +func (p ProviderVerifiedClaim) UnmarshalCBOR(r io.Reader) error { + panic("implement me") // FIXME +} + +func (p ProviderVerifiedClaim) MarshalCBOR(w io.Writer) error { + panic("implement me") // FIXME +} + +func (v VerifiedRewardHistoryEntry) MarshalCBOR(w io.Writer) error { + panic("implement me") // FIXME +} diff --git a/actors/builtin/methods.go b/actors/builtin/methods.go index 88c31586e..e08a4dd9b 100644 --- a/actors/builtin/methods.go +++ b/actors/builtin/methods.go @@ -26,11 +26,12 @@ var MethodsCron = struct { }{MethodConstructor, 2} var MethodsReward = struct { - Constructor abi.MethodNum - AwardBlockReward abi.MethodNum - ThisEpochReward abi.MethodNum - UpdateNetworkKPI abi.MethodNum -}{MethodConstructor, 
2, 3, 4} + Constructor abi.MethodNum + AwardBlockReward abi.MethodNum + ThisEpochReward abi.MethodNum + UpdateNetworkKPI abi.MethodNum + ClaimVerifiedDealReward abi.MethodNum +}{MethodConstructor, 2, 3, 4, 5} var MethodsMultisig = struct { Constructor abi.MethodNum @@ -52,15 +53,15 @@ var MethodsPaych = struct { }{MethodConstructor, 2, 3, 4} var MethodsMarket = struct { - Constructor abi.MethodNum - AddBalance abi.MethodNum - WithdrawBalance abi.MethodNum - PublishStorageDeals abi.MethodNum - VerifyDealsForActivation abi.MethodNum - ActivateDeals abi.MethodNum - OnMinerSectorsTerminate abi.MethodNum - ComputeDataCommitment abi.MethodNum - CronTick abi.MethodNum + Constructor abi.MethodNum + AddBalance abi.MethodNum + WithdrawBalance abi.MethodNum + PublishStorageDeals abi.MethodNum + Deprecated1 abi.MethodNum + ActivateDeals abi.MethodNum + OnMinerSectorsTerminate abi.MethodNum + ComputeDataCommitment abi.MethodNum + CronTick abi.MethodNum }{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9} var MethodsPower = struct { diff --git a/actors/builtin/miner/cbor_gen.go b/actors/builtin/miner/cbor_gen.go index 5a11f0e6b..de83f78e4 100644 --- a/actors/builtin/miner/cbor_gen.go +++ b/actors/builtin/miner/cbor_gen.go @@ -824,8 +824,9 @@ func (t *Deadline) MarshalCBOR(w io.Writer) error { return err } - // t.FaultyPower (miner.PowerPair) (struct) - if err := t.FaultyPower.MarshalCBOR(w); err != nil { + // t.FaultySectors (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.FaultySectors)); err != nil { return err } @@ -944,13 +945,18 @@ func (t *Deadline) UnmarshalCBOR(r io.Reader) error { t.TotalSectors = uint64(extra) } - // t.FaultyPower (miner.PowerPair) (struct) + // t.FaultySectors (uint64) (uint64) { - if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.FaultySectors = uint64(extra) } // t.OptimisticPoStSubmissions (cid.Cid) (struct) @@ -1004,7 +1010,7 @@ func (t *Deadline) UnmarshalCBOR(r io.Reader) error { return nil } -var lengthBufPartition = []byte{139} +var lengthBufPartition = []byte{135} func (t *Partition) MarshalCBOR(w io.Writer) error { if t == nil { @@ -1054,25 +1060,6 @@ func (t *Partition) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("failed to write cid field t.EarlyTerminated: %w", err) } - // t.LivePower (miner.PowerPair) (struct) - if err := t.LivePower.MarshalCBOR(w); err != nil { - return err - } - - // t.UnprovenPower (miner.PowerPair) (struct) - if err := t.UnprovenPower.MarshalCBOR(w); err != nil { - return err - } - - // t.FaultyPower (miner.PowerPair) (struct) - if err := t.FaultyPower.MarshalCBOR(w); err != nil { - return err - } - - // t.RecoveringPower (miner.PowerPair) (struct) - if err := t.RecoveringPower.MarshalCBOR(w); err != nil { - return err - } return nil } @@ -1090,7 +1077,7 @@ func (t *Partition) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 11 { + if extra != 7 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -1162,42 +1149,6 @@ func (t *Partition) UnmarshalCBOR(r io.Reader) error { t.EarlyTerminated = c - } - // t.LivePower (miner.PowerPair) (struct) - - { - - if err := t.LivePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.LivePower: %w", err) - } - - } - 
// t.UnprovenPower (miner.PowerPair) (struct) - - { - - if err := t.UnprovenPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.UnprovenPower: %w", err) - } - - } - // t.FaultyPower (miner.PowerPair) (struct) - - { - - if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) - } - - } - // t.RecoveringPower (miner.PowerPair) (struct) - - { - - if err := t.RecoveringPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.RecoveringPower: %w", err) - } - } return nil } @@ -1213,6 +1164,8 @@ func (t *ExpirationSet) MarshalCBOR(w io.Writer) error { return err } + scratch := make([]byte, 9) + // t.OnTimeSectors (bitfield.BitField) (struct) if err := t.OnTimeSectors.MarshalCBOR(w); err != nil { return err @@ -1228,15 +1181,18 @@ func (t *ExpirationSet) MarshalCBOR(w io.Writer) error { return err } - // t.ActivePower (miner.PowerPair) (struct) - if err := t.ActivePower.MarshalCBOR(w); err != nil { + // t.ActiveCount (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ActiveCount)); err != nil { return err } - // t.FaultyPower (miner.PowerPair) (struct) - if err := t.FaultyPower.MarshalCBOR(w); err != nil { + // t.FaultyCount (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.FaultyCount)); err != nil { return err } + return nil } @@ -1285,22 +1241,32 @@ func (t *ExpirationSet) UnmarshalCBOR(r io.Reader) error { } } - // t.ActivePower (miner.PowerPair) (struct) + // t.ActiveCount (uint64) (uint64) { - if err := t.ActivePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ActivePower: %w", err) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ActiveCount = uint64(extra) } - // t.FaultyPower (miner.PowerPair) (struct) + // t.FaultyCount (uint64) (uint64) { - if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.FaultyCount = uint64(extra) } return nil @@ -1368,7 +1334,7 @@ func (t *PowerPair) UnmarshalCBOR(r io.Reader) error { return nil } -var lengthBufSectorPreCommitOnChainInfo = []byte{133} +var lengthBufSectorPreCommitOnChainInfo = []byte{131} func (t *SectorPreCommitOnChainInfo) MarshalCBOR(w io.Writer) error { if t == nil { @@ -1401,16 +1367,6 @@ func (t *SectorPreCommitOnChainInfo) MarshalCBOR(w io.Writer) error { return err } } - - // t.DealWeight (big.Int) (struct) - if err := t.DealWeight.MarshalCBOR(w); err != nil { - return err - } - - // t.VerifiedDealWeight (big.Int) (struct) - if err := t.VerifiedDealWeight.MarshalCBOR(w); err != nil { - return err - } return nil } @@ -1428,7 +1384,7 @@ func (t *SectorPreCommitOnChainInfo) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 5 { + if extra != 3 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -1475,28 +1431,10 @@ func (t *SectorPreCommitOnChainInfo) UnmarshalCBOR(r io.Reader) error { t.PreCommitEpoch = abi.ChainEpoch(extraI) } - // t.DealWeight (big.Int) (struct) - - { - - if err := t.DealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling 
t.DealWeight: %w", err) - } - - } - // t.VerifiedDealWeight (big.Int) (struct) - - { - - if err := t.VerifiedDealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.VerifiedDealWeight: %w", err) - } - - } return nil } -var lengthBufSectorPreCommitInfo = []byte{138} +var lengthBufSectorPreCommitInfo = []byte{134} func (t *SectorPreCommitInfo) MarshalCBOR(w io.Writer) error { if t == nil { @@ -1567,30 +1505,6 @@ func (t *SectorPreCommitInfo) MarshalCBOR(w io.Writer) error { return err } } - - // t.ReplaceCapacity (bool) (bool) - if err := cbg.WriteBool(w, t.ReplaceCapacity); err != nil { - return err - } - - // t.ReplaceSectorDeadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorDeadline)); err != nil { - return err - } - - // t.ReplaceSectorPartition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorPartition)); err != nil { - return err - } - - // t.ReplaceSectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorNumber)); err != nil { - return err - } - return nil } @@ -1608,7 +1522,7 @@ func (t *SectorPreCommitInfo) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 10 { + if extra != 6 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -1746,69 +1660,10 @@ func (t *SectorPreCommitInfo) UnmarshalCBOR(r io.Reader) error { t.Expiration = abi.ChainEpoch(extraI) } - // t.ReplaceCapacity (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.ReplaceCapacity = false - case 21: - t.ReplaceCapacity = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.ReplaceSectorDeadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ReplaceSectorDeadline = uint64(extra) - - } - // t.ReplaceSectorPartition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ReplaceSectorPartition = uint64(extra) - - } - // t.ReplaceSectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ReplaceSectorNumber = abi.SectorNumber(extra) - - } return nil } -var lengthBufSectorOnChainInfo = []byte{142} +var lengthBufSectorOnChainInfo = []byte{140} func (t *SectorOnChainInfo) MarshalCBOR(w io.Writer) error { if t == nil { @@ -1880,16 +1735,6 @@ func (t *SectorOnChainInfo) MarshalCBOR(w io.Writer) error { } } - // t.DealWeight (big.Int) (struct) - if err := t.DealWeight.MarshalCBOR(w); err != nil { - return err - } - - // t.VerifiedDealWeight (big.Int) (struct) - if err := t.VerifiedDealWeight.MarshalCBOR(w); err != nil { - return err - } - // t.InitialPledge (big.Int) (struct) if err := t.InitialPledge.MarshalCBOR(w); err != nil { return err @@ -1950,7 +1795,7 @@ func (t *SectorOnChainInfo) UnmarshalCBOR(r io.Reader) error 
{ return fmt.Errorf("cbor input should be of type array") } - if extra != 14 { + if extra != 12 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -2088,24 +1933,6 @@ func (t *SectorOnChainInfo) UnmarshalCBOR(r io.Reader) error { t.Expiration = abi.ChainEpoch(extraI) } - // t.DealWeight (big.Int) (struct) - - { - - if err := t.DealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealWeight: %w", err) - } - - } - // t.VerifiedDealWeight (big.Int) (struct) - - { - - if err := t.VerifiedDealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.VerifiedDealWeight: %w", err) - } - - } // t.InitialPledge (big.Int) (struct) { diff --git a/actors/builtin/miner/deadline_state.go b/actors/builtin/miner/deadline_state.go index 101ec1fa2..0dd8e9577 100644 --- a/actors/builtin/miner/deadline_state.go +++ b/actors/builtin/miner/deadline_state.go @@ -59,8 +59,8 @@ type Deadline struct { // The total number of sectors in this deadline (incl dead). TotalSectors uint64 - // Memoized sum of faulty power in partitions. - FaultyPower PowerPair + // Memoized count of faulty sectors in partitions. + FaultySectors uint64 // AMT of optimistically accepted WindowPoSt proofs, submitted during // the current challenge window. At the end of the challenge window, @@ -188,7 +188,7 @@ func ConstructDeadline(store adt.Store) (*Deadline, error) { EarlyTerminations: bitfield.New(), LiveSectors: 0, TotalSectors: 0, - FaultyPower: NewPowerPairZero(), + FaultySectors: 0, PartitionsPoSted: bitfield.New(), OptimisticPoStSubmissions: emptyPoStSubmissionsArrayCid, PartitionsSnapshot: emptyPartitionsArrayCid, @@ -307,8 +307,8 @@ func (dl *Deadline) PopExpiredSectors(store adt.Store, until abi.ChainEpoch, qua var onTimeSectors []bitfield.BitField var earlySectors []bitfield.BitField allOnTimePledge := big.Zero() - allActivePower := NewPowerPairZero() - allFaultyPower := NewPowerPairZero() + allActiveCount := uint64(0) + allFaultyCount := uint64(0) var partitionsWithEarlyTerminations []uint64 // For each partition with an expiry, remove and collect expirations from the partition queue. @@ -327,8 +327,8 @@ func (dl *Deadline) PopExpiredSectors(store adt.Store, until abi.ChainEpoch, qua onTimeSectors = append(onTimeSectors, partExpiration.OnTimeSectors) earlySectors = append(earlySectors, partExpiration.EarlySectors) - allActivePower = allActivePower.Add(partExpiration.ActivePower) - allFaultyPower = allFaultyPower.Add(partExpiration.FaultyPower) + allActiveCount += partExpiration.ActiveCount + allFaultyCount += partExpiration.FaultyCount allOnTimePledge = big.Add(allOnTimePledge, partExpiration.OnTimePledge) if empty, err := partExpiration.EarlySectors.IsEmpty(); err != nil { @@ -371,23 +371,21 @@ func (dl *Deadline) PopExpiredSectors(store adt.Store, until abi.ChainEpoch, qua } dl.LiveSectors -= onTimeCount + earlyCount - dl.FaultyPower = dl.FaultyPower.Sub(allFaultyPower) + dl.FaultySectors = dl.FaultySectors - allFaultyCount - return NewExpirationSet(allOnTimeSectors, allEarlySectors, allOnTimePledge, allActivePower, allFaultyPower), nil + return NewExpirationSet(allOnTimeSectors, allEarlySectors, allOnTimePledge, allActiveCount, allFaultyCount) } // Adds sectors to a deadline. It's the caller's responsibility to make sure // that this deadline isn't currently "open" (i.e., being proved at this point // in time). // The sectors are assumed to be non-faulty. -// Returns the power of the added sectors (which is active yet if proven=false). 
-func (dl *Deadline) AddSectors( - store adt.Store, partitionSize uint64, proven bool, sectors []*SectorOnChainInfo, - ssize abi.SectorSize, quant builtin.QuantSpec, -) (PowerPair, error) { - totalPower := NewPowerPairZero() +// Returns the number of added sectors (which aren't active yet if proven=false). +func (dl *Deadline) AddSectors(store adt.Store, partitionSize uint64, proven bool, sectors []*SectorOnChainInfo, + quant builtin.QuantSpec) (uint64, error) { + addedCount := uint64(0) if len(sectors) == 0 { - return totalPower, nil + return addedCount, nil } // First update partitions, consuming the sectors @@ -398,7 +396,7 @@ func (dl *Deadline) AddSectors( { partitions, err := dl.PartitionsArray(store) if err != nil { - return NewPowerPairZero(), err + return 0, err } partIdx := partitions.Length() @@ -410,20 +408,20 @@ func (dl *Deadline) AddSectors( // Get/create partition to update. partition := new(Partition) if found, err := partitions.Get(partIdx, partition); err != nil { - return NewPowerPairZero(), err + return 0, err } else if !found { // This case will usually happen zero times. // It would require adding more than a full partition in one go to happen more than once. partition, err = ConstructPartition(store) if err != nil { - return NewPowerPairZero(), err + return 0, err } } // Figure out which (if any) sectors we want to add to this partition. sectorCount, err := partition.Sectors.Count() if err != nil { - return NewPowerPairZero(), err + return 0, err } if sectorCount >= partitionSize { continue @@ -434,16 +432,15 @@ func (dl *Deadline) AddSectors( sectors = sectors[size:] // Add sectors to partition. - partitionPower, err := partition.AddSectors(store, proven, partitionNewSectors, ssize, quant) - if err != nil { - return NewPowerPairZero(), err + if err := partition.AddSectors(store, proven, partitionNewSectors, quant); err != nil { + return 0, err } - totalPower = totalPower.Add(partitionPower) + addedCount = addedCount + uint64(len(partitionNewSectors)) // Save partition back. err = partitions.Set(partIdx, partition) if err != nil { - return NewPowerPairZero(), err + return 0, err } // Record deadline -> partition mapping so we can later update the deadlines. @@ -460,7 +457,7 @@ func (dl *Deadline) AddSectors( // Save partitions back. 
dl.Partitions, err = partitions.Root() if err != nil { - return NewPowerPairZero(), err + return 0, err } } @@ -468,19 +465,19 @@ func (dl *Deadline) AddSectors( { deadlineExpirations, err := LoadBitfieldQueue(store, dl.ExpirationsEpochs, quant, DeadlineExpirationAmtBitwidth) if err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to load expiration epochs: %w", err) + return 0, xerrors.Errorf("failed to load expiration epochs: %w", err) } if err = deadlineExpirations.AddManyToQueueValues(partitionDeadlineUpdates); err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to add expirations for new deadlines: %w", err) + return 0, xerrors.Errorf("failed to add expirations for new deadlines: %w", err) } if dl.ExpirationsEpochs, err = deadlineExpirations.Root(); err != nil { - return NewPowerPairZero(), err + return 0, err } } - return totalPower, nil + return addedCount, nil } func (dl *Deadline) PopEarlyTerminations(store adt.Store, maxPartitions, maxSectors uint64) (result TerminationResult, hasMore bool, err error) { @@ -588,16 +585,15 @@ func (dl *Deadline) TerminateSectors( sectors Sectors, epoch abi.ChainEpoch, partitionSectors PartitionSectorMap, - ssize abi.SectorSize, quant builtin.QuantSpec, -) (powerLost PowerPair, err error) { +) (terminatedCount uint64, err error) { partitions, err := dl.PartitionsArray(store) if err != nil { - return NewPowerPairZero(), err + return 0, err } - powerLost = NewPowerPairZero() + terminatedCount = 0 var partition Partition if err := partitionSectors.ForEach(func(partIdx uint64, sectorNos bitfield.BitField) error { if found, err := partitions.Get(partIdx, &partition); err != nil { @@ -606,7 +602,7 @@ func (dl *Deadline) TerminateSectors( return xc.ErrNotFound.Wrapf("failed to find partition %d", partIdx) } - removed, err := partition.TerminateSectors(store, sectors, epoch, sectorNos, ssize, quant) + removed, err := partition.TerminateSectors(store, sectors, epoch, sectorNos, quant) if err != nil { return xerrors.Errorf("failed to terminate sectors in partition %d: %w", partIdx, err) } @@ -625,22 +621,22 @@ func (dl *Deadline) TerminateSectors( dl.LiveSectors -= count } // note: we should _always_ have early terminations, unless the early termination bitfield is empty. - dl.FaultyPower = dl.FaultyPower.Sub(removed.FaultyPower) + dl.FaultySectors = dl.FaultySectors - removed.FaultyCount // Aggregate power lost from active sectors - powerLost = powerLost.Add(removed.ActivePower) + terminatedCount = terminatedCount + removed.ActiveCount return nil }); err != nil { - return NewPowerPairZero(), err + return 0, err } // save partitions back dl.Partitions, err = partitions.Root() if err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to persist partitions: %w", err) + return 0, xerrors.Errorf("failed to persist partitions: %w", err) } - return powerLost, nil + return terminatedCount, nil } // RemovePartitions removes the specified partitions, shifting the remaining @@ -649,27 +645,27 @@ func (dl *Deadline) TerminateSectors( // Returns an error if any of the partitions contained faulty sectors or early // terminations. 
func (dl *Deadline) RemovePartitions(store adt.Store, toRemove bitfield.BitField, quant builtin.QuantSpec) ( - live, dead bitfield.BitField, removedPower PowerPair, err error, + live, dead bitfield.BitField, err error, ) { oldPartitions, err := dl.PartitionsArray(store) if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("failed to load partitions: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("failed to load partitions: %w", err) } partitionCount := oldPartitions.Length() toRemoveSet, err := toRemove.AllMap(partitionCount) if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xc.ErrIllegalArgument.Wrapf("failed to expand partitions into map: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xc.ErrIllegalArgument.Wrapf("failed to expand partitions into map: %w", err) } // Nothing to do. if len(toRemoveSet) == 0 { - return bitfield.NewFromSet(nil), bitfield.NewFromSet(nil), NewPowerPairZero(), nil + return bitfield.NewFromSet(nil), bitfield.NewFromSet(nil), nil } for partIdx := range toRemoveSet { //nolint:nomaprange if partIdx >= partitionCount { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xc.ErrIllegalArgument.Wrapf( + return bitfield.BitField{}, bitfield.BitField{}, xc.ErrIllegalArgument.Wrapf( "partition index %d out of range [0, %d)", partIdx, partitionCount, ) } @@ -678,19 +674,18 @@ func (dl *Deadline) RemovePartitions(store adt.Store, toRemove bitfield.BitField // Should already be checked earlier, but we might as well check again. noEarlyTerminations, err := dl.EarlyTerminations.IsEmpty() if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("failed to check for early terminations: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("failed to check for early terminations: %w", err) } if !noEarlyTerminations { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("cannot remove partitions from deadline with early terminations: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("cannot remove partitions from deadline with early terminations: %w", err) } newPartitions, err := adt.MakeEmptyArray(store, DeadlinePartitionsAmtBitwidth) if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("failed to create empty array for initializing partitions: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("failed to create empty array for initializing partitions: %w", err) } allDeadSectors := make([]bitfield.BitField, 0, len(toRemoveSet)) allLiveSectors := make([]bitfield.BitField, 0, len(toRemoveSet)) - removedPower = NewPowerPairZero() // Define all of these out here to save allocations. 
var ( @@ -738,35 +733,34 @@ func (dl *Deadline) RemovePartitions(store adt.Store, toRemove bitfield.BitField allDeadSectors = append(allDeadSectors, partition.Terminated) allLiveSectors = append(allLiveSectors, liveSectors) - removedPower = removedPower.Add(partition.LivePower) return nil }); err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("while removing partitions: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("while removing partitions: %w", err) } dl.Partitions, err = newPartitions.Root() if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("failed to persist new partition table: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("failed to persist new partition table: %w", err) } dead, err = bitfield.MultiMerge(allDeadSectors...) if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("failed to merge dead sector bitfields: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("failed to merge dead sector bitfields: %w", err) } live, err = bitfield.MultiMerge(allLiveSectors...) if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("failed to merge live sector bitfields: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("failed to merge live sector bitfields: %w", err) } // Update sector counts. removedDeadSectors, err := dead.Count() if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("failed to count dead sectors: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("failed to count dead sectors: %w", err) } removedLiveSectors, err := live.Count() if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("failed to count live sectors: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("failed to count live sectors: %w", err) } dl.LiveSectors -= removedLiveSectors @@ -776,36 +770,36 @@ func (dl *Deadline) RemovePartitions(store adt.Store, toRemove bitfield.BitField { expirationEpochs, err := LoadBitfieldQueue(store, dl.ExpirationsEpochs, quant, DeadlineExpirationAmtBitwidth) if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("failed to load expiration queue: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("failed to load expiration queue: %w", err) } err = expirationEpochs.Cut(toRemove) if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("failed cut removed partitions from deadline expiration queue: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("failed cut removed partitions from deadline expiration queue: %w", err) } dl.ExpirationsEpochs, err = expirationEpochs.Root() if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), xerrors.Errorf("failed persist deadline expiration queue: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, xerrors.Errorf("failed persist deadline expiration queue: %w", err) } } - return live, dead, removedPower, nil + return live, dead, nil } func (dl *Deadline) RecordFaults( store adt.Store, sectors Sectors, ssize abi.SectorSize, quant builtin.QuantSpec, faultExpirationEpoch abi.ChainEpoch, partitionSectors PartitionSectorMap, -) 
(powerDelta PowerPair, err error) { +) (activeCountDelta int64, err error) { partitions, err := dl.PartitionsArray(store) if err != nil { - return NewPowerPairZero(), err + return 0, err } // Record partitions with some fault, for subsequently indexing in the deadline. // Duplicate entries don't matter, they'll be stored in a bitfield (a set). partitionsWithFault := make([]uint64, 0, len(partitionSectors)) - powerDelta = NewPowerPairZero() + activeCountDelta = 0 if err := partitionSectors.ForEach(func(partIdx uint64, sectorNos bitfield.BitField) error { var partition Partition if found, err := partitions.Get(partIdx, &partition); err != nil { @@ -814,14 +808,14 @@ func (dl *Deadline) RecordFaults( return xc.ErrNotFound.Wrapf("no such partition %d", partIdx) } - newFaults, partitionPowerDelta, partitionNewFaultyPower, err := partition.RecordFaults( + newFaults, partitionActiveCountDelta, partitionNewFaultCount, err := partition.RecordFaults( store, sectors, sectorNos, faultExpirationEpoch, ssize, quant, ) if err != nil { return xerrors.Errorf("failed to declare faults in partition %d: %w", partIdx, err) } - dl.FaultyPower = dl.FaultyPower.Add(partitionNewFaultyPower) - powerDelta = powerDelta.Add(partitionPowerDelta) + dl.FaultySectors += partitionNewFaultCount + activeCountDelta += partitionActiveCountDelta if empty, err := newFaults.IsEmpty(); err != nil { return xerrors.Errorf("failed to count new faults: %w", err) } else if !empty { @@ -835,20 +829,20 @@ func (dl *Deadline) RecordFaults( return nil }); err != nil { - return NewPowerPairZero(), err + return 0, err } dl.Partitions, err = partitions.Root() if err != nil { - return NewPowerPairZero(), xc.ErrIllegalState.Wrapf("failed to store partitions root: %w", err) + return 0, xc.ErrIllegalState.Wrapf("failed to store partitions root: %w", err) } err = dl.AddExpirationPartitions(store, faultExpirationEpoch, partitionsWithFault, quant) if err != nil { - return NewPowerPairZero(), xc.ErrIllegalState.Wrapf("failed to update expirations for partitions with faults: %w", err) + return 0, xc.ErrIllegalState.Wrapf("failed to update expirations for partitions with faults: %w", err) } - return powerDelta, nil + return activeCountDelta, nil } func (dl *Deadline) DeclareFaultsRecovered( @@ -868,7 +862,7 @@ func (dl *Deadline) DeclareFaultsRecovered( return xc.ErrNotFound.Wrapf("no such partition %d", partIdx) } - if err = partition.DeclareFaultsRecovered(sectors, ssize, sectorNos); err != nil { + if err = partition.DeclareFaultsRecovered(sectorNos); err != nil { return xc.ErrIllegalState.Wrapf("failed to add recoveries: %w", err) } @@ -894,14 +888,14 @@ func (dl *Deadline) DeclareFaultsRecovered( // faulty and clearing failed recoveries. It returns the power delta, and any // power that should be penalized (new faults and failed recoveries). 
func (dl *Deadline) ProcessDeadlineEnd(store adt.Store, quant builtin.QuantSpec, faultExpirationEpoch abi.ChainEpoch, sectors cid.Cid) ( - powerDelta, penalizedPower PowerPair, err error, + activeCountDelta int64, penalisedCount uint64, err error, ) { - powerDelta = NewPowerPairZero() - penalizedPower = NewPowerPairZero() + activeCountDelta = 0 + penalisedCount = 0 partitions, err := dl.PartitionsArray(store) if err != nil { - return powerDelta, penalizedPower, xerrors.Errorf("failed to load partitions: %w", err) + return activeCountDelta, penalisedCount, xerrors.Errorf("failed to load partitions: %w", err) } detectedAny := false @@ -909,7 +903,7 @@ func (dl *Deadline) ProcessDeadlineEnd(store adt.Store, quant builtin.QuantSpec, for partIdx := uint64(0); partIdx < partitions.Length(); partIdx++ { proven, err := dl.PartitionsPoSted.IsSet(partIdx) if err != nil { - return powerDelta, penalizedPower, xerrors.Errorf("failed to check submission for partition %d: %w", partIdx, err) + return activeCountDelta, penalisedCount, xerrors.Errorf("failed to check submission for partition %d: %w", partIdx, err) } if proven { continue @@ -918,56 +912,72 @@ func (dl *Deadline) ProcessDeadlineEnd(store adt.Store, quant builtin.QuantSpec, var partition Partition found, err := partitions.Get(partIdx, &partition) if err != nil { - return powerDelta, penalizedPower, xerrors.Errorf("failed to load partition %d: %w", partIdx, err) + return activeCountDelta, penalisedCount, xerrors.Errorf("failed to load partition %d: %w", partIdx, err) } if !found { - return powerDelta, penalizedPower, xerrors.Errorf("no partition %d", partIdx) + return activeCountDelta, penalisedCount, xerrors.Errorf("no partition %d", partIdx) } + noRecoveries, err := partition.Recoveries.IsEmpty() + if err != nil { + return activeCountDelta, penalisedCount, xerrors.Errorf("failed to count recoveries for partition %d: %w", partIdx, err) + } + faultCount, err := partition.Faults.Count() + if err != nil { + return activeCountDelta, penalisedCount, xerrors.Errorf("failed to count faults for partition %d: %w", partIdx, err) + } + liveSectors, err := partition.LiveSectors() + if err != nil { + return activeCountDelta, penalisedCount, xerrors.Errorf("failed to compute live sectors for partition %d: %w", partIdx, err) + } + liveCount, err := liveSectors.Count() + if err != nil { + return activeCountDelta, penalisedCount, xerrors.Errorf("failed to count live sectors for partition %d: %w", partIdx, err) + } // If we have no recovering power/sectors, and all power is faulty, skip // this. This lets us skip some work if a miner repeatedly fails to PoSt. - if partition.RecoveringPower.IsZero() && partition.FaultyPower.Equals(partition.LivePower) { + if noRecoveries && faultCount == liveCount { continue } // Ok, we actually need to process this partition. Make sure we save the partition state back. detectedAny = true - partPowerDelta, partPenalizedPower, partNewFaultyPower, err := partition.RecordMissedPost(store, faultExpirationEpoch, quant) + partActiveCountDelta, partPenalizedCount, partNewFaultCount, err := partition.RecordMissedPost(store, faultExpirationEpoch, quant) if err != nil { - return powerDelta, penalizedPower, xerrors.Errorf("failed to record missed PoSt for partition %v: %w", partIdx, err) + return activeCountDelta, penalisedCount, xerrors.Errorf("failed to record missed PoSt for partition %v: %w", partIdx, err) } // We marked some sectors faulty, we need to record the new // expiration. 
We don't want to do this if we're just penalizing // the miner for failing to recover power. - if !partNewFaultyPower.IsZero() { + if partNewFaultCount > 0 { rescheduledPartitions = append(rescheduledPartitions, partIdx) } // Save new partition state. err = partitions.Set(partIdx, &partition) if err != nil { - return powerDelta, penalizedPower, xerrors.Errorf("failed to update partition %v: %w", partIdx, err) + return activeCountDelta, penalisedCount, xerrors.Errorf("failed to update partition %v: %w", partIdx, err) } - dl.FaultyPower = dl.FaultyPower.Add(partNewFaultyPower) + dl.FaultySectors = dl.FaultySectors + partNewFaultCount - powerDelta = powerDelta.Add(partPowerDelta) - penalizedPower = penalizedPower.Add(partPenalizedPower) + activeCountDelta = activeCountDelta + partActiveCountDelta + penalisedCount = penalisedCount + partPenalizedCount } // Save modified deadline state. if detectedAny { dl.Partitions, err = partitions.Root() if err != nil { - return powerDelta, penalizedPower, xc.ErrIllegalState.Wrapf("failed to store partitions: %w", err) + return activeCountDelta, penalisedCount, xc.ErrIllegalState.Wrapf("failed to store partitions: %w", err) } } err = dl.AddExpirationPartitions(store, faultExpirationEpoch, rescheduledPartitions, quant) if err != nil { - return powerDelta, penalizedPower, xc.ErrIllegalState.Wrapf("failed to update deadline expiration queue: %w", err) + return activeCountDelta, penalisedCount, xc.ErrIllegalState.Wrapf("failed to update deadline expiration queue: %w", err) } // Reset PoSt submissions, snapshot proofs. @@ -977,16 +987,16 @@ func (dl *Deadline) ProcessDeadlineEnd(store adt.Store, quant builtin.QuantSpec, dl.OptimisticPoStSubmissionsSnapshot = dl.OptimisticPoStSubmissions dl.OptimisticPoStSubmissions, err = adt.StoreEmptyArray(store, DeadlineOptimisticPoStSubmissionsAmtBitwidth) if err != nil { - return powerDelta, penalizedPower, xerrors.Errorf("failed to clear pending proofs array: %w", err) + return activeCountDelta, penalisedCount, xerrors.Errorf("failed to clear pending proofs array: %w", err) } - return powerDelta, penalizedPower, nil + return activeCountDelta, penalisedCount, nil } type PoStResult struct { - // Power activated or deactivated (positive or negative). - PowerDelta PowerPair - // Powers used for calculating penalties. - NewFaultyPower, RetractedRecoveryPower, RecoveredPower PowerPair + // Sectors activated or deactivated (positive or negative). + ActiveCountDelta int64 + // Counts used for calculating penalties. + NewFaultyCount, RetractedRecoveryCount, RecoveredCount uint64 // Sectors is a bitfield of all sectors in the proven partitions. Sectors bitfield.BitField // IgnoredSectors is a subset of Sectors that should be ignored. @@ -999,8 +1009,8 @@ type PoStResult struct { // and marking skipped sectors as faulty. // // It returns a PoStResult containing the list of proven and skipped sectors and -// changes to power (newly faulty power, power that should have been proven -// recovered but wasn't, and newly recovered power). +// changes to sector state counts (newly faulty, should have been proven +// recovered but wasn't, and newly recovered). // // NOTE: This function does not actually _verify_ any proofs. 
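Since PoStResult now carries plain sector counts instead of PowerPair values, raw-byte power can still be derived at a call site as count times sector size, while QA power would additionally need deal weights. A minimal caller-side sketch, illustrative only and not part of this patch (the helper name and its placement are assumptions):

	// Illustrative only: derive raw-byte power figures from a count-based PoStResult.
	// Assumes raw power is count * sector size; QA power would need per-sector deal
	// weights, which this refactor no longer tracks in the result.
	func rawPowerFromResult(ssize abi.SectorSize, result *PoStResult) (delta, newFaulty abi.StoragePower) {
		sectorSize := big.NewIntUnsigned(uint64(ssize))
		delta = big.Mul(big.NewInt(result.ActiveCountDelta), sectorSize)           // may be negative
		newFaulty = big.Mul(big.NewIntUnsigned(result.NewFaultyCount), sectorSize) // always >= 0
		return delta, newFaulty
	}
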
func (dl *Deadline) RecordProvenSectors( @@ -1036,10 +1046,10 @@ func (dl *Deadline) RecordProvenSectors( allSectors := make([]bitfield.BitField, 0, len(postPartitions)) allIgnored := make([]bitfield.BitField, 0, len(postPartitions)) - newFaultyPowerTotal := NewPowerPairZero() - retractedRecoveryPowerTotal := NewPowerPairZero() - recoveredPowerTotal := NewPowerPairZero() - powerDelta := NewPowerPairZero() + newFaultyCountTotal := uint64(0) + retractedRecoveryCountTotal := uint64(0) + recoveredCountTotal := uint64(0) + activeCountDelta := int64(0) var rescheduledPartitions []uint64 // Accumulate sectors info for proof verification. @@ -1054,7 +1064,7 @@ func (dl *Deadline) RecordProvenSectors( // Process new faults and accumulate new faulty power. // This updates the faults in partition state ahead of calculating the sectors to include for proof. - newPowerDelta, newFaultPower, retractedRecoveryPower, hasNewFaults, err := partition.RecordSkippedFaults( + newActiveCountDelta, newFaultCount, retractedRecoveryCount, hasNewFaults, err := partition.RecordSkippedFaults( store, sectors, ssize, quant, faultExpiration, post.Skipped, ) if err != nil { @@ -1073,7 +1083,11 @@ func (dl *Deadline) RecordProvenSectors( } // Finally, activate power for newly proven sectors. - newPowerDelta = newPowerDelta.Add(partition.ActivateUnproven()) + activated, err := partition.ActivateUnproven() + if err != nil { + return nil, xerrors.Errorf("failed to activate unproven sectors for partition %d: %w", post.Index, err) + } + newActiveCountDelta = newActiveCountDelta + int64(activated) // This will be rolled back if the method aborts with a failed proof. err = partitions.Set(post.Index, &partition) @@ -1081,10 +1095,10 @@ func (dl *Deadline) RecordProvenSectors( return nil, xc.ErrIllegalState.Wrapf("failed to update partition %v: %w", post.Index, err) } - newFaultyPowerTotal = newFaultyPowerTotal.Add(newFaultPower) - retractedRecoveryPowerTotal = retractedRecoveryPowerTotal.Add(retractedRecoveryPower) - recoveredPowerTotal = recoveredPowerTotal.Add(recoveredPower) - powerDelta = powerDelta.Add(newPowerDelta).Add(recoveredPower) + newFaultyCountTotal = newFaultyCountTotal + newFaultCount + retractedRecoveryCountTotal = retractedRecoveryCountTotal + retractedRecoveryCount + recoveredCountTotal = recoveredCountTotal + recoveredPower + activeCountDelta = activeCountDelta + newActiveCountDelta + int64(recoveredPower) // Record the post. dl.PartitionsPoSted.Set(post.Index) @@ -1102,7 +1116,7 @@ func (dl *Deadline) RecordProvenSectors( } // Save everything back. 
- dl.FaultyPower = dl.FaultyPower.Sub(recoveredPowerTotal).Add(newFaultyPowerTotal) + dl.FaultySectors = (dl.FaultySectors - recoveredCountTotal) + newFaultyCountTotal dl.Partitions, err = partitions.Root() if err != nil { @@ -1122,10 +1136,10 @@ func (dl *Deadline) RecordProvenSectors( return &PoStResult{ Sectors: allSectorNos, IgnoredSectors: allIgnoredSectorNos, - PowerDelta: powerDelta, - NewFaultyPower: newFaultyPowerTotal, - RecoveredPower: recoveredPowerTotal, - RetractedRecoveryPower: retractedRecoveryPowerTotal, + ActiveCountDelta: activeCountDelta, + NewFaultyCount: newFaultyCountTotal, + RecoveredCount: recoveredCountTotal, + RetractedRecoveryCount: retractedRecoveryCountTotal, Partitions: partitionIndexes, }, nil } @@ -1184,7 +1198,6 @@ func (dl *Deadline) TakePoStProofs(store adt.Store, idx uint64) (partitions bitf type DisputeInfo struct { AllSectorNos, IgnoredSectorNos bitfield.BitField DisputedSectors PartitionSectorMap - DisputedPower PowerPair } // LoadPartitionsForDispute @@ -1196,7 +1209,6 @@ func (dl *Deadline) LoadPartitionsForDispute(store adt.Store, partitions bitfiel var allSectors, allIgnored []bitfield.BitField disputedSectors := make(PartitionSectorMap) - disputedPower := NewPowerPairZero() err = partitions.ForEach(func(partIdx uint64) error { var partitionSnapshot Partition if found, err := partitionsSnapshot.Get(partIdx, &partitionSnapshot); err != nil { @@ -1211,7 +1223,9 @@ func (dl *Deadline) LoadPartitionsForDispute(store adt.Store, partitions bitfiel allIgnored = append(allIgnored, partitionSnapshot.Terminated) allIgnored = append(allIgnored, partitionSnapshot.Unproven) - // Record active sectors for marking faults. + // Record active sectors for marking faults and computing penalties. + // NOTE: This also includes sectors that were activated at the end of the last challenge + // window, and sectors that have since expired. active, err := partitionSnapshot.ActiveSectors() if err != nil { return err @@ -1221,13 +1235,6 @@ func (dl *Deadline) LoadPartitionsForDispute(store adt.Store, partitions bitfiel return err } - // Record disputed power for penalties. - // - // NOTE: This also includes power that was - // activated at the end of the last challenge - // window, and power from sectors that have since - // expired. 
- disputedPower = disputedPower.Add(partitionSnapshot.ActivePower()) return nil }) if err != nil { @@ -1248,7 +1255,6 @@ func (dl *Deadline) LoadPartitionsForDispute(store adt.Store, partitions bitfiel AllSectorNos: allSectorsNos, IgnoredSectorNos: allIgnoredNos, DisputedSectors: disputedSectors, - DisputedPower: disputedPower, }, nil } @@ -1289,10 +1295,9 @@ func (d *Deadline) ValidateState() error { if d.LiveSectors > d.TotalSectors { return xerrors.Errorf("Deadline left with more live sectors than total: %v", d) } - - if d.FaultyPower.Raw.LessThan(big.Zero()) || d.FaultyPower.QA.LessThan(big.Zero()) { - return xerrors.Errorf("Deadline left with negative faulty power: %v", d) - } + //if d.FaultyPower.Raw.LessThan(big.Zero()) || d.FaultyPower.QA.LessThan(big.Zero()) { + // return xerrors.Errorf("Deadline left with negative faulty power: %v", d) + //} return nil } diff --git a/actors/builtin/miner/deadline_state_test.go b/actors/builtin/miner/deadline_state_test.go index f3bb96f77..0dc57a492 100644 --- a/actors/builtin/miner/deadline_state_test.go +++ b/actors/builtin/miner/deadline_state_test.go @@ -46,8 +46,8 @@ func TestDeadlines(t *testing.T) { sectors: allSectors, } - sectorPower := func(t *testing.T, sectorNos ...uint64) miner.PowerPair { - return miner.PowerForSectors(sectorSize, selectSectors(t, allSectors, bf(sectorNos...))) + sectorPower := func(t *testing.T, sectorNos ...uint64) abi.StoragePower { + return miner.SectorsPower(sectorSize, len(sectorNos)) } // @@ -60,8 +60,8 @@ func TestDeadlines(t *testing.T) { // Partition 2: sectors 5, 6, 7, 8 // Partition 3: sectors 9 addSectors := func(t *testing.T, store adt.Store, dl *miner.Deadline, prove bool) { - power := miner.PowerForSectors(sectorSize, sectors) - activatedPower, err := dl.AddSectors(store, partitionSize, false, sectors, sectorSize, quantSpec) + power := miner.SectorsPower(sectorSize, len(sectors)) + activatedPower, err := dl.AddSectors(store, partitionSize, false, sectors, quantSpec) require.NoError(t, err) assert.True(t, activatedPower.Equals(power)) @@ -81,7 +81,7 @@ func TestDeadlines(t *testing.T) { // Prove everything result, err := dl.RecordProvenSectors(store, sectorArr, sectorSize, quantSpec, 0, []miner.PoStPartition{{Index: 0}, {Index: 1}, {Index: 2}}) require.NoError(t, err) - require.True(t, result.PowerDelta.Equals(power)) + require.True(t, result.ActiveCountDelta.Equals(power)) sectorArrRoot, err := sectorArr.Root() require.NoError(t, err) @@ -108,7 +108,7 @@ func TestDeadlines(t *testing.T) { removedPower, err := dl.TerminateSectors(store, sectorsArr(t, store, sectors), 15, miner.PartitionSectorMap{ 0: bf(1, 3), 1: bf(6), - }, sectorSize, quantSpec) + }, quantSpec) require.NoError(t, err) expectedPower := miner.NewPowerPairZero() @@ -158,7 +158,7 @@ func TestDeadlines(t *testing.T) { require.NoError(t, err, "should have removed partitions") assertBitfieldEquals(t, live, 2, 4) assertBitfieldEquals(t, dead, 1, 3) - livePower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, live)) + livePower := miner.SectorsPower(sectorSize, 2) require.True(t, livePower.Equals(removedPower)) dlState.withTerminations(6). 
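Several of the updated assertions in these tests call a miner.SectorsPower helper that is not defined anywhere in this diff. A plausible shape for it, shown for reference only (assumed from the call sites, not taken from the patch), is raw-byte power as sector size times count:

	// Assumed helper shape, inferred from the call sites in these tests:
	// raw-byte power of `count` sectors of size `ssize`.
	func SectorsPower(ssize abi.SectorSize, count int) abi.StoragePower {
		return big.Mul(big.NewIntUnsigned(uint64(ssize)), big.NewInt(int64(count)))
	}
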
@@ -360,11 +360,11 @@ func TestDeadlines(t *testing.T) { removedPower, err := dl.TerminateSectors(store, sectorArr, 15, miner.PartitionSectorMap{ 0: bf(1, 3), 1: bf(6), - }, sectorSize, quantSpec) + }, quantSpec) require.NoError(t, err) // Sector 3 active, 1, 6 faulty - expectedPowerLoss := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, bf(3))) + expectedPowerLoss := miner.SectorPower(sectorSize) require.True(t, expectedPowerLoss.Equals(removedPower), "dlState to remove power for terminated sectors") dlState.withTerminations(1, 3, 6). @@ -386,7 +386,7 @@ func TestDeadlines(t *testing.T) { removedPower, err := dl.TerminateSectors(store, sectorArr, 15, miner.PartitionSectorMap{ 0: bf(1, 3), 1: bf(6), - }, sectorSize, quantSpec) + }, quantSpec) require.NoError(t, err) // Sector 3 unproven, 1, 6 faulty @@ -411,7 +411,7 @@ func TestDeadlines(t *testing.T) { sectorArr := sectorsArr(t, store, sectors) _, err := dl.TerminateSectors(store, sectorArr, 15, miner.PartitionSectorMap{ 0: bf(6), - }, sectorSize, quantSpec) + }, quantSpec) require.Error(t, err) require.Contains(t, err.Error(), "can only terminate live sectors") }) @@ -425,7 +425,7 @@ func TestDeadlines(t *testing.T) { sectorArr := sectorsArr(t, store, sectors) _, err := dl.TerminateSectors(store, sectorArr, 15, miner.PartitionSectorMap{ 4: bf(6), - }, sectorSize, quantSpec) + }, quantSpec) require.Error(t, err) require.Contains(t, err.Error(), "failed to find partition 4") }) @@ -439,7 +439,7 @@ func TestDeadlines(t *testing.T) { sectorArr := sectorsArr(t, store, sectors) _, err := dl.TerminateSectors(store, sectorArr, 15, miner.PartitionSectorMap{ 0: bf(1, 2), - }, sectorSize, quantSpec) + }, quantSpec) require.Error(t, err) require.Contains(t, err.Error(), "can only terminate live sectors") }) @@ -506,9 +506,9 @@ func TestDeadlines(t *testing.T) { addSectors(t, store, dl, true) // add an inactive sector - power, err := dl.AddSectors(store, partitionSize, false, extraSectors, sectorSize, quantSpec) + power, err := dl.AddSectors(store, partitionSize, false, extraSectors, quantSpec) require.NoError(t, err) - expectedPower := miner.PowerForSectors(sectorSize, extraSectors) + expectedPower := miner.SectorsPower(sectorSize, len(extraSectors)) assert.True(t, expectedPower.Equals(power)) sectorArr := sectorsArr(t, store, allSectors) @@ -520,9 +520,9 @@ func TestDeadlines(t *testing.T) { require.NoError(t, err) assertBitfieldEquals(t, postResult1.Sectors, 1, 2, 3, 4, 5, 6, 7, 8) assertEmptyBitfield(t, postResult1.IgnoredSectors) - require.True(t, postResult1.NewFaultyPower.Equals(miner.NewPowerPairZero())) - require.True(t, postResult1.RetractedRecoveryPower.Equals(miner.NewPowerPairZero())) - require.True(t, postResult1.RecoveredPower.Equals(miner.NewPowerPairZero())) + require.True(t, postResult1.NewFaultyCount.Equals(miner.NewPowerPairZero())) + require.True(t, postResult1.RetractedRecoveryCount.Equals(miner.NewPowerPairZero())) + require.True(t, postResult1.RecoveredCount.Equals(miner.NewPowerPairZero())) // First two partitions posted dlState.withPosts(0, 1). 
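The count fields compared above are plain integers (uint64, with ActiveCountDelta an int64), so they carry no Equals or IsZero methods; with count-based state the zero checks read most naturally as direct integer assertions. An illustrative form, assuming the testify require package these tests already import:

	// Illustrative count-based forms of the zero assertions above (sketch, not part of the patch):
	require.EqualValues(t, 0, postResult1.NewFaultyCount)
	require.EqualValues(t, 0, postResult1.RetractedRecoveryCount)
	require.EqualValues(t, 0, postResult1.RecoveredCount)
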
@@ -539,11 +539,11 @@ func TestDeadlines(t *testing.T) { require.NoError(t, err) assertBitfieldEquals(t, postResult2.Sectors, 9, 10) assertEmptyBitfield(t, postResult2.IgnoredSectors) - require.True(t, postResult2.NewFaultyPower.Equals(miner.NewPowerPairZero())) - require.True(t, postResult2.RetractedRecoveryPower.Equals(miner.NewPowerPairZero())) - require.True(t, postResult2.RecoveredPower.Equals(miner.NewPowerPairZero())) + require.True(t, postResult2.NewFaultyCount.Equals(miner.NewPowerPairZero())) + require.True(t, postResult2.RetractedRecoveryCount.Equals(miner.NewPowerPairZero())) + require.True(t, postResult2.RecoveredCount.Equals(miner.NewPowerPairZero())) // activate sector 10 - require.True(t, postResult2.PowerDelta.Equals(sectorPower(t, 10))) + require.True(t, postResult2.ActiveCountDelta.Equals(sectorPower(t, 10))) // All 3 partitions posted, unproven sector 10 proven and power activated. dlState.withPosts(0, 1, 2). @@ -579,9 +579,9 @@ func TestDeadlines(t *testing.T) { addThenMarkFaulty(t, store, dl, true) // add an inactive sector - power, err := dl.AddSectors(store, partitionSize, false, extraSectors, sectorSize, quantSpec) + power, err := dl.AddSectors(store, partitionSize, false, extraSectors, quantSpec) require.NoError(t, err) - expectedPower := miner.PowerForSectors(sectorSize, extraSectors) + expectedPower := miner.SectorsPower(sectorSize, len(extraSectors)) assert.True(t, expectedPower.Equals(power)) sectorArr := sectorsArr(t, store, allSectors) @@ -617,13 +617,13 @@ func TestDeadlines(t *testing.T) { assertBitfieldEquals(t, postResult.Sectors, 1, 2, 3, 4, 5, 6, 7, 8) assertBitfieldEquals(t, postResult.IgnoredSectors, 1, 5, 7) // sector 7 is newly faulty - require.True(t, postResult.NewFaultyPower.Equals(sectorPower(t, 7))) + require.True(t, postResult.NewFaultyCount.Equals(sectorPower(t, 7))) // we failed to recover 1 (retracted) - require.True(t, postResult.RetractedRecoveryPower.Equals(sectorPower(t, 1))) + require.True(t, postResult.RetractedRecoveryCount.Equals(sectorPower(t, 1))) // we recovered 6 - require.True(t, postResult.RecoveredPower.Equals(sectorPower(t, 6))) + require.True(t, postResult.RecoveredCount.Equals(sectorPower(t, 6))) // no power delta from these deadlines. - require.True(t, postResult.PowerDelta.IsZero()) + require.True(t, postResult.ActiveCountDelta.IsZero()) // First two partitions should be posted. dlState.withPosts(0, 1). @@ -666,9 +666,9 @@ func TestDeadlines(t *testing.T) { addSectors(t, store, dl, true) // add an inactive sector - power, err := dl.AddSectors(store, partitionSize, false, extraSectors, sectorSize, quantSpec) + power, err := dl.AddSectors(store, partitionSize, false, extraSectors, quantSpec) require.NoError(t, err) - expectedPower := miner.PowerForSectors(sectorSize, extraSectors) + expectedPower := miner.SectorsPower(sectorSize, len(extraSectors)) assert.True(t, expectedPower.Equals(power)) sectorArr := sectorsArr(t, store, allSectors) @@ -682,10 +682,10 @@ func TestDeadlines(t *testing.T) { require.NoError(t, err) assertBitfieldEquals(t, postResult1.Sectors, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) assertBitfieldEquals(t, postResult1.IgnoredSectors, 10) - require.True(t, postResult1.NewFaultyPower.Equals(sectorPower(t, 10))) - require.True(t, postResult1.PowerDelta.IsZero()) // not proven yet. 
- require.True(t, postResult1.RetractedRecoveryPower.IsZero()) - require.True(t, postResult1.RecoveredPower.IsZero()) + require.True(t, postResult1.NewFaultyCount.Equals(sectorPower(t, 10))) + require.True(t, postResult1.ActiveCountDelta.IsZero()) // not proven yet. + require.True(t, postResult1.RetractedRecoveryCount.IsZero()) + require.True(t, postResult1.RecoveredCount.IsZero()) // All posted dlState.withPosts(0, 1, 2). @@ -723,9 +723,9 @@ func TestDeadlines(t *testing.T) { addSectors(t, store, dl, true) // add an inactive sector - power, err := dl.AddSectors(store, partitionSize, false, extraSectors, sectorSize, quantSpec) + power, err := dl.AddSectors(store, partitionSize, false, extraSectors, quantSpec) require.NoError(t, err) - expectedPower := miner.PowerForSectors(sectorSize, extraSectors) + expectedPower := miner.SectorsPower(sectorSize, len(extraSectors)) assert.True(t, expectedPower.Equals(power)) sectorArr := sectorsArr(t, store, allSectors) @@ -745,9 +745,9 @@ func TestDeadlines(t *testing.T) { addSectors(t, store, dl, true) // add an inactive sector - power, err := dl.AddSectors(store, partitionSize, false, extraSectors, sectorSize, quantSpec) + power, err := dl.AddSectors(store, partitionSize, false, extraSectors, quantSpec) require.NoError(t, err) - expectedPower := miner.PowerForSectors(sectorSize, extraSectors) + expectedPower := miner.SectorsPower(sectorSize, len(extraSectors)) assert.True(t, expectedPower.Equals(power)) sectorArr := sectorsArr(t, store, allSectors) @@ -805,11 +805,11 @@ func TestDeadlines(t *testing.T) { assertBitfieldEquals(t, postResult.Sectors, 1, 2, 3, 4, 5, 6, 7, 8, 9) assertBitfieldEquals(t, postResult.IgnoredSectors, 1, 5) // All faults were declared. - require.True(t, postResult.NewFaultyPower.Equals(miner.NewPowerPairZero())) + require.True(t, postResult.NewFaultyCount.Equals(miner.NewPowerPairZero())) // we didn't fail to recover anything. - require.True(t, postResult.RetractedRecoveryPower.Equals(miner.NewPowerPairZero())) + require.True(t, postResult.RetractedRecoveryCount.Equals(miner.NewPowerPairZero())) // we recovered 6. - require.True(t, postResult.RecoveredPower.Equals(sectorPower(t, 6))) + require.True(t, postResult.RecoveredCount.Equals(sectorPower(t, 6))) // First two partitions should be posted. dlState.withPosts(0, 1, 2). @@ -978,7 +978,7 @@ func checkDeadlineInvariants( allUnproven bitfield.BitField, ) { msgs := &builtin.MessageAccumulator{} - summary := miner.CheckDeadlineStateInvariants(dl, store, quant, ssize, sectorsAsMap(sectors), msgs) + summary := miner.CheckDeadlineStateInvariants(dl, store, quant, sectorsAsMap(sectors), msgs) assert.True(t, msgs.IsEmpty(), strings.Join(msgs.Messages(), "\n")) allSectors = summary.AllSectors diff --git a/actors/builtin/miner/expiration_queue.go b/actors/builtin/miner/expiration_queue.go index 556838f9c..04be93207 100644 --- a/actors/builtin/miner/expiration_queue.go +++ b/actors/builtin/miner/expiration_queue.go @@ -2,6 +2,7 @@ package miner import ( "fmt" + "math" "sort" "github.com/filecoin-project/go-bitfield" @@ -18,33 +19,49 @@ import ( // ExpirationSet is a collection of sector numbers that are expiring, either due to // expected "on-time" expiration at the end of their life, or unexpected "early" termination // due to being faulty for too long consecutively. 
-// Note that there is not a direct correspondence between on-time sectors and active power;
+// Note that there is not a direct correspondence between on-time sectors and active sectors;
 // a sector may be faulty but expiring on-time if it faults just prior to expected termination.
-// Early sectors are always faulty, and active power always represents on-time sectors.
+// Early sectors are always faulty, and active sectors always represent on-time sectors.
 type ExpirationSet struct {
 	OnTimeSectors bitfield.BitField // Sectors expiring "on time" at the end of their committed life
 	EarlySectors  bitfield.BitField // Sectors expiring "early" due to being faulty for too long
 	OnTimePledge  abi.TokenAmount   // Pledge total for the on-time sectors
-	ActivePower   PowerPair         // Power that is currently active (not faulty)
-	FaultyPower   PowerPair         // Power that is currently faulty
+	ActiveCount   uint64            // Count of sectors that are currently active (not faulty)
+	FaultyCount   uint64            // Count of sectors that are currently faulty
 }
 
 func NewExpirationSetEmpty() *ExpirationSet {
-	return NewExpirationSet(bitfield.New(), bitfield.New(), big.Zero(), NewPowerPairZero(), NewPowerPairZero())
+	set, _ := NewExpirationSet(bitfield.New(), bitfield.New(), big.Zero(), 0, 0)
+	return set
 }
 
-func NewExpirationSet(onTimeSectors, earlySectors bitfield.BitField, onTimePledge abi.TokenAmount, activePower, faultyPower PowerPair) *ExpirationSet {
-	return &ExpirationSet{
+func NewExpirationSet(onTimeSectors, earlySectors bitfield.BitField, onTimePledge abi.TokenAmount, activeCount, faultyCount uint64) (*ExpirationSet, error) {
+	set := &ExpirationSet{
 		OnTimeSectors: onTimeSectors,
 		EarlySectors:  earlySectors,
 		OnTimePledge:  onTimePledge,
-		ActivePower:   activePower,
-		FaultyPower:   faultyPower,
+		ActiveCount:   activeCount,
+		FaultyCount:   faultyCount,
 	}
+	if err := set.ValidateState(); err != nil {
+		return nil, err
+	}
+	return set, nil
 }
 
-// Adds sectors and power to the expiration set in place.
-func (es *ExpirationSet) Add(onTimeSectors, earlySectors bitfield.BitField, onTimePledge abi.TokenAmount, activePower, faultyPower PowerPair) error {
+// Adds sectors to the expiration set in place.
+func (es *ExpirationSet) Add(onTimeSectors, earlySectors bitfield.BitField, onTimePledge abi.TokenAmount, activeCount, faultyCount uint64) error {
+	// Check for negative pledge and count overflow.
+	if onTimePledge.LessThan(big.Zero()) {
+		return xerrors.Errorf("negative pledge added to expiration set: %+v", es)
+	}
+	if activeCount > math.MaxUint64-es.ActiveCount {
+		return xerrors.Errorf("expiration set active count overflow: %+v", es)
+	}
+	if faultyCount > math.MaxUint64-es.FaultyCount {
+		return xerrors.Errorf("expiration set faulty count overflow: %+v", es)
+	}
+
 	var err error
 	if es.OnTimeSectors, err = bitfield.MergeBitFields(es.OnTimeSectors, onTimeSectors); err != nil {
 		return err
@@ -53,13 +70,13 @@ func (es *ExpirationSet) Add(onTimeSectors, earlySectors bitfield.BitField, onTi
 		return err
 	}
 	es.OnTimePledge = big.Add(es.OnTimePledge, onTimePledge)
-	es.ActivePower = es.ActivePower.Add(activePower)
-	es.FaultyPower = es.FaultyPower.Add(faultyPower)
+	es.ActiveCount += activeCount
+	es.FaultyCount += faultyCount
 	return es.ValidateState()
 }
 
-// Removes sectors and power from the expiration set in place.
-func (es *ExpirationSet) Remove(onTimeSectors, earlySectors bitfield.BitField, onTimePledge abi.TokenAmount, activePower, faultyPower PowerPair) error {
+// Removes sectors from the expiration set in place.
+func (es *ExpirationSet) Remove(onTimeSectors, earlySectors bitfield.BitField, onTimePledge abi.TokenAmount, activeCount, faultyCount uint64) error { // Check for sector intersection. This could be cheaper with a combined intersection/difference method used below. if found, err := util.BitFieldContainsAll(es.OnTimeSectors, onTimeSectors); err != nil { return err @@ -79,71 +96,62 @@ func (es *ExpirationSet) Remove(onTimeSectors, earlySectors bitfield.BitField, o if es.EarlySectors, err = bitfield.SubtractBitField(es.EarlySectors, earlySectors); err != nil { return err } - es.OnTimePledge = big.Sub(es.OnTimePledge, onTimePledge) - es.ActivePower = es.ActivePower.Sub(activePower) - es.FaultyPower = es.FaultyPower.Sub(faultyPower) + // Check underflow. - if es.OnTimePledge.LessThan(big.Zero()) { - return xerrors.Errorf("expiration set pledge underflow: %v", es) + if onTimePledge.LessThan(big.Zero()) { + return xerrors.Errorf("negative pledge removed from expiration set: %+v", es) } - if es.ActivePower.QA.LessThan(big.Zero()) || es.FaultyPower.QA.LessThan(big.Zero()) { - return xerrors.Errorf("expiration set power underflow: %v", es) + if es.OnTimePledge.LessThan(onTimePledge) { + return xerrors.Errorf("expiration set pledge underflow: %+v", es) } + if es.ActiveCount < activeCount || es.FaultyCount < faultyCount { + return xerrors.Errorf("expiration set sector count underflow: %+v", es) + } + + es.OnTimePledge = big.Sub(es.OnTimePledge, onTimePledge) + es.ActiveCount -= activeCount + es.FaultyCount -= faultyCount return es.ValidateState() } // A set is empty if it has no sectors. -// The power and pledge are not checked, but expected to be zero. -func (es *ExpirationSet) IsEmpty() (empty bool, err error) { - if empty, err = es.OnTimeSectors.IsEmpty(); err != nil { - return false, err - } else if empty { - if empty, err = es.EarlySectors.IsEmpty(); err != nil { - return false, err - } - return empty, nil - } else { - return false, nil - } +// The pledge is not checked, but expected to be zero. +func (es *ExpirationSet) IsEmpty() bool { + return es.ActiveCount == 0 && es.FaultyCount == 0 } // Counts all sectors in the expiration set. 
func (es *ExpirationSet) Count() (count uint64, err error) { - onTime, err := es.OnTimeSectors.Count() - if err != nil { - return 0, err - } - - early, err := es.EarlySectors.Count() - if err != nil { - return 0, err + if es.ActiveCount > math.MaxUint64-es.FaultyCount { + return 0, xerrors.Errorf("overflow adding expiration set counts: %+v", es) } - - return onTime + early, nil + return es.ActiveCount + es.FaultyCount, nil } // validates a set of assertions that must hold for expiration sets func (es *ExpirationSet) ValidateState() error { if es.OnTimePledge.LessThan(big.Zero()) { - return xerrors.Errorf("ESet left with negative pledge: %+v", es) + return xerrors.Errorf("expiration set with negative pledge: %+v", es) } - - if es.ActivePower.Raw.LessThan(big.Zero()) { - return xerrors.Errorf("ESet left with negative raw active power: %+v", es) + onTime, err := es.OnTimeSectors.Count() + if err != nil { + return err } - - if es.ActivePower.QA.LessThan(big.Zero()) { - return xerrors.Errorf("ESet left with negative qa active power: %+v", es) + early, err := es.EarlySectors.Count() + if err != nil { + return err } - if es.FaultyPower.Raw.LessThan(big.Zero()) { - return xerrors.Errorf("ESet left with negative raw faulty power: %+v", es) + if onTime > math.MaxUint64-early { + return xerrors.Errorf("overflow adding expiration set bitfield counts: %+v", es) } - - if es.FaultyPower.QA.LessThan(big.Zero()) { - return xerrors.Errorf("ESet left with negative qa faulty power: %+v", es) + if es.ActiveCount > math.MaxUint64-es.FaultyCount { + return xerrors.Errorf("overflow adding expiration set state counts: %+v", es) + } + if onTime+early != es.ActiveCount+es.FaultyCount { + return xerrors.Errorf("expiration set inconsistent, on-time (%d) + early (%d) != active (%d) + faulty (%d)", + onTime, early, es.ActiveCount, es.FaultyCount) } - return nil } @@ -173,72 +181,67 @@ func LoadExpirationQueue(store adt.Store, root cid.Cid, quant builtin.QuantSpec, // Adds a collection of sectors to their on-time target expiration entries (quantized). // The sectors are assumed to be active (non-faulty). -// Returns the sector numbers, power, and pledge added. -func (q ExpirationQueue) AddActiveSectors(sectors []*SectorOnChainInfo, ssize abi.SectorSize) (bitfield.BitField, PowerPair, abi.TokenAmount, error) { - totalPower := NewPowerPairZero() +// Returns the sector numbers, count thereof, and pledge added. +func (q ExpirationQueue) AddActiveSectors(sectors []*SectorOnChainInfo) (bitfield.BitField, abi.TokenAmount, error) { totalPledge := big.Zero() var totalSectors []bitfield.BitField noEarlySectors := bitfield.New() - noFaultyPower := NewPowerPairZero() - for _, group := range groupNewSectorsByDeclaredExpiration(ssize, sectors, q.quant) { + for _, group := range groupNewSectorsByDeclaredExpiration(sectors, q.quant) { snos := bitfield.NewFromSet(group.sectors) - if err := q.add(group.epoch, snos, noEarlySectors, group.power, noFaultyPower, group.pledge); err != nil { - return bitfield.BitField{}, NewPowerPairZero(), big.Zero(), xerrors.Errorf("failed to record new sector expirations: %w", err) + if err := q.add(group.epoch, snos, noEarlySectors, uint64(len(group.sectors)), 0, group.pledge); err != nil { + return bitfield.BitField{}, big.Zero(), xerrors.Errorf("failed to record new sector expirations: %w", err) } totalSectors = append(totalSectors, snos) - totalPower = totalPower.Add(group.power) totalPledge = big.Add(totalPledge, group.pledge) } snos, err := bitfield.MultiMerge(totalSectors...) 
if err != nil { - return bitfield.BitField{}, NewPowerPairZero(), big.Zero(), err + return bitfield.BitField{}, big.Zero(), err } - return snos, totalPower, totalPledge, nil + return snos, totalPledge, nil } // Re-schedules sectors to expire at an early expiration epoch (quantized), if they wouldn't expire before then anyway. // The sectors must not be currently faulty, so must be registered as expiring on-time rather than early. // The pledge for the now-early sectors is removed from the queue. -// Returns the total power represented by the sectors. -func (q ExpirationQueue) RescheduleAsFaults(newExpiration abi.ChainEpoch, sectors []*SectorOnChainInfo, ssize abi.SectorSize) (PowerPair, error) { +func (q ExpirationQueue) RescheduleAsFaults(newExpiration abi.ChainEpoch, sectors []*SectorOnChainInfo) error { var sectorsTotal []uint64 - expiringPower := NewPowerPairZero() - rescheduledPower := NewPowerPairZero() + rescheduledCount := uint64(0) // Group sectors by their target expiration, then remove from existing queue entries according to those groups. - groups, err := q.findSectorsByExpiration(ssize, sectors) + groups, err := q.findSectorsByExpiration(sectors) if err != nil { - return NewPowerPairZero(), err + return err } for _, group := range groups { var err error + groupCount := uint64(len(group.sectors)) if group.epoch <= q.quant.QuantizeUp(newExpiration) { // Don't reschedule sectors that are already due to expire on-time before the fault-driven expiration, - // but do represent their power as now faulty. + // but do represent them as now faulty. // Their pledge remains as "on-time". - group.expirationSet.ActivePower = group.expirationSet.ActivePower.Sub(group.power) - group.expirationSet.FaultyPower = group.expirationSet.FaultyPower.Add(group.power) - expiringPower = expiringPower.Add(group.power) + group.expirationSet.ActiveCount -= groupCount + group.expirationSet.FaultyCount += groupCount } else { - // Remove sectors from on-time expiry and active power. + // Remove sectors from on-time expiry and active sets. sectorsBf := bitfield.NewFromSet(group.sectors) if group.expirationSet.OnTimeSectors, err = bitfield.SubtractBitField(group.expirationSet.OnTimeSectors, sectorsBf); err != nil { - return NewPowerPairZero(), err + return err } group.expirationSet.OnTimePledge = big.Sub(group.expirationSet.OnTimePledge, group.pledge) - group.expirationSet.ActivePower = group.expirationSet.ActivePower.Sub(group.power) + group.expirationSet.ActiveCount -= groupCount - // Accumulate the sectors and power removed. + // Accumulate the sectors removed. sectorsTotal = append(sectorsTotal, group.sectors...) 
- rescheduledPower = rescheduledPower.Add(group.power) + rescheduledCount += groupCount } if err = q.mustUpdateOrDelete(group.epoch, group.expirationSet); err != nil { - return NewPowerPairZero(), err + return err } if err = group.expirationSet.ValidateState(); err != nil { - return NewPowerPairZero(), err + return err } } @@ -247,29 +250,28 @@ func (q ExpirationQueue) RescheduleAsFaults(newExpiration abi.ChainEpoch, sector earlySectors := bitfield.NewFromSet(sectorsTotal) noOnTimeSectors := bitfield.New() noOnTimePledge := abi.NewTokenAmount(0) - noActivePower := NewPowerPairZero() - if err := q.add(newExpiration, noOnTimeSectors, earlySectors, noActivePower, rescheduledPower, noOnTimePledge); err != nil { - return NewPowerPairZero(), err + noActiveCount := uint64(0) + if err := q.add(newExpiration, noOnTimeSectors, earlySectors, noActiveCount, rescheduledCount, noOnTimePledge); err != nil { + return err } } - - return rescheduledPower.Add(expiringPower), nil + return nil } // Re-schedules *all* sectors to expire at an early expiration epoch, if they wouldn't expire before then anyway. func (q ExpirationQueue) RescheduleAllAsFaults(faultExpiration abi.ChainEpoch) error { var rescheduledEpochs []uint64 var rescheduledSectors []bitfield.BitField - rescheduledPower := NewPowerPairZero() + rescheduledCount := uint64(0) var es ExpirationSet if err := q.Array.ForEach(&es, func(e int64) error { epoch := abi.ChainEpoch(e) if epoch <= q.quant.QuantizeUp(faultExpiration) { - // Regardless of whether the sectors were expiring on-time or early, all the power is now faulty. + // Regardless of whether the sectors were expiring on-time or early, they are all now faulty. // Pledge is still on-time. - es.FaultyPower = es.FaultyPower.Add(es.ActivePower) - es.ActivePower = NewPowerPairZero() + es.FaultyCount += es.ActiveCount + es.ActiveCount = 0 if err := q.mustUpdate(epoch, &es); err != nil { return err } @@ -282,8 +284,7 @@ func (q ExpirationQueue) RescheduleAllAsFaults(faultExpiration abi.ChainEpoch) e return xerrors.Errorf("attempted to re-schedule early expirations to an even earlier epoch") } rescheduledSectors = append(rescheduledSectors, es.OnTimeSectors) - rescheduledPower = rescheduledPower.Add(es.ActivePower) - rescheduledPower = rescheduledPower.Add(es.FaultyPower) + rescheduledCount += es.ActiveCount + es.FaultyCount } if err := es.ValidateState(); err != nil { @@ -306,9 +307,9 @@ func (q ExpirationQueue) RescheduleAllAsFaults(faultExpiration abi.ChainEpoch) e return xerrors.Errorf("failed to merge rescheduled sectors: %w", err) } noOnTimeSectors := bitfield.New() - noActivePower := NewPowerPairZero() + noActiveCount := uint64(0) noOnTimePledge := abi.NewTokenAmount(0) - if err = q.add(faultExpiration, noOnTimeSectors, allRescheduled, noActivePower, rescheduledPower, noOnTimePledge); err != nil { + if err = q.add(faultExpiration, noOnTimeSectors, allRescheduled, noActiveCount, rescheduledCount, noOnTimePledge); err != nil { return err } @@ -323,9 +324,9 @@ func (q ExpirationQueue) RescheduleAllAsFaults(faultExpiration abi.ChainEpoch) e // Removes sectors from any queue entries in which they appear that are earlier then their scheduled expiration epoch, // and schedules them at their expected termination epoch. // Pledge for the sectors is re-added as on-time. -// Power for the sectors is changed from faulty to active (whether rescheduled or not). -// Returns the newly-recovered power. Fails if any sectors are not found in the queue. 
-func (q ExpirationQueue) RescheduleRecovered(sectors []*SectorOnChainInfo, ssize abi.SectorSize) (PowerPair, error) { +// The sectors are changed from faulty to active (whether rescheduled or not). +// Returns the count of newly-recovered sectors. Fails if any sectors are not found in the queue. +func (q ExpirationQueue) RescheduleRecovered(sectors []*SectorOnChainInfo) (uint64, error) { remaining := make(map[abi.SectorNumber]struct{}, len(sectors)) for _, s := range sectors { remaining[s.SectorNumber] = struct{}{} @@ -335,7 +336,7 @@ func (q ExpirationQueue) RescheduleRecovered(sectors []*SectorOnChainInfo, ssize // We expect this to find all recovering sectors within the first FaultMaxAge/WPoStProvingPeriod entries // (i.e. 14 for 14-day faults), but if something has gone wrong it's safer not to fail if that's not met. var sectorsRescheduled []*SectorOnChainInfo - recoveredPower := NewPowerPairZero() + recoveredCount := uint64(0) if err := q.traverseMutate(func(epoch abi.ChainEpoch, es *ExpirationSet) (changed, keepGoing bool, err error) { onTimeSectors, err := es.OnTimeSectors.AllMap(entrySectorsMax) if err != nil { @@ -352,22 +353,21 @@ func (q ExpirationQueue) RescheduleRecovered(sectors []*SectorOnChainInfo, ssize // The length of sectors has a maximum of one partition size. for _, sector := range sectors { sno := uint64(sector.SectorNumber) - power := PowerForSector(ssize, sector) var found bool if _, found = onTimeSectors[sno]; found { - // If the sector expires on-time at this epoch, leave it here but change faulty power to active. + // If the sector expires on-time at this epoch, leave it here but change faulty to active. // The pledge is already part of the on-time pledge at this entry. - es.FaultyPower = es.FaultyPower.Sub(power) - es.ActivePower = es.ActivePower.Add(power) + es.FaultyCount -= 1 + es.ActiveCount += 1 } else if _, found = earlySectors[sno]; found { // If the sector expires early at this epoch, remove it for re-scheduling. // It's not part of the on-time pledge number here. es.EarlySectors.Unset(sno) - es.FaultyPower = es.FaultyPower.Sub(power) + es.FaultyCount -= 1 sectorsRescheduled = append(sectorsRescheduled, sector) } if found { - recoveredPower = recoveredPower.Add(power) + recoveredCount += 1 delete(remaining, sector.SectorNumber) changed = true } @@ -379,57 +379,51 @@ func (q ExpirationQueue) RescheduleRecovered(sectors []*SectorOnChainInfo, ssize return changed, len(remaining) > 0, nil }); err != nil { - return NewPowerPairZero(), err + return 0, err } if len(remaining) > 0 { - return NewPowerPairZero(), xerrors.Errorf("sectors not found in expiration queue: %v", remaining) + return 0, xerrors.Errorf("sectors not found in expiration queue: %v", remaining) } // Re-schedule the removed sectors to their target expiration. - if _, _, _, err := q.AddActiveSectors(sectorsRescheduled, ssize); err != nil { - return NewPowerPairZero(), err + if _, _, err := q.AddActiveSectors(sectorsRescheduled); err != nil { + return 0, err } - return recoveredPower, nil + return recoveredCount, nil } // Removes some sectors and adds some others. // The sectors being replaced must not be faulty, so must be scheduled for on-time rather than early expiration. // The sectors added are assumed to be not faulty. -// Returns the old a new sector number bitfields, and delta to power and pledge, new minus old. 
-func (q ExpirationQueue) ReplaceSectors(oldSectors, newSectors []*SectorOnChainInfo, ssize abi.SectorSize) (bitfield.BitField, bitfield.BitField, PowerPair, abi.TokenAmount, error) { - oldSnos, oldPower, oldPledge, err := q.removeActiveSectors(oldSectors, ssize) +// Returns the old and new sector number bitfields, and delta to pledge (new minus old). +func (q ExpirationQueue) ReplaceSectors(oldSectors, newSectors []*SectorOnChainInfo) (bitfield.BitField, bitfield.BitField, abi.TokenAmount, error) { + oldSnos, oldPledge, err := q.removeActiveSectors(oldSectors) if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), big.Zero(), xerrors.Errorf("failed to remove replaced sectors: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, big.Zero(), xerrors.Errorf("failed to remove replaced sectors: %w", err) } - newSnos, newPower, newPledge, err := q.AddActiveSectors(newSectors, ssize) + newSnos, newPledge, err := q.AddActiveSectors(newSectors) if err != nil { - return bitfield.BitField{}, bitfield.BitField{}, NewPowerPairZero(), big.Zero(), xerrors.Errorf("failed to add replacement sectors: %w", err) + return bitfield.BitField{}, bitfield.BitField{}, big.Zero(), xerrors.Errorf("failed to add replacement sectors: %w", err) } - return oldSnos, newSnos, newPower.Sub(oldPower), big.Sub(newPledge, oldPledge), nil + return oldSnos, newSnos, big.Sub(newPledge, oldPledge), nil } // Remove some sectors from the queue. // The sectors may be active or faulty, and scheduled either for on-time or early termination. -// Returns the aggregate of removed sectors and power, and recovering power. +// Returns the aggregate of removed sectors. // Fails if any sectors are not found in the queue. -func (q ExpirationQueue) RemoveSectors(sectors []*SectorOnChainInfo, faults bitfield.BitField, recovering bitfield.BitField, - ssize abi.SectorSize) (*ExpirationSet, PowerPair, error) { +func (q ExpirationQueue) RemoveSectors(sectors []*SectorOnChainInfo, faults bitfield.BitField) (*ExpirationSet, error) { remaining := make(map[abi.SectorNumber]struct{}, len(sectors)) for _, s := range sectors { remaining[s.SectorNumber] = struct{}{} } faultsMap, err := faults.AllMap(AddressedSectorsMax) if err != nil { - return nil, NewPowerPairZero(), xerrors.Errorf("failed to expand faults: %w", err) - } - recoveringMap, err := recovering.AllMap(AddressedSectorsMax) - if err != nil { - return nil, NewPowerPairZero(), xerrors.Errorf("failed to expand recoveries: %w", err) + return nil, xerrors.Errorf("failed to expand faults: %w", err) } // results removed := NewExpirationSetEmpty() - recoveringPower := NewPowerPairZero() // Split into faulty and non-faulty. We process non-faulty sectors first // because they always expire on-time so we know where to find them. @@ -448,9 +442,10 @@ func (q ExpirationQueue) RemoveSectors(sectors []*SectorOnChainInfo, faults bitf } // Remove non-faulty sectors. - removed.OnTimeSectors, removed.ActivePower, removed.OnTimePledge, err = q.removeActiveSectors(nonFaultySectors, ssize) + removed.ActiveCount = uint64(len(nonFaultySectors)) + removed.OnTimeSectors, removed.OnTimePledge, err = q.removeActiveSectors(nonFaultySectors) if err != nil { - return nil, NewPowerPairZero(), xerrors.Errorf("failed to remove on-time recoveries: %w", err) + return nil, xerrors.Errorf("failed to remove on-time recoveries: %w", err) } // Finally, remove faulty sectors (on time and not). 
These sectors can @@ -483,16 +478,12 @@ func (q ExpirationQueue) RemoveSectors(sectors []*SectorOnChainInfo, faults bitf removed.EarlySectors.Set(sno) } if found { - power := PowerForSector(ssize, sector) if _, f := faultsMap[sno]; f { - es.FaultyPower = es.FaultyPower.Sub(power) - removed.FaultyPower = removed.FaultyPower.Add(power) + es.FaultyCount -= 1 + removed.FaultyCount += 1 } else { - es.ActivePower = es.ActivePower.Sub(power) - removed.ActivePower = removed.ActivePower.Add(power) - } - if _, r := recoveringMap[sno]; r { - recoveringPower = recoveringPower.Add(power) + es.ActiveCount -= 1 + removed.ActiveCount += 1 } delete(remaining, sector.SectorNumber) changed = true @@ -505,21 +496,21 @@ func (q ExpirationQueue) RemoveSectors(sectors []*SectorOnChainInfo, faults bitf return changed, len(remaining) > 0, nil }); err != nil { - return nil, recoveringPower, err + return nil, err } if len(remaining) > 0 { - return NewExpirationSetEmpty(), NewPowerPairZero(), xerrors.Errorf("sectors not found in expiration queue: %v", remaining) + return NewExpirationSetEmpty(), xerrors.Errorf("sectors not found in expiration queue: %v", remaining) } - return removed, recoveringPower, nil + return removed, nil } // Removes and aggregates entries from the queue up to and including some epoch. func (q ExpirationQueue) PopUntil(until abi.ChainEpoch) (*ExpirationSet, error) { var onTimeSectors []bitfield.BitField var earlySectors []bitfield.BitField - activePower := NewPowerPairZero() - faultyPower := NewPowerPairZero() + activeCount := uint64(0) + faultyCount := uint64(0) onTimePledge := big.Zero() var poppedKeys []uint64 @@ -532,8 +523,8 @@ func (q ExpirationQueue) PopUntil(until abi.ChainEpoch) (*ExpirationSet, error) poppedKeys = append(poppedKeys, uint64(i)) onTimeSectors = append(onTimeSectors, thisValue.OnTimeSectors) earlySectors = append(earlySectors, thisValue.EarlySectors) - activePower = activePower.Add(thisValue.ActivePower) - faultyPower = faultyPower.Add(thisValue.FaultyPower) + activeCount += thisValue.ActiveCount + faultyCount += thisValue.FaultyCount onTimePledge = big.Add(onTimePledge, thisValue.OnTimePledge) return nil }); err != nil && err != stopErr { @@ -552,10 +543,10 @@ func (q ExpirationQueue) PopUntil(until abi.ChainEpoch) (*ExpirationSet, error) if err != nil { return nil, err } - return NewExpirationSet(allOnTime, allEarly, onTimePledge, activePower, faultyPower), nil + return NewExpirationSet(allOnTime, allEarly, onTimePledge, activeCount, faultyCount) } -func (q ExpirationQueue) add(rawEpoch abi.ChainEpoch, onTimeSectors, earlySectors bitfield.BitField, activePower, faultyPower PowerPair, +func (q ExpirationQueue) add(rawEpoch abi.ChainEpoch, onTimeSectors, earlySectors bitfield.BitField, activeCount, faultyCount uint64, pledge abi.TokenAmount) error { epoch := q.quant.QuantizeUp(rawEpoch) es, err := q.mayGet(epoch) @@ -563,14 +554,14 @@ func (q ExpirationQueue) add(rawEpoch abi.ChainEpoch, onTimeSectors, earlySector return err } - if err = es.Add(onTimeSectors, earlySectors, pledge, activePower, faultyPower); err != nil { + if err = es.Add(onTimeSectors, earlySectors, pledge, activeCount, faultyCount); err != nil { return xerrors.Errorf("failed to add expiration values for epoch %v: %w", epoch, err) } return q.mustUpdate(epoch, es) } -func (q ExpirationQueue) remove(rawEpoch abi.ChainEpoch, onTimeSectors, earlySectors bitfield.BitField, activePower, faultyPower PowerPair, +func (q ExpirationQueue) remove(rawEpoch abi.ChainEpoch, onTimeSectors, earlySectors 
bitfield.BitField, activeCount, faultyCount uint64,
 	pledge abi.TokenAmount) error {
 	epoch := q.quant.QuantizeUp(rawEpoch)
 	var es ExpirationSet
@@ -580,38 +571,36 @@ func (q ExpirationQueue) remove(rawEpoch abi.ChainEpoch, onTimeSectors, earlySec
 		return xerrors.Errorf("missing expected expiration set at epoch %v", epoch)
 	}
 
-	if err := es.Remove(onTimeSectors, earlySectors, pledge, activePower, faultyPower); err != nil {
+	if err := es.Remove(onTimeSectors, earlySectors, pledge, activeCount, faultyCount); err != nil {
 		return xerrors.Errorf("failed to remove expiration values for queue epoch %v: %w", epoch, err)
 	}
 
 	return q.mustUpdateOrDelete(epoch, &es)
 }
 
-func (q ExpirationQueue) removeActiveSectors(sectors []*SectorOnChainInfo, ssize abi.SectorSize) (bitfield.BitField, PowerPair, abi.TokenAmount, error) {
+func (q ExpirationQueue) removeActiveSectors(sectors []*SectorOnChainInfo) (bitfield.BitField, abi.TokenAmount, error) {
 	removedSnos := bitfield.New()
-	removedPower := NewPowerPairZero()
 	removedPledge := big.Zero()
 	noEarlySectors := bitfield.New()
-	noFaultyPower := NewPowerPairZero()
+	noFaultyCount := uint64(0)
 
 	// Group sectors by their expiration, then remove from existing queue entries according to those groups.
-	groups, err := q.findSectorsByExpiration(ssize, sectors)
+	groups, err := q.findSectorsByExpiration(sectors)
 	if err != nil {
-		return bitfield.BitField{}, NewPowerPairZero(), big.Zero(), err
+		return bitfield.BitField{}, big.Zero(), err
	}
 
 	for _, group := range groups {
 		sectorsBf := bitfield.NewFromSet(group.sectors)
-		if err := q.remove(group.epoch, sectorsBf, noEarlySectors, group.power, noFaultyPower, group.pledge); err != nil {
-			return bitfield.BitField{}, NewPowerPairZero(), big.Zero(), err
+		if err := q.remove(group.epoch, sectorsBf, noEarlySectors, uint64(len(group.sectors)), noFaultyCount, group.pledge); err != nil {
+			return bitfield.BitField{}, big.Zero(), err
 		}
 		for _, n := range group.sectors {
 			removedSnos.Set(n)
 		}
-		removedPower = removedPower.Add(group.power)
 		removedPledge = big.Add(removedPledge, group.pledge)
 	}
 
-	return removedSnos, removedPower, removedPledge, nil
+	return removedSnos, removedPledge, nil
 }
 
 // Traverses the entire queue with a callback function that may mutate entries.
@@ -626,7 +615,5 @@ func (q ExpirationQueue) traverseMutate(f func(epoch abi.ChainEpoch, es *Expirat
 		if err != nil {
 			return err
 		} else if changed {
-			if emptied, err := es.IsEmpty(); err != nil {
-				return err
-			} else if emptied {
+			if es.IsEmpty() {
 				epochsEmptied = append(epochsEmptied, uint64(epoch))
@@ -672,13 +661,11 @@ func (q ExpirationQueue) mustUpdate(epoch abi.ChainEpoch, es *ExpirationSet) err
 // Since this might delete the node, it's not safe for use inside an iteration.
func (q ExpirationQueue) mustUpdateOrDelete(epoch abi.ChainEpoch, es *ExpirationSet) error { - if empty, err := es.IsEmpty(); err != nil { - return err - } else if empty { - if err = q.Array.Delete(uint64(epoch)); err != nil { + if es.IsEmpty() { + if err := q.Array.Delete(uint64(epoch)); err != nil { return xerrors.Errorf("failed to delete queue epoch %d: %w", epoch, err) } - } else if err = q.Array.Set(uint64(epoch), es); err != nil { + } else if err := q.Array.Set(uint64(epoch), es); err != nil { return xerrors.Errorf("failed to set queue epoch %v: %w", epoch, err) } return nil @@ -687,7 +674,6 @@ func (q ExpirationQueue) mustUpdateOrDelete(epoch abi.ChainEpoch, es *Expiration type sectorEpochSet struct { epoch abi.ChainEpoch sectors []uint64 - power PowerPair pledge abi.TokenAmount } @@ -700,7 +686,7 @@ type sectorExpirationSet struct { // sorted by expiration epoch, quantized. // // Note: While the result is sorted by epoch, the order of per-epoch sectors is maintained. -func groupNewSectorsByDeclaredExpiration(sectorSize abi.SectorSize, sectors []*SectorOnChainInfo, quant builtin.QuantSpec) []sectorEpochSet { +func groupNewSectorsByDeclaredExpiration(sectors []*SectorOnChainInfo, quant builtin.QuantSpec) []sectorEpochSet { sectorsByExpiration := make(map[abi.ChainEpoch][]*SectorOnChainInfo) for _, sector := range sectors { @@ -713,17 +699,14 @@ func groupNewSectorsByDeclaredExpiration(sectorSize abi.SectorSize, sectors []*S // This map iteration is non-deterministic but safe because we sort by epoch below. for expiration, epochSectors := range sectorsByExpiration { //nolint:nomaprange // result is subsequently sorted sectorNumbers := make([]uint64, len(epochSectors)) - totalPower := NewPowerPairZero() totalPledge := big.Zero() for i, sector := range epochSectors { sectorNumbers[i] = uint64(sector.SectorNumber) - totalPower = totalPower.Add(PowerForSector(sectorSize, sector)) totalPledge = big.Add(totalPledge, sector.InitialPledge) } sectorEpochSets = append(sectorEpochSets, sectorEpochSet{ epoch: expiration, sectors: sectorNumbers, - power: totalPower, pledge: totalPledge, }) } @@ -739,7 +722,7 @@ func groupNewSectorsByDeclaredExpiration(sectorSize abi.SectorSize, sectors []*S // (i.e. they have been rescheduled) traverse expiration sets to for groups where these // sectors actually expire. // Groups will be returned in expiration order, earliest first. -func (q *ExpirationQueue) findSectorsByExpiration(sectorSize abi.SectorSize, sectors []*SectorOnChainInfo) ([]sectorExpirationSet, error) { +func (q *ExpirationQueue) findSectorsByExpiration(sectors []*SectorOnChainInfo) ([]sectorExpirationSet, error) { declaredExpirations := make(map[abi.ChainEpoch]bool, len(sectors)) sectorsByNumber := make(map[uint64]*SectorOnChainInfo, len(sectors)) allRemaining := make(map[uint64]struct{}) @@ -762,7 +745,7 @@ func (q *ExpirationQueue) findSectorsByExpiration(sectorSize abi.SectorSize, sec // create group from overlap var group sectorExpirationSet - group, allRemaining, err = groupExpirationSet(sectorSize, sectorsByNumber, allRemaining, es, expiration) + group, allRemaining, err = groupExpirationSet(sectorsByNumber, allRemaining, es, expiration) if err != nil { return nil, err } @@ -782,7 +765,7 @@ func (q *ExpirationQueue) findSectorsByExpiration(sectorSize abi.SectorSize, sec } // Sector should not be found in EarlyExpirations which holds faults. An implicit assumption - // of grouping is that it only returns sectors with active power. 
ExpirationQueue should not + // of grouping is that it only returns active sectors. ExpirationQueue should not // provide operations that allow this to happen. if err := assertNoEarlySectors(allRemaining, es); err != nil { return true, err @@ -790,7 +773,7 @@ func (q *ExpirationQueue) findSectorsByExpiration(sectorSize abi.SectorSize, sec var group sectorExpirationSet var err error - group, allRemaining, err = groupExpirationSet(sectorSize, sectorsByNumber, allRemaining, es, epoch) + group, allRemaining, err = groupExpirationSet(sectorsByNumber, allRemaining, es, epoch) if err != nil { return false, err } @@ -819,17 +802,14 @@ func (q *ExpirationQueue) findSectorsByExpiration(sectorSize abi.SectorSize, sec // Takes a slice of sector infos a bitfield of sector numbers and returns a single group for all bitfield sectors // Also returns a bitfield containing sectors not found in expiration set. // This method mutates includeSet by removing sector numbers of sectors found in expiration set. -func groupExpirationSet(sectorSize abi.SectorSize, sectors map[uint64]*SectorOnChainInfo, - includeSet map[uint64]struct{}, es *ExpirationSet, expiration abi.ChainEpoch, -) (sectorExpirationSet, map[uint64]struct{}, error) { +func groupExpirationSet(sectors map[uint64]*SectorOnChainInfo, includeSet map[uint64]struct{}, es *ExpirationSet, + expiration abi.ChainEpoch) (sectorExpirationSet, map[uint64]struct{}, error) { var sectorNumbers []uint64 - totalPower := NewPowerPairZero() totalPledge := big.Zero() err := es.OnTimeSectors.ForEach(func(u uint64) error { if _, found := includeSet[u]; found { sector := sectors[u] sectorNumbers = append(sectorNumbers, u) - totalPower = totalPower.Add(PowerForSector(sectorSize, sector)) totalPledge = big.Add(totalPledge, sector.InitialPledge) delete(includeSet, u) } @@ -843,7 +823,6 @@ func groupExpirationSet(sectorSize abi.SectorSize, sectors map[uint64]*SectorOnC sectorEpochSet: sectorEpochSet{ epoch: expiration, sectors: sectorNumbers, - power: totalPower, pledge: totalPledge, }, expirationSet: es, diff --git a/actors/builtin/miner/expiration_queue_internal_test.go b/actors/builtin/miner/expiration_queue_internal_test.go index 73473791b..56c7fa41e 100644 --- a/actors/builtin/miner/expiration_queue_internal_test.go +++ b/actors/builtin/miner/expiration_queue_internal_test.go @@ -18,16 +18,14 @@ func TestExpirations(t *testing.T) { testSector(14, 3, 0, 0, 0), testSector(13, 4, 0, 0, 0), } - result := groupNewSectorsByDeclaredExpiration(2048, sectors, quant) + result := groupNewSectorsByDeclaredExpiration(sectors, quant) expected := []*sectorEpochSet{{ epoch: 13, sectors: []uint64{1, 2, 4}, - power: NewPowerPair(big.NewIntUnsigned(2048*3), big.NewIntUnsigned(2048*3)), pledge: big.Zero(), }, { epoch: 23, sectors: []uint64{3}, - power: NewPowerPair(big.NewIntUnsigned(2048), big.NewIntUnsigned(2048)), pledge: big.Zero(), }} require.Equal(t, len(expected), len(result)) @@ -38,7 +36,7 @@ func TestExpirations(t *testing.T) { func TestExpirationsEmpty(t *testing.T) { sectors := []*SectorOnChainInfo{} - result := groupNewSectorsByDeclaredExpiration(2048, sectors, builtin.NoQuantization) + result := groupNewSectorsByDeclaredExpiration(sectors, builtin.NoQuantization) expected := []sectorEpochSet{} require.Equal(t, expected, result) } @@ -46,7 +44,6 @@ func TestExpirationsEmpty(t *testing.T) { func assertSectorSet(t *testing.T, expected, actual *sectorEpochSet) { assert.Equal(t, expected.epoch, actual.epoch) assert.Equal(t, expected.sectors, actual.sectors) - assert.True(t, 
expected.power.Equals(actual.power), "expected %v, actual %v", expected.power, actual.power) assert.True(t, expected.pledge.Equals(actual.pledge), "expected %v, actual %v", expected.pledge, actual.pledge) } @@ -54,8 +51,6 @@ func testSector(expiration, number, weight, vweight, pledge int64) *SectorOnChai return &SectorOnChainInfo{ Expiration: abi.ChainEpoch(expiration), SectorNumber: abi.SectorNumber(number), - DealWeight: big.NewInt(weight), - VerifiedDealWeight: big.NewInt(vweight), InitialPledge: abi.NewTokenAmount(pledge), } } diff --git a/actors/builtin/miner/expiration_queue_test.go b/actors/builtin/miner/expiration_queue_test.go index 7241f17cb..ed57369ce 100644 --- a/actors/builtin/miner/expiration_queue_test.go +++ b/actors/builtin/miner/expiration_queue_test.go @@ -22,20 +22,20 @@ func TestExpirationSet(t *testing.T) { onTimeSectors := bitfield.NewFromSet([]uint64{5, 8, 9}) earlySectors := bitfield.NewFromSet([]uint64{2, 3}) onTimePledge := abi.NewTokenAmount(1000) - activePower := miner.NewPowerPair(abi.NewStoragePower(1<<13), abi.NewStoragePower(1<<14)) - faultyPower := miner.NewPowerPair(abi.NewStoragePower(1<<11), abi.NewStoragePower(1<<12)) + activeCount := uint64(4) + faultyCount := uint64(1) t.Run("adds sectors and power to empty set", func(t *testing.T) { set := miner.NewExpirationSetEmpty() - err := set.Add(onTimeSectors, earlySectors, onTimePledge, activePower, faultyPower) + err := set.Add(onTimeSectors, earlySectors, onTimePledge, activeCount, faultyCount) require.NoError(t, err) assertBitfieldEquals(t, set.OnTimeSectors, 5, 8, 9) assertBitfieldEquals(t, set.EarlySectors, 2, 3) assert.Equal(t, onTimePledge, set.OnTimePledge) - assert.True(t, activePower.Equals(set.ActivePower)) - assert.True(t, faultyPower.Equals(set.FaultyPower)) + assert.Equal(t, activeCount, set.ActiveCount) + assert.Equal(t, faultyCount, set.FaultyCount) count, err := set.Count() require.NoError(t, err) @@ -43,49 +43,48 @@ func TestExpirationSet(t *testing.T) { }) t.Run("adds sectors and power to non-empty set", func(t *testing.T) { - set := miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activePower, faultyPower) + set, err := miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activeCount, faultyCount) + require.NoError(t, err) - err := set.Add( + err = set.Add( bitfield.NewFromSet([]uint64{6, 7, 11}), bitfield.NewFromSet([]uint64{1, 4}), abi.NewTokenAmount(300), - miner.NewPowerPair(abi.NewStoragePower(3*(1<<13)), abi.NewStoragePower(3*(1<<14))), - miner.NewPowerPair(abi.NewStoragePower(3*(1<<11)), abi.NewStoragePower(3*(1<<12))), + 4, + 1, ) require.NoError(t, err) assertBitfieldEquals(t, set.OnTimeSectors, 5, 6, 7, 8, 9, 11) assertBitfieldEquals(t, set.EarlySectors, 1, 2, 3, 4) assert.Equal(t, abi.NewTokenAmount(1300), set.OnTimePledge) - active := miner.NewPowerPair(abi.NewStoragePower(1<<15), abi.NewStoragePower(1<<16)) - assert.True(t, active.Equals(set.ActivePower)) - faulty := miner.NewPowerPair(abi.NewStoragePower(1<<13), abi.NewStoragePower(1<<14)) - assert.True(t, faulty.Equals(set.FaultyPower)) + assert.Equal(t, uint64(8), set.ActiveCount) + assert.Equal(t, uint64(2), set.FaultyCount) }) t.Run("removes sectors and power set", func(t *testing.T) { - set := miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activePower, faultyPower) + set, err := miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activeCount, faultyCount) + require.NoError(t, err) - err := set.Remove( + err = set.Remove( bitfield.NewFromSet([]uint64{9}), 
 			bitfield.NewFromSet([]uint64{2}),
 			abi.NewTokenAmount(800),
-			miner.NewPowerPair(abi.NewStoragePower(3*(1<<11)), abi.NewStoragePower(3*(1<<12))),
-			miner.NewPowerPair(abi.NewStoragePower(3*(1<<9)), abi.NewStoragePower(3*(1<<10))),
+			1,
+			1,
 		)
 		require.NoError(t, err)

 		assertBitfieldEquals(t, set.OnTimeSectors, 5, 8)
 		assertBitfieldEquals(t, set.EarlySectors, 3)
 		assert.Equal(t, abi.NewTokenAmount(200), set.OnTimePledge)
-		active := miner.NewPowerPair(abi.NewStoragePower(1<<11), abi.NewStoragePower(1<<12))
-		assert.True(t, active.Equals(set.ActivePower))
-		faulty := miner.NewPowerPair(abi.NewStoragePower(1<<9), abi.NewStoragePower(1<<10))
-		assert.True(t, faulty.Equals(set.FaultyPower))
+		assert.Equal(t, uint64(3), set.ActiveCount)
+		assert.Equal(t, uint64(0), set.FaultyCount)
 	})

 	t.Run("remove fails when pledge underflows", func(t *testing.T) {
-		set := miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activePower, faultyPower)
+		set, err := miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activeCount, faultyCount)
+		require.NoError(t, err)

-		err := set.Remove(
+		err = set.Remove(
 			bitfield.NewFromSet([]uint64{9}),
@@ -99,7 +98,8 @@ func TestExpirationSet(t *testing.T) {
 	})

 	t.Run("remove fails to remove sectors it does not contain", func(t *testing.T) {
-		set := miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activePower, faultyPower)
+		set, err := miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activeCount, faultyCount)
+		require.NoError(t, err)

 		// remove unknown active sector 12
-		err := set.Remove(
+		err = set.Remove(
@@ -125,7 +125,8 @@ func TestExpirationSet(t *testing.T) {
 	})

 	t.Run("remove fails when active or fault qa power underflows", func(t *testing.T) {
-		set := miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activePower, faultyPower)
+		set, err := miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activeCount, faultyCount)
+		require.NoError(t, err)

 		// active removed power > active power
-		err := set.Remove(
+		err = set.Remove(
@@ -138,7 +139,7 @@ func TestExpirationSet(t *testing.T) {
 		require.Error(t, err)
 		assert.Contains(t, err.Error(), "power underflow")

-		set = miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activePower, faultyPower)
+		set, err = miner.NewExpirationSet(onTimeSectors, earlySectors, onTimePledge, activeCount, faultyCount)
+		require.NoError(t, err)

 		// faulty removed power > faulty power
 		err = set.Remove(
@@ -155,25 +156,24 @@ func TestExpirationSet(t *testing.T) {
 	t.Run("set is empty when all sectors removed", func(t *testing.T) {
 		set := miner.NewExpirationSetEmpty()

-		empty, err := set.IsEmpty()
-		require.NoError(t, err)
+		empty := set.IsEmpty()
 		assert.True(t, empty)

 		count, err := set.Count()
 		require.NoError(t, err)
 		assert.Zero(t, count)

-		err = set.Add(onTimeSectors, earlySectors, onTimePledge, activePower, faultyPower)
+		err = set.Add(onTimeSectors, earlySectors, onTimePledge, activeCount, faultyCount)
 		require.NoError(t, err)

-		empty, err = set.IsEmpty()
-		require.NoError(t, err)
+		empty = set.IsEmpty()
 		assert.False(t, empty)

-		err = set.Remove(onTimeSectors, earlySectors, onTimePledge, activePower, faultyPower)
+		err = set.Remove(onTimeSectors, earlySectors, onTimePledge, activeCount, faultyCount)
 		require.NoError(t, err)

-		empty, err = set.IsEmpty()
-		require.NoError(t, err)
+		empty = set.IsEmpty()
 		assert.True(t, empty)
@@ -196,7 +196,7 @@ func TestExpirationQueue(t *testing.T) {
 	t.Run("added sectors can be popped off queue", func(t *testing.T) {
 		queue := emptyExpirationQueue(t)
-		secNums, power, pledge, err := queue.AddActiveSectors(sectors, sectorSize)
+		secNums, pledge, err := queue.AddActiveSectors(sectors)
 		require.NoError(t, err)
 		assertBitfieldEquals(t, secNums, 1, 2, 3, 4, 5, 6)
-		assert.True(t, power.Equals(miner.PowerForSectors(sectorSize, sectors)))
@@ -218,12 +218,12 @@ func TestExpirationQueue(t *testing.T) {
 		assertBitfieldEquals(t, set.OnTimeSectors, 1, 2, 3)
 		assertBitfieldEmpty(t, set.EarlySectors)
-		activePower := miner.PowerForSectors(sectorSize, sectors[:3])
-		faultyPower := miner.NewPowerPairZero()
 		assert.Equal(t, big.NewInt(3003), set.OnTimePledge) // sum of first 3 sector pledges
-		assert.True(t, activePower.Equals(set.ActivePower))
-		assert.True(t, faultyPower.Equals(set.FaultyPower))
+		assert.Equal(t, uint64(3), set.ActiveCount)
+		assert.Equal(t, uint64(0), set.FaultyCount)

 		// pop off rest up to and including epoch 8
 		set, err = queue.PopUntil(20)
@@ -233,8 +233,8 @@ func TestExpirationQueue(t *testing.T) {
 		assertBitfieldEmpty(t, set.EarlySectors)
 		assert.Equal(t, big.NewInt(3012), set.OnTimePledge) // sum of last 3 sector pledges
-		assert.True(t, set.ActivePower.Equals(miner.PowerForSectors(sectorSize, sectors[3:])))
-		assert.True(t, set.FaultyPower.Equals(miner.NewPowerPairZero()))
+		assert.Equal(t, uint64(3), set.ActiveCount)
+		assert.Equal(t, uint64(0), set.FaultyCount)

 		// queue is now empty
 		assert.Equal(t, 0, int(queue.Length()))
@@ -242,7 +242,7 @@ func TestExpirationQueue(t *testing.T) {
 	t.Run("quantizes added sectors by expiration", func(t *testing.T) {
 		queue := emptyExpirationQueueWithQuantizing(t, builtin.NewQuantSpec(5, 3), testAmtBitwidth)
-		secNums, power, pledge, err := queue.AddActiveSectors(sectors, sectorSize)
+		secNums, pledge, err := queue.AddActiveSectors(sectors)
 		require.NoError(t, err)
 		assertBitfieldEquals(t, secNums, 1, 2, 3, 4, 5, 6)
-		assert.True(t, power.Equals(miner.PowerForSectors(sectorSize, sectors)))
@@ -301,7 +301,7 @@ func TestExpirationQueue(t *testing.T) {
 	t.Run("reschedules sectors as faults", func(t *testing.T) {
 		// Create 3 expiration sets with 2 sectors apiece
 		queue := emptyExpirationQueueWithQuantizing(t, builtin.NewQuantSpec(4, 1), testAmtBitwidth)
-		_, _, _, err := queue.AddActiveSectors(sectors, sectorSize)
+		_, _, err := queue.AddActiveSectors(sectors)
 		require.NoError(t, err)

 		_, err = queue.Root()
@@ -311,9 +311,8 @@ func TestExpirationQueue(t *testing.T) {
 		// This faults one sector from the first set, all of the second set and one from the third.
// Faulting at epoch 6 means the first 3 will expire on time, but the last will be early and // moved to the second set - powerDelta, err := queue.RescheduleAsFaults(abi.ChainEpoch(6), sectors[1:5], sectorSize) + err = queue.RescheduleAsFaults(abi.ChainEpoch(6), sectors[1:5]) require.NoError(t, err) - assert.True(t, powerDelta.Equals(miner.PowerForSectors(sectorSize, sectors[1:5]))) _, err = queue.Root() require.NoError(t, err) @@ -327,8 +326,8 @@ func TestExpirationQueue(t *testing.T) { assertBitfieldEmpty(t, set.EarlySectors) assert.Equal(t, big.NewInt(2001), set.OnTimePledge) - assert.True(t, set.ActivePower.Equals(miner.PowerForSectors(sectorSize, sectors[0:1]))) - assert.True(t, set.FaultyPower.Equals(miner.PowerForSectors(sectorSize, sectors[1:2]))) + assert.True(t, set.ActiveCount.Equals(miner.PowerForSectors(sectorSize, sectors[0:1]))) + assert.True(t, set.FaultyCount.Equals(miner.PowerForSectors(sectorSize, sectors[1:2]))) // expect the second set to have all faulty power and now contain 5th sector as an early sector requireNoExpirationGroupsBefore(t, 9, queue) @@ -341,8 +340,8 @@ func TestExpirationQueue(t *testing.T) { // pledge is kept from original 2 sectors. Pledge from new early sector is NOT added. assert.Equal(t, big.NewInt(2005), set.OnTimePledge) - assert.True(t, set.ActivePower.Equals(miner.NewPowerPairZero())) - assert.True(t, set.FaultyPower.Equals(miner.PowerForSectors(sectorSize, sectors[2:5]))) + assert.True(t, set.ActiveCount.Equals(miner.NewPowerPairZero())) + assert.True(t, set.FaultyCount.Equals(miner.PowerForSectors(sectorSize, sectors[2:5]))) // expect last set to only contain non faulty sector requireNoExpirationGroupsBefore(t, 13, queue) @@ -355,14 +354,14 @@ func TestExpirationQueue(t *testing.T) { // Pledge from sector moved from this set is dropped assert.Equal(t, big.NewInt(1005), set.OnTimePledge) - assert.True(t, set.ActivePower.Equals(miner.PowerForSectors(sectorSize, sectors[5:]))) - assert.True(t, set.FaultyPower.Equals(miner.NewPowerPairZero())) + assert.True(t, set.ActiveCount.Equals(miner.PowerForSectors(sectorSize, sectors[5:]))) + assert.True(t, set.FaultyCount.Equals(miner.NewPowerPairZero())) }) t.Run("reschedules all sectors as faults", func(t *testing.T) { // Create expiration 3 sets with 2 sectors apiece queue := emptyExpirationQueueWithQuantizing(t, builtin.NewQuantSpec(4, 1), testAmtBitwidth) - _, _, _, err := queue.AddActiveSectors(sectors, sectorSize) + _, _, err := queue.AddActiveSectors(sectors) require.NoError(t, err) _, err = queue.Root() @@ -387,8 +386,8 @@ func TestExpirationQueue(t *testing.T) { assert.Equal(t, big.NewInt(2001), set.OnTimePledge) // pledge is same // active power is converted to fault power - assert.True(t, set.ActivePower.Equals(miner.NewPowerPairZero())) - assert.True(t, set.FaultyPower.Equals(miner.PowerForSectors(sectorSize, sectors[:2]))) + assert.True(t, set.ActiveCount.Equals(miner.NewPowerPairZero())) + assert.True(t, set.FaultyCount.Equals(miner.PowerForSectors(sectorSize, sectors[:2]))) // expect the second set to have all faulty power and now contain 5th and 6th sectors as an early sectors requireNoExpirationGroupsBefore(t, 9, queue) @@ -402,8 +401,8 @@ func TestExpirationQueue(t *testing.T) { assert.Equal(t, big.NewInt(2005), set.OnTimePledge) // fault power is all power for sectors previously in the first and second sets - assert.True(t, set.ActivePower.Equals(miner.NewPowerPairZero())) - assert.True(t, set.FaultyPower.Equals(miner.PowerForSectors(sectorSize, sectors[2:]))) + assert.True(t, 
set.ActiveCount.Equals(miner.NewPowerPairZero())) + assert.True(t, set.FaultyCount.Equals(miner.PowerForSectors(sectorSize, sectors[2:]))) // expect last set to only contain non faulty sector requireNoExpirationGroupsBefore(t, 13, queue) @@ -416,14 +415,14 @@ func TestExpirationQueue(t *testing.T) { // all pledge is dropped assert.Equal(t, big.Zero(), set.OnTimePledge) - assert.True(t, set.ActivePower.Equals(miner.NewPowerPairZero())) - assert.True(t, set.FaultyPower.Equals(miner.NewPowerPairZero())) + assert.True(t, set.ActiveCount.Equals(miner.NewPowerPairZero())) + assert.True(t, set.FaultyCount.Equals(miner.NewPowerPairZero())) }) t.Run("reschedule recover restores all sector stats", func(t *testing.T) { // Create expiration 3 sets with 2 sectors apiece queue := emptyExpirationQueueWithQuantizing(t, builtin.NewQuantSpec(4, 1), testAmtBitwidth) - _, _, _, err := queue.AddActiveSectors(sectors, sectorSize) + _, _, err := queue.AddActiveSectors(sectors) require.NoError(t, err) _, err = queue.Root() @@ -431,14 +430,14 @@ func TestExpirationQueue(t *testing.T) { // Fault middle sectors to expire at epoch 6 to put sectors in a state // described in "reschedules sectors as faults" - _, err = queue.RescheduleAsFaults(abi.ChainEpoch(6), sectors[1:5], sectorSize) + err = queue.RescheduleAsFaults(abi.ChainEpoch(6), sectors[1:5]) require.NoError(t, err) _, err = queue.Root() require.NoError(t, err) // mark faulted sectors as recovered - recovered, err := queue.RescheduleRecovered(sectors[1:5], sectorSize) + recovered, err := queue.RescheduleRecovered(sectors[1:5]) require.NoError(t, err) assert.True(t, recovered.Equals(miner.PowerForSectors(sectorSize, sectors[1:5]))) @@ -453,8 +452,8 @@ func TestExpirationQueue(t *testing.T) { // pledge from both sectors assert.Equal(t, big.NewInt(2001), set.OnTimePledge) - assert.True(t, set.ActivePower.Equals(miner.PowerForSectors(sectorSize, sectors[:2]))) - assert.True(t, set.FaultyPower.Equals(miner.NewPowerPairZero())) + assert.True(t, set.ActiveCount.Equals(miner.PowerForSectors(sectorSize, sectors[:2]))) + assert.True(t, set.FaultyCount.Equals(miner.NewPowerPairZero())) // expect second set to have lost early sector 5 and have active power just from 3 and 4 requireNoExpirationGroupsBefore(t, 9, queue) @@ -467,8 +466,8 @@ func TestExpirationQueue(t *testing.T) { // pledge is kept from original 2 sectors assert.Equal(t, big.NewInt(2005), set.OnTimePledge) - assert.True(t, set.ActivePower.Equals(miner.PowerForSectors(sectorSize, sectors[2:4]))) - assert.True(t, set.FaultyPower.Equals(miner.NewPowerPairZero())) + assert.True(t, set.ActiveCount.Equals(miner.PowerForSectors(sectorSize, sectors[2:4]))) + assert.True(t, set.FaultyCount.Equals(miner.NewPowerPairZero())) // expect sector 5 to be returned to last setu requireNoExpirationGroupsBefore(t, 13, queue) @@ -481,8 +480,8 @@ func TestExpirationQueue(t *testing.T) { // Pledge from sector 5 is restored assert.Equal(t, big.NewInt(2009), set.OnTimePledge) - assert.True(t, set.ActivePower.Equals(miner.PowerForSectors(sectorSize, sectors[4:]))) - assert.True(t, set.FaultyPower.Equals(miner.NewPowerPairZero())) + assert.True(t, set.ActiveCount.Equals(miner.PowerForSectors(sectorSize, sectors[4:]))) + assert.True(t, set.FaultyCount.Equals(miner.NewPowerPairZero())) }) t.Run("replaces sectors with new sectors", func(t *testing.T) { @@ -490,7 +489,7 @@ func TestExpirationQueue(t *testing.T) { queue := emptyExpirationQueueWithQuantizing(t, builtin.NewQuantSpec(4, 1), testAmtBitwidth) // add sectors to each set - _, _, 
_, err := queue.AddActiveSectors([]*miner.SectorOnChainInfo{sectors[0], sectors[1], sectors[3], sectors[5]}, sectorSize) + _, _, err := queue.AddActiveSectors([]*miner.SectorOnChainInfo{sectors[0], sectors[1], sectors[3], sectors[5]}) require.NoError(t, err) _, err = queue.Root() @@ -499,15 +498,12 @@ func TestExpirationQueue(t *testing.T) { // remove all from first set, replace second set, and append to third toRemove := []*miner.SectorOnChainInfo{sectors[0], sectors[1], sectors[3]} toAdd := []*miner.SectorOnChainInfo{sectors[2], sectors[4]} - removed, added, powerDelta, pledgeDelta, err := queue.ReplaceSectors( - toRemove, - toAdd, - sectorSize) + removed, added, powerDelta, pledgeDelta, err := queue.ReplaceSectors(toRemove, toAdd) require.NoError(t, err) assertBitfieldEquals(t, removed, 1, 2, 4) assertBitfieldEquals(t, added, 3, 5) - addedPower := miner.PowerForSectors(sectorSize, toAdd) - assert.True(t, powerDelta.Equals(addedPower.Sub(miner.PowerForSectors(sectorSize, toRemove)))) + addedPower := miner.SectorsPower(sectorSize, len(toAdd)) + assert.True(t, powerDelta.Equals(addedPower.Sub(miner.SectorsPower(sectorSize, len(toRemove))))) assert.Equal(t, abi.NewTokenAmount(1002+1004-1000-1001-1003), pledgeDelta) // first set is gone @@ -522,8 +518,8 @@ func TestExpirationQueue(t *testing.T) { // pledge and power is only from sector 3 assert.Equal(t, big.NewInt(1002), set.OnTimePledge) - assert.True(t, set.ActivePower.Equals(miner.PowerForSectors(sectorSize, sectors[2:3]))) - assert.True(t, set.FaultyPower.Equals(miner.NewPowerPairZero())) + assert.True(t, set.ActiveCount.Equals(miner.PowerForSectors(sectorSize, sectors[2:3]))) + assert.True(t, set.FaultyCount.Equals(miner.NewPowerPairZero())) // last set appends sector 6 requireNoExpirationGroupsBefore(t, 13, queue) @@ -535,21 +531,21 @@ func TestExpirationQueue(t *testing.T) { // pledge and power are some of old and new sectors assert.Equal(t, big.NewInt(2009), set.OnTimePledge) - assert.True(t, set.ActivePower.Equals(miner.PowerForSectors(sectorSize, sectors[4:]))) - assert.True(t, set.FaultyPower.Equals(miner.NewPowerPairZero())) + assert.True(t, set.ActiveCount.Equals(miner.PowerForSectors(sectorSize, sectors[4:]))) + assert.True(t, set.FaultyCount.Equals(miner.NewPowerPairZero())) }) t.Run("removes sectors", func(t *testing.T) { // add all sectors into 3 sets queue := emptyExpirationQueueWithQuantizing(t, builtin.NewQuantSpec(4, 1), testAmtBitwidth) - _, _, _, err := queue.AddActiveSectors(sectors, sectorSize) + _, _, err := queue.AddActiveSectors(sectors) require.NoError(t, err) _, err = queue.Root() require.NoError(t, err) // put queue in a state where some sectors are early and some are faulty - _, err = queue.RescheduleAsFaults(abi.ChainEpoch(6), sectors[1:6], sectorSize) + err = queue.RescheduleAsFaults(abi.ChainEpoch(6), sectors[1:6]) require.NoError(t, err) _, err = queue.Root() @@ -563,15 +559,15 @@ func TestExpirationQueue(t *testing.T) { // label the last as recovering recovering := bitfield.NewFromSet([]uint64{6}) - removed, recoveringPower, err := queue.RemoveSectors(toRemove, faults, recovering, sectorSize) + removed, err := queue.RemoveSectors(toRemove, faults) require.NoError(t, err) // assert all return values are correct assertBitfieldEquals(t, removed.OnTimeSectors, 1, 4) assertBitfieldEquals(t, removed.EarlySectors, 5, 6) assert.Equal(t, abi.NewTokenAmount(1000+1003), removed.OnTimePledge) // only on-time sectors - assert.True(t, removed.ActivePower.Equals(miner.PowerForSectors(sectorSize, 
[]*miner.SectorOnChainInfo{sectors[0]})))
-		assert.True(t, removed.FaultyPower.Equals(miner.PowerForSectors(sectorSize, sectors[3:6])))
+		assert.Equal(t, uint64(1), removed.ActiveCount)
+		assert.Equal(t, uint64(3), removed.FaultyCount)
-		assert.True(t, recoveringPower.Equals(miner.PowerForSectors(sectorSize, sectors[5:6])))

 		// assert queue state is as expected
@@ -584,8 +580,8 @@ func TestExpirationQueue(t *testing.T) {
 		assertBitfieldEquals(t, set.OnTimeSectors, 2)
 		assertBitfieldEmpty(t, set.EarlySectors)
 		assert.Equal(t, abi.NewTokenAmount(1001), set.OnTimePledge)
-		assert.True(t, set.ActivePower.Equals(miner.NewPowerPairZero()))
-		assert.True(t, set.FaultyPower.Equals(miner.PowerForSectors(sectorSize, sectors[1:2])))
+		assert.Equal(t, uint64(0), set.ActiveCount)
+		assert.Equal(t, uint64(1), set.FaultyCount)

 		// only faulty on-time sector 3 is found in second set
 		requireNoExpirationGroupsBefore(t, 9, queue)
@@ -595,8 +591,8 @@ func TestExpirationQueue(t *testing.T) {
 		assertBitfieldEquals(t, set.OnTimeSectors, 3)
 		assertBitfieldEmpty(t, set.EarlySectors)
 		assert.Equal(t, abi.NewTokenAmount(1002), set.OnTimePledge)
-		assert.True(t, set.ActivePower.Equals(miner.NewPowerPairZero()))
-		assert.True(t, set.FaultyPower.Equals(miner.PowerForSectors(sectorSize, sectors[2:3])))
+		assert.Equal(t, uint64(0), set.ActiveCount)
+		assert.Equal(t, uint64(1), set.FaultyCount)

 		// no further sets remain
 		requireNoExpirationGroupsBefore(t, 20, queue)
@@ -604,7 +600,7 @@ func TestExpirationQueue(t *testing.T) {
 	t.Run("adding no sectors leaves the queue empty", func(t *testing.T) {
 		queue := emptyExpirationQueueWithQuantizing(t, builtin.NewQuantSpec(4, 1), testAmtBitwidth)
-		_, _, _, err := queue.AddActiveSectors(nil, sectorSize)
+		_, _, err := queue.AddActiveSectors(nil)
 		require.NoError(t, err)
 		assert.Zero(t, queue.Length())
 	})
@@ -612,12 +608,12 @@ func TestExpirationQueue(t *testing.T) {
 	t.Run("rescheduling no expirations as faults leaves the queue empty", func(t *testing.T) {
 		queue := emptyExpirationQueueWithQuantizing(t, builtin.NewQuantSpec(4, 1), testAmtBitwidth)
-		_, _, _, err := queue.AddActiveSectors(sectors, sectorSize)
+		_, _, err := queue.AddActiveSectors(sectors)
 		require.NoError(t, err)

 		// all sectors already expire before epoch 15, nothing should change.
 		length := queue.Length()
-		_, err = queue.RescheduleAsFaults(15, sectors, sectorSize)
+		err = queue.RescheduleAsFaults(15, sectors)
 		require.NoError(t, err)
 		assert.Equal(t, length, queue.Length())
 	})
@@ -625,7 +621,7 @@ func TestExpirationQueue(t *testing.T) {
 	t.Run("rescheduling all expirations as faults leaves the queue empty if it was empty", func(t *testing.T) {
 		queue := emptyExpirationQueueWithQuantizing(t, builtin.NewQuantSpec(4, 1), testAmtBitwidth)
-		_, _, _, err := queue.AddActiveSectors(sectors, sectorSize)
+		_, _, err := queue.AddActiveSectors(sectors)
 		require.NoError(t, err)

 		// all sectors already expire before epoch 15, nothing should change.
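A brief aside for reviewers, since the assertions above switch from power values to plain sector counts: with quality-adjusted power removed, every live sector contributes exactly its sector size in raw-byte power, so a count fully determines a set's power. Below is a minimal standalone sketch of that conversion; it mirrors the SectorsPower helper introduced later in this patch, and the package paths and the 32 GiB sector size are illustrative assumptions, not part of the change.

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
)

// sectorsPower mirrors miner.SectorsPower: raw-byte power of count equally sized sectors.
func sectorsPower(ssize abi.SectorSize, count int) abi.StoragePower {
	return big.Mul(big.NewIntUnsigned(uint64(ssize)), big.NewInt(int64(count)))
}

func main() {
	ssize := abi.SectorSize(32 << 30) // hypothetical 32 GiB sectors

	// An expiration set tracking 3 active and 1 faulty sector implies these power figures.
	fmt.Println("active power:", sectorsPower(ssize, 3))
	fmt.Println("faulty power:", sectorsPower(ssize, 1))
}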
@@ -637,7 +633,7 @@ func TestExpirationQueue(t *testing.T) {
 	t.Run("rescheduling no sectors as recovered leaves the queue empty", func(t *testing.T) {
 		queue := emptyExpirationQueueWithQuantizing(t, builtin.NewQuantSpec(4, 1), testAmtBitwidth)
-		_, err := queue.RescheduleRecovered(nil, sectorSize)
+		_, err := queue.RescheduleRecovered(nil)
 		require.NoError(t, err)
 		assert.Zero(t, queue.Length())
 	})
@@ -660,7 +656,7 @@ func requireNoExpirationGroupsBefore(t *testing.T, epoch abi.ChainEpoch, queue m
 		set, err := queue.PopUntil(epoch - 1)
 		require.NoError(t, err)

-		empty, err := set.IsEmpty()
-		require.NoError(t, err)
+		empty := set.IsEmpty()
 		require.True(t, empty)
 	}
diff --git a/actors/builtin/miner/miner_actor.go b/actors/builtin/miner/miner_actor.go
index 64bbdf105..9bfa2a3ba 100644
--- a/actors/builtin/miner/miner_actor.go
+++ b/actors/builtin/miner/miner_actor.go
@@ -448,7 +448,7 @@ func (a Actor) SubmitWindowedPoSt(rt Runtime, params *SubmitWindowedPoStParams)
 		}

 		// If we're not recovering power, record the proof for optimistic verification.
-		if postResult.RecoveredPower.IsZero() {
+		if postResult.RecoveredCount == uint64(0) {
 			err = deadline.RecordPoStProofs(store, postResult.Partitions, params.Proofs)
 			builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to record proof for optimistic verification", params.Deadline)
 		} else {
@@ -471,7 +471,8 @@ func (a Actor) SubmitWindowedPoSt(rt Runtime, params *SubmitWindowedPoStParams)
 	// NOTE: It would be permissible to delay the power loss until the deadline closes, but that would require
 	// additional accounting state.
 	// https://github.com/filecoin-project/specs-actors/issues/414
-	requestUpdatePower(rt, postResult.PowerDelta)
+	powerDelta := big.Mul(SectorPower(info.SectorSize), big.NewInt(postResult.ActiveCountDelta))
+	requestUpdatePower(rt, powerDelta)

 	rt.StateReadonly(&st)
 	err := st.CheckBalanceInvariants(rt.CurrentBalance())
@@ -507,16 +508,17 @@ func (a Actor) DisputeWindowedPoSt(rt Runtime, params *DisputeWindowedPoStParams
 	toBurn := abi.NewTokenAmount(0)
 	toReward := abi.NewTokenAmount(0)
 	pledgeDelta := abi.NewTokenAmount(0)
-	powerDelta := NewPowerPairZero()
+	activeCountDelta := int64(0)

 	var st State
+	var info *MinerInfo
 	rt.StateTransaction(&st, func() {
+		info = getMinerInfo(rt, &st)
 		dlInfo := st.DeadlineInfo(currEpoch)
 		if !deadlineAvailableForOptimisticPoStDispute(dlInfo.PeriodStart, params.Deadline, currEpoch) {
 			rt.Abortf(exitcode.ErrForbidden, "can only dispute window posts during the dispute window (%d epochs after the challenge window closes)", WPoStDisputeWindow)
 		}

-		info := getMinerInfo(rt, &st)
-		penalisedPower := NewPowerPairZero()
+		penalizedSectorCount := uint64(0)
 		store := adt.AsStore(rt)

 		// Check proof
@@ -545,9 +547,10 @@ func (a Actor) DisputeWindowedPoSt(rt Runtime, params *DisputeWindowedPoStParams
 		// Load the partition info we need for the dispute.
 		disputeInfo, err := dlCurrent.LoadPartitionsForDispute(store, partitions)
 		builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load partition info for dispute")
-		// This includes power that is no longer active (e.g., due to sector terminations).
+		// This includes sectors that are no longer active (e.g., due to sector terminations).
 		// It must only be used for penalty calculations, not power adjustments.
-		penalisedPower = disputeInfo.DisputedPower
+		penalizedSectorCount, err = disputeInfo.DisputedSectors.Count()
+		builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to count disputed sectors")

 		// Load sectors for the dispute.
sectors, err := LoadSectors(store, dlCurrent.SectorsSnapshot) @@ -571,7 +574,7 @@ func (a Actor) DisputeWindowedPoSt(rt Runtime, params *DisputeWindowedPoStParams // However, some of these sectors may have been // terminated. That's fine, we'll skip them. faultExpirationEpoch := targetDeadline.Last() + FaultMaxAge - powerDelta, err = dlCurrent.RecordFaults(store, sectors, info.SectorSize, QuantSpecForDeadline(targetDeadline), faultExpirationEpoch, disputeInfo.DisputedSectors) + activeCountDelta, err = dlCurrent.RecordFaults(store, sectors, info.SectorSize, QuantSpecForDeadline(targetDeadline), faultExpirationEpoch, disputeInfo.DisputedSectors) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to declare faults") err = deadlinesCurrent.UpdateDeadline(store, params.Deadline, dlCurrent) @@ -582,11 +585,12 @@ func (a Actor) DisputeWindowedPoSt(rt Runtime, params *DisputeWindowedPoStParams // Penalties. { + penalisedPower := SectorsPower(info.SectorSize, int(penalizedSectorCount)) // Calculate the base penalty. penaltyBase := PledgePenaltyForInvalidWindowPoSt( epochReward.ThisEpochRewardSmoothed, - pwrTotal.QualityAdjPowerSmoothed, - penalisedPower.QA, + pwrTotal.RawBytePowerSmoothed, + penalisedPower, ) // Calculate the target reward. @@ -614,6 +618,7 @@ func (a Actor) DisputeWindowedPoSt(rt Runtime, params *DisputeWindowedPoStParams } }) + powerDelta := big.Mul(SectorPower(info.SectorSize), big.NewInt(activeCountDelta)) requestUpdatePower(rt, powerDelta) if !toReward.IsZero() { @@ -683,9 +688,8 @@ func (a Actor) PreCommitSectorBatch(rt Runtime, params *PreCommitSectorBatchPara // Check per-sector preconditions before opening state transaction or sending other messages. challengeEarliest := currEpoch - MaxPreCommitRandomnessLookback - sectorsDeals := make([]market.SectorDeals, len(params.Sectors)) sectorNumbers := bitfield.New() - for i, precommit := range params.Sectors { + for _, precommit := range params.Sectors { // Bitfied.IsSet() is fast when there are only locally-set values. set, err := sectorNumbers.IsSet(uint64(precommit.SectorNumber)) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error checking sector number") @@ -718,25 +722,14 @@ func (a Actor) PreCommitSectorBatch(rt Runtime, params *PreCommitSectorBatchPara maxActivation := currEpoch + MaxProveCommitDuration[precommit.SealProof] validateExpiration(rt, maxActivation, precommit.Expiration, precommit.SealProof) - if precommit.ReplaceCapacity { + if precommit.ReplaceCapacity { // FIXME: remove ReplaceCapacity from the params now? rt.Abortf(exitcode.SysErrForbidden, "cc upgrade through precommit discontinued, use lightweight cc upgrade instead") } - - sectorsDeals[i] = market.SectorDeals{ - SectorExpiry: precommit.Expiration, - DealIDs: precommit.DealIDs, - } } // gather information from other actors rewardStats := requestCurrentEpochBlockReward(rt) pwrTotal := requestCurrentTotalPower(rt) - dealWeights := requestDealWeights(rt, sectorsDeals) - - if len(dealWeights.Sectors) != len(params.Sectors) { - rt.Abortf(exitcode.ErrIllegalState, "deal weight request returned %d records, expected %d", - len(dealWeights.Sectors), len(params.Sectors)) - } store := adt.AsStore(rt) var st State @@ -783,26 +776,21 @@ func (a Actor) PreCommitSectorBatch(rt Runtime, params *PreCommitSectorBatchPara rt.Abortf(exitcode.ErrIllegalArgument, "too many deals for sector %d > %d", len(precommit.DealIDs), dealCountMax) } - // Ensure total deal space does not exceed sector size. 
- dealWeight := dealWeights.Sectors[i] - if dealWeight.DealSpace > uint64(info.SectorSize) { - rt.Abortf(exitcode.ErrIllegalArgument, "deals too large to fit in sector %d > %d", dealWeight.DealSpace, info.SectorSize) - } - - // Estimate the sector weight using the current epoch as an estimate for activation, - // and compute the pre-commit deposit using that weight. - // The sector's power will be recalculated when it's proven. - duration := precommit.Expiration - currEpoch - sectorWeight := QAPowerForWeight(info.SectorSize, duration, dealWeight.DealWeight, dealWeight.VerifiedDealWeight) - depositReq := PreCommitDepositForPower(rewardStats.ThisEpochRewardSmoothed, pwrTotal.QualityAdjPowerSmoothed, sectorWeight) + sectorPower := SectorPower(info.SectorSize) + depositReq := PreCommitDepositForPower(rewardStats.ThisEpochRewardSmoothed, pwrTotal.RawBytePowerSmoothed, sectorPower) // Build on-chain record. chainInfos[i] = &SectorPreCommitOnChainInfo{ - Info: SectorPreCommitInfo(precommit), - PreCommitDeposit: depositReq, - PreCommitEpoch: currEpoch, - DealWeight: dealWeight.DealWeight, - VerifiedDealWeight: dealWeight.VerifiedDealWeight, + Info: SectorPreCommitInfo{ + SealProof: precommit.SealProof, + SectorNumber: precommit.SectorNumber, + SealedCID: precommit.SealedCID, + SealRandEpoch: precommit.SealRandEpoch, + DealIDs: precommit.DealIDs, + Expiration: precommit.Expiration, + }, + PreCommitDeposit: depositReq, + PreCommitEpoch: currEpoch, } totalDepositRequired = big.Add(totalDepositRequired, depositReq) @@ -955,7 +943,7 @@ func (a Actor) ProveCommitAggregate(rt Runtime, params *ProveCommitAggregatePara rew := requestCurrentEpochBlockReward(rt) pwr := requestCurrentTotalPower(rt) - confirmSectorProofsValid(rt, precommitsToConfirm, rew.ThisEpochBaselinePower, rew.ThisEpochRewardSmoothed, pwr.QualityAdjPowerSmoothed) + confirmSectorProofsValid(rt, precommitsToConfirm, rew.ThisEpochBaselinePower, rew.ThisEpochRewardSmoothed, pwr.RawBytePowerSmoothed) // Compute and burn the aggregate network fee. We need to re-load the state as // confirmSectorProofsValid can change it. @@ -1061,13 +1049,13 @@ func (a Actor) ConfirmSectorProofsValid(rt Runtime, params *builtin.ConfirmSecto precommittedSectors, err := st.FindPrecommittedSectors(store, params.Sectors...) 
builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load pre-committed sectors") - confirmSectorProofsValid(rt, precommittedSectors, params.RewardBaselinePower, params.RewardSmoothed, params.QualityAdjPowerSmoothed) + confirmSectorProofsValid(rt, precommittedSectors, params.RewardBaselinePower, params.RewardSmoothed, params.RawBytePowerSmoothed) return nil } func confirmSectorProofsValid(rt Runtime, preCommits []*SectorPreCommitOnChainInfo, thisEpochBaselinePower big.Int, - thisEpochRewardSmoothed smoothing.FilterEstimate, qualityAdjPowerSmoothed smoothing.FilterEstimate) { + thisEpochRewardSmoothed smoothing.FilterEstimate, powerSmoothed smoothing.FilterEstimate) { circulatingSupply := rt.TotalFilCircSupply() @@ -1113,6 +1101,7 @@ func confirmSectorProofsValid(rt Runtime, preCommits []*SectorPreCommitOnChainIn rt.Abortf(exitcode.ErrIllegalArgument, "all prove commits failed to validate") } + totalPledge := big.Zero() depositToUnlock := big.Zero() newSectors := make([]*SectorOnChainInfo, 0) @@ -1122,6 +1111,15 @@ func confirmSectorProofsValid(rt Runtime, preCommits []*SectorPreCommitOnChainIn rt.StateTransaction(&st, func() { info := getMinerInfo(rt, &st) + sectorPower := SectorPower(info.SectorSize) + dayReward := ExpectedRewardForPower(thisEpochRewardSmoothed, powerSmoothed, sectorPower, builtin.EpochsInDay) + // The storage pledge is recorded for use in computing the penalty if this sector is terminated + // before its declared expiration. + // It's not capped to 1 FIL, so can exceed the actual initial pledge requirement. + storagePledge := ExpectedRewardForPower(thisEpochRewardSmoothed, powerSmoothed, sectorPower, InitialPledgeProjectionPeriod) + initialPledge := InitialPledgeForPower(sectorPower, thisEpochBaselinePower, thisEpochRewardSmoothed, + powerSmoothed, circulatingSupply) + newSectorNos := make([]abi.SectorNumber, 0, len(validPreCommits)) for _, precommit := range validPreCommits { // compute initial pledge @@ -1131,15 +1129,6 @@ func confirmSectorProofsValid(rt Runtime, preCommits []*SectorPreCommitOnChainIn rt.Log(rtt.WARN, "precommit %d has lifetime %d less than minimum. ignoring", precommit.Info.SectorNumber, duration, MinSectorExpiration) continue } - pwr := QAPowerForWeight(info.SectorSize, duration, precommit.DealWeight, precommit.VerifiedDealWeight) - - dayReward := ExpectedRewardForPower(thisEpochRewardSmoothed, qualityAdjPowerSmoothed, pwr, builtin.EpochsInDay) - // The storage pledge is recorded for use in computing the penalty if this sector is terminated - // before its declared expiration. - // It's not capped to 1 FIL, so can exceed the actual initial pledge requirement. 
- storagePledge := ExpectedRewardForPower(thisEpochRewardSmoothed, qualityAdjPowerSmoothed, pwr, InitialPledgeProjectionPeriod) - initialPledge := InitialPledgeForPower(pwr, thisEpochBaselinePower, thisEpochRewardSmoothed, - qualityAdjPowerSmoothed, circulatingSupply) newSectorInfo := SectorOnChainInfo{ SectorNumber: precommit.Info.SectorNumber, @@ -1148,8 +1137,8 @@ func confirmSectorProofsValid(rt Runtime, preCommits []*SectorPreCommitOnChainIn DealIDs: precommit.Info.DealIDs, Expiration: precommit.Info.Expiration, Activation: activation, - DealWeight: precommit.DealWeight, - VerifiedDealWeight: precommit.VerifiedDealWeight, + //DealWeight: precommit.DealWeight, + //VerifiedDealWeight: precommit.VerifiedDealWeight, InitialPledge: initialPledge, ExpectedDayReward: dayReward, ExpectedStoragePledge: storagePledge, @@ -1169,7 +1158,7 @@ func confirmSectorProofsValid(rt Runtime, preCommits []*SectorPreCommitOnChainIn err = st.DeletePrecommittedSectors(store, newSectorNos...) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to delete precommited sectors") - err = st.AssignSectorsToDeadlines(store, rt.CurrEpoch(), newSectors, info.WindowPoStPartitionSectors, info.SectorSize) + err = st.AssignSectorsToDeadlines(store, rt.CurrEpoch(), newSectors, info.WindowPoStPartitionSectors) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to assign new sectors to deadlines") // Unlock deposit for successful proofs, make it available for lock-up as initial pledge. @@ -1268,7 +1257,6 @@ func (a Actor) ExtendSectorExpiration(rt Runtime, params *ExtendSectorExpiration currEpoch := rt.CurrEpoch() - powerDelta := NewPowerPairZero() pledgeDelta := big.Zero() store := adt.AsStore(rt) var st State @@ -1343,20 +1331,9 @@ func (a Actor) ExtendSectorExpiration(rt Runtime, params *ExtendSectorExpiration } validateExpiration(rt, sector.Activation, decl.NewExpiration, sector.SealProof) - // Remove "spent" deal weights - newDealWeight := big.Div( - big.Mul(sector.DealWeight, big.NewInt(int64(sector.Expiration-currEpoch))), - big.NewInt(int64(sector.Expiration-sector.Activation)), - ) - newVerifiedDealWeight := big.Div( - big.Mul(sector.VerifiedDealWeight, big.NewInt(int64(sector.Expiration-currEpoch))), - big.NewInt(int64(sector.Expiration-sector.Activation)), - ) newSector := *sector newSector.Expiration = decl.NewExpiration - newSector.DealWeight = newDealWeight - newSector.VerifiedDealWeight = newVerifiedDealWeight newSectors[i] = &newSector } @@ -1366,10 +1343,9 @@ func (a Actor) ExtendSectorExpiration(rt Runtime, params *ExtendSectorExpiration builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update sectors %v", decl.Sectors) // Remove old sectors from partition and assign new sectors. - partitionPowerDelta, partitionPledgeDelta, err := partition.ReplaceSectors(store, oldSectors, newSectors, info.SectorSize, quant) + partitionPledgeDelta, err := partition.ReplaceSectors(store, oldSectors, newSectors, quant) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to replace sector expirations at deadline %v partition %v", dlIdx, decl.Partition) - powerDelta = powerDelta.Add(partitionPowerDelta) pledgeDelta = big.Add(pledgeDelta, partitionPledgeDelta) // expected to be zero, see note below. 
err = partitions.Set(decl.Partition, &partition) @@ -1405,7 +1381,6 @@ func (a Actor) ExtendSectorExpiration(rt Runtime, params *ExtendSectorExpiration builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save deadlines") }) - requestUpdatePower(rt, powerDelta) // Note: the pledge delta is expected to be zero, since pledge is not re-calculated for the extension. // But in case that ever changes, we can do the right thing here. notifyPledgeChanged(rt, pledgeDelta) @@ -1474,13 +1449,14 @@ func (a Actor) TerminateSectors(rt Runtime, params *TerminateSectorsParams) *Ter var hadEarlyTerminations bool var st State + var info *MinerInfo store := adt.AsStore(rt) currEpoch := rt.CurrEpoch() - powerDelta := NewPowerPairZero() + activeCountDelta := int64(0) rt.StateTransaction(&st, func() { - hadEarlyTerminations = havePendingEarlyTerminations(rt, &st) + info = getMinerInfo(rt, &st) - info := getMinerInfo(rt, &st) + hadEarlyTerminations = havePendingEarlyTerminations(rt, &st) rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) deadlines, err := st.LoadDeadlines(adt.AsStore(rt)) @@ -1503,12 +1479,12 @@ func (a Actor) TerminateSectors(rt Runtime, params *TerminateSectorsParams) *Ter deadline, err := deadlines.LoadDeadline(store, dlIdx) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", dlIdx) - removedPower, err := deadline.TerminateSectors(store, sectors, currEpoch, partitionSectors, info.SectorSize, quant) + terminatedCount, err := deadline.TerminateSectors(store, sectors, currEpoch, partitionSectors, quant) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to terminate sectors in deadline %d", dlIdx) st.EarlyTerminations.Set(dlIdx) - powerDelta = powerDelta.Sub(removedPower) + activeCountDelta = activeCountDelta - int64(terminatedCount) err = deadlines.UpdateDeadline(store, dlIdx, deadline) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update deadline %d", dlIdx) @@ -1525,7 +1501,7 @@ func (a Actor) TerminateSectors(rt Runtime, params *TerminateSectorsParams) *Ter pwrTotal := requestCurrentTotalPower(rt) // Now, try to process these sectors. - more := processEarlyTerminations(rt, epochReward.ThisEpochRewardSmoothed, pwrTotal.QualityAdjPowerSmoothed) + more := processEarlyTerminations(rt, epochReward.ThisEpochRewardSmoothed, pwrTotal.RawBytePowerSmoothed) if more && !hadEarlyTerminations { // We have remaining terminations, and we didn't _previously_ // have early terminations to process, schedule a cron job. @@ -1539,6 +1515,7 @@ func (a Actor) TerminateSectors(rt Runtime, params *TerminateSectorsParams) *Ter err = st.CheckBalanceInvariants(rt.CurrentBalance()) builtin.RequireNoErr(rt, err, ErrBalanceInvariantBroken, "balance invariants broken") + powerDelta := big.Mul(SectorPower(info.SectorSize), big.NewInt(activeCountDelta)) requestUpdatePower(rt, powerDelta) return &TerminateSectorsReturn{Done: !more} } @@ -1582,9 +1559,10 @@ func (a Actor) DeclareFaults(rt Runtime, params *DeclareFaultsParams) *abi.Empty store := adt.AsStore(rt) var st State - powerDelta := NewPowerPairZero() + var info *MinerInfo + activeCountDelta := int64(0) rt.StateTransaction(&st, func() { - info := getMinerInfo(rt, &st) + info = getMinerInfo(rt, &st) rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) 
deadlines, err := st.LoadDeadlines(store) @@ -1605,13 +1583,13 @@ func (a Actor) DeclareFaults(rt Runtime, params *DeclareFaultsParams) *abi.Empty builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", dlIdx) faultExpirationEpoch := targetDeadline.Last() + FaultMaxAge - deadlinePowerDelta, err := deadline.RecordFaults(store, sectors, info.SectorSize, QuantSpecForDeadline(targetDeadline), faultExpirationEpoch, pm) + deadlineActiveCountDelta, err := deadline.RecordFaults(store, sectors, info.SectorSize, QuantSpecForDeadline(targetDeadline), faultExpirationEpoch, pm) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to declare faults for deadline %d", dlIdx) err = deadlines.UpdateDeadline(store, dlIdx, deadline) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to store deadline %d partitions", dlIdx) - powerDelta = powerDelta.Add(deadlinePowerDelta) + activeCountDelta = activeCountDelta + deadlineActiveCountDelta return nil }) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to iterate deadlines") @@ -1624,6 +1602,7 @@ func (a Actor) DeclareFaults(rt Runtime, params *DeclareFaultsParams) *abi.Empty // NOTE: It would be permissible to delay the power loss until the deadline closes, but that would require // additional accounting state. // https://github.com/filecoin-project/specs-actors/issues/414 + powerDelta := big.Mul(SectorPower(info.SectorSize), big.NewInt(activeCountDelta)) requestUpdatePower(rt, powerDelta) // Payment of penalty for declared faults is deferred to the deadline cron. @@ -1763,7 +1742,7 @@ func (a Actor) CompactPartitions(rt Runtime, params *CompactPartitionsParams) *a deadline, err := deadlines.LoadDeadline(store, params.Deadline) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", params.Deadline) - live, dead, removedPower, err := deadline.RemovePartitions(store, params.Partitions, quant) + live, dead, err := deadline.RemovePartitions(store, params.Partitions, quant) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to remove partitions from deadline %d", params.Deadline) err = st.DeleteSectors(store, dead) @@ -1773,11 +1752,11 @@ func (a Actor) CompactPartitions(rt Runtime, params *CompactPartitionsParams) *a builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load moved sectors") proven := true - addedPower, err := deadline.AddSectors(store, info.WindowPoStPartitionSectors, proven, sectors, info.SectorSize, quant) + addedCount, err := deadline.AddSectors(store, info.WindowPoStPartitionSectors, proven, sectors, quant) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to add back moved sectors") - if !removedPower.Equals(addedPower) { - rt.Abortf(exitcode.ErrIllegalState, "power changed when compacting partitions: was %v, is now %v", removedPower, addedPower) + if uint64(len(sectors)) != addedCount { + rt.Abortf(exitcode.ErrIllegalState, "live sector count when compacting partitions: removed %d, added %d", len(sectors), addedCount) } err = deadlines.UpdateDeadline(store, params.Deadline, deadline) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update deadline %d", params.Deadline) @@ -2083,7 +2062,6 @@ func (a Actor) ProveReplicaUpdates(rt Runtime, params *ProveReplicaUpdatesParams sectors, err := LoadSectors(store, stReadOnly.Sectors) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors array") - powerDelta := NewPowerPairZero() pledgeDelta := big.Zero() type 
updateAndSectorInfo struct { @@ -2091,7 +2069,6 @@ func (a Actor) ProveReplicaUpdates(rt Runtime, params *ProveReplicaUpdatesParams sectorInfo *SectorOnChainInfo } - var sectorsDeals []market.SectorDeals var sectorsDataSpec []*market.SectorDataSpec var validatedUpdates []*updateAndSectorInfo sectorNumbers := bitfield.New() @@ -2176,22 +2153,12 @@ func (a Actor) ProveReplicaUpdates(rt Runtime, params *ProveReplicaUpdatesParams update: &update, sectorInfo: sectorInfo, }) - - sectorsDeals = append(sectorsDeals, market.SectorDeals{DealIDs: update.Deals, SectorExpiry: sectorInfo.Expiration}) - sectorsDataSpec = append(sectorsDataSpec, &market.SectorDataSpec{ - SectorType: sectorInfo.SealProof, - DealIDs: update.Deals, - }) } builtin.RequireParam(rt, len(validatedUpdates) > 0, "no valid updates") // Errors past this point cause the ProveReplicaUpdates call to fail (no more skipping sectors) - dealWeights := requestDealWeights(rt, sectorsDeals) - builtin.RequirePredicate(rt, len(dealWeights.Sectors) == len(validatedUpdates), exitcode.ErrIllegalState, - "deal weight request returned %d records, expected %d", len(dealWeights.Sectors), len(validatedUpdates)) - unsealedSectorCIDs := requestUnsealedSectorCIDs(rt, sectorsDataSpec...) builtin.RequirePredicate(rt, len(unsealedSectorCIDs) == len(validatedUpdates), exitcode.ErrIllegalState, "unsealed sector cid request returned %d records, expected %d", len(unsealedSectorCIDs), len(validatedUpdates)) @@ -2199,7 +2166,6 @@ func (a Actor) ProveReplicaUpdates(rt Runtime, params *ProveReplicaUpdatesParams type updateWithDetails struct { update *ReplicaUpdate sectorInfo *SectorOnChainInfo - dealWeight market.SectorWeights unsealedSectorCID cid.Cid } @@ -2213,7 +2179,6 @@ func (a Actor) ProveReplicaUpdates(rt Runtime, params *ProveReplicaUpdatesParams declsByDeadline[updateWithSectorInfo.update.Deadline] = append(declsByDeadline[updateWithSectorInfo.update.Deadline], &updateWithDetails{ update: updateWithSectorInfo.update, sectorInfo: updateWithSectorInfo.sectorInfo, - dealWeight: dealWeights.Sectors[i], unsealedSectorCID: unsealedSectorCIDs[i], }) } @@ -2263,21 +2228,15 @@ func (a Actor) ProveReplicaUpdates(rt Runtime, params *ProveReplicaUpdatesParams newSectorInfo.DealIDs = updateWithDetails.update.Deals newSectorInfo.Activation = rt.CurrEpoch() - newSectorInfo.DealWeight = updateWithDetails.dealWeight.DealWeight - newSectorInfo.VerifiedDealWeight = updateWithDetails.dealWeight.VerifiedDealWeight - // compute initial pledge - duration := updateWithDetails.sectorInfo.Expiration - rt.CurrEpoch() - - pwr := QAPowerForWeight(info.SectorSize, duration, newSectorInfo.DealWeight, newSectorInfo.VerifiedDealWeight) - + pwr := SectorPower(info.SectorSize) newSectorInfo.ReplacedDayReward = updateWithDetails.sectorInfo.ExpectedDayReward - newSectorInfo.ExpectedDayReward = ExpectedRewardForPower(rewRet.ThisEpochRewardSmoothed, powRet.QualityAdjPowerSmoothed, pwr, builtin.EpochsInDay) - newSectorInfo.ExpectedStoragePledge = ExpectedRewardForPower(rewRet.ThisEpochRewardSmoothed, powRet.QualityAdjPowerSmoothed, pwr, InitialPledgeProjectionPeriod) + newSectorInfo.ExpectedDayReward = ExpectedRewardForPower(rewRet.ThisEpochRewardSmoothed, powRet.RawBytePowerSmoothed, pwr, builtin.EpochsInDay) + newSectorInfo.ExpectedStoragePledge = ExpectedRewardForPower(rewRet.ThisEpochRewardSmoothed, powRet.RawBytePowerSmoothed, pwr, InitialPledgeProjectionPeriod) newSectorInfo.ReplacedSectorAge = maxEpoch(0, rt.CurrEpoch()-updateWithDetails.sectorInfo.Activation) initialPledgeAtUpgrade := 
InitialPledgeForPower(pwr, rewRet.ThisEpochBaselinePower, rewRet.ThisEpochRewardSmoothed, - powRet.QualityAdjPowerSmoothed, rt.TotalFilCircSupply()) + powRet.RawBytePowerSmoothed, rt.TotalFilCircSupply()) if initialPledgeAtUpgrade.GreaterThan(updateWithDetails.sectorInfo.InitialPledge) { deficit := big.Sub(initialPledgeAtUpgrade, updateWithDetails.sectorInfo.InitialPledge) @@ -2303,15 +2262,10 @@ func (a Actor) ProveReplicaUpdates(rt Runtime, params *ProveReplicaUpdatesParams rt.Abortf(exitcode.ErrNotFound, "no such deadline %v partition %v", dlIdx, updateWithDetails.update.Partition) } - partitionPowerDelta, partitionPledgeDelta, err := partition.ReplaceSectors(store, - []*SectorOnChainInfo{updateWithDetails.sectorInfo}, - []*SectorOnChainInfo{&newSectorInfo}, - info.SectorSize, - quant) + partitionPledgeDelta, err := partition.ReplaceSectors(store, []*SectorOnChainInfo{updateWithDetails.sectorInfo}, []*SectorOnChainInfo{&newSectorInfo}, quant) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to replace sector at deadline %d partition %d", updateWithDetails.update.Deadline, updateWithDetails.update.Partition) - powerDelta = powerDelta.Add(partitionPowerDelta) pledgeDelta = big.Add(pledgeDelta, partitionPledgeDelta) err = partitions.Set(updateWithDetails.update.Partition, &partition) @@ -2347,7 +2301,6 @@ func (a Actor) ProveReplicaUpdates(rt Runtime, params *ProveReplicaUpdatesParams }) notifyPledgeChanged(rt, pledgeDelta) - requestUpdatePower(rt, powerDelta) return succeededSectors } @@ -2377,9 +2330,9 @@ func (a Actor) OnDeferredCronEvent(rt Runtime, params *builtin.DeferredCronEvent switch payload.EventType { case CronEventProvingDeadline: - handleProvingDeadline(rt, params.RewardSmoothed, params.QualityAdjPowerSmoothed) + handleProvingDeadline(rt, params.RewardSmoothed, params.RawBytePowerSmoothed) case CronEventProcessEarlyTerminations: - if processEarlyTerminations(rt, params.RewardSmoothed, params.QualityAdjPowerSmoothed) { + if processEarlyTerminations(rt, params.RewardSmoothed, params.RawBytePowerSmoothed) { scheduleEarlyTerminationWork(rt) } default: @@ -2400,7 +2353,7 @@ func (a Actor) OnDeferredCronEvent(rt Runtime, params *builtin.DeferredCronEvent // TODO: We're using the current power+epoch reward. Technically, we // should use the power/reward at the time of termination. // https://github.com/filecoin-project/specs-actors/v8/pull/648 -func processEarlyTerminations(rt Runtime, rewardSmoothed smoothing.FilterEstimate, qualityAdjPowerSmoothed smoothing.FilterEstimate) (more bool) { +func processEarlyTerminations(rt Runtime, rewardSmoothed smoothing.FilterEstimate, powerSmoothed smoothing.FilterEstimate) (more bool) { store := adt.AsStore(rt) var ( @@ -2443,7 +2396,7 @@ func processEarlyTerminations(rt Runtime, rewardSmoothed smoothing.FilterEstimat totalInitialPledge = big.Add(totalInitialPledge, sector.InitialPledge) } penalty = big.Add(penalty, terminationPenalty(info.SectorSize, epoch, - rewardSmoothed, qualityAdjPowerSmoothed, sectors)) + rewardSmoothed, powerSmoothed, sectors)) dealsToTerminate = append(dealsToTerminate, params) return nil @@ -2491,19 +2444,22 @@ func processEarlyTerminations(rt Runtime, rewardSmoothed smoothing.FilterEstimat // Invoked at the end of the last epoch for each proving deadline. 
func handleProvingDeadline(rt Runtime, rewardSmoothed smoothing.FilterEstimate, - qualityAdjPowerSmoothed smoothing.FilterEstimate) { + powerSmoothed smoothing.FilterEstimate) { currEpoch := rt.CurrEpoch() store := adt.AsStore(rt) hadEarlyTerminations := false - powerDeltaTotal := NewPowerPairZero() + activeCountDeltaTotal := int64(0) penaltyTotal := abi.NewTokenAmount(0) pledgeDeltaTotal := abi.NewTokenAmount(0) var continueCron bool var st State + var info *MinerInfo rt.StateTransaction(&st, func() { + info = getMinerInfo(rt, &st) + { // Vest locked funds. // This happens first so that any subsequent penalties are taken @@ -2538,13 +2494,14 @@ func handleProvingDeadline(rt Runtime, // Faults detected by this missed PoSt pay no penalty, but sectors that were already faulty // and remain faulty through this deadline pay the fault fee. + previouslyFaultyPower := big.Mul(SectorPower(info.SectorSize), big.NewIntUnsigned(result.PreviouslyFaultyCount)) penaltyTarget := PledgePenaltyForContinuedFault( rewardSmoothed, - qualityAdjPowerSmoothed, - result.PreviouslyFaultyPower.QA, + powerSmoothed, + previouslyFaultyPower, ) - powerDeltaTotal = powerDeltaTotal.Add(result.PowerDelta) + activeCountDeltaTotal = activeCountDeltaTotal + result.ActiveCountDelta pledgeDeltaTotal = big.Add(pledgeDeltaTotal, result.PledgeDelta) err = st.ApplyPenalty(penaltyTarget) @@ -2562,8 +2519,11 @@ func handleProvingDeadline(rt Runtime, st.DeadlineCronActive = false } }) + + powerDelta := big.Mul(SectorPower(info.SectorSize), big.NewInt(activeCountDeltaTotal)) + // Remove power for new faults, and burn penalties. - requestUpdatePower(rt, powerDeltaTotal) + requestUpdatePower(rt, powerDelta) burnFunds(rt, penaltyTotal, BurnMethodHandleProvingDeadline) notifyPledgeChanged(rt, pledgeDeltaTotal) @@ -2584,7 +2544,7 @@ func handleProvingDeadline(rt Runtime, // handle them at the next epoch. if !hadEarlyTerminations && hasEarlyTerminations { // First, try to process some of these terminations. - if processEarlyTerminations(rt, rewardSmoothed, qualityAdjPowerSmoothed) { + if processEarlyTerminations(rt, rewardSmoothed, powerSmoothed) { // If that doesn't work, just defer till the next epoch. scheduleEarlyTerminationWork(rt) } @@ -2640,7 +2600,7 @@ func enrollCronEvent(rt Runtime, eventEpoch abi.ChainEpoch, callbackPayload *Cro builtin.RequireSuccess(rt, code, "failed to enroll cron event") } -func requestUpdatePower(rt Runtime, delta PowerPair) { +func requestUpdatePower(rt Runtime, delta abi.StoragePower) { if delta.IsZero() { return } @@ -2648,8 +2608,7 @@ func requestUpdatePower(rt Runtime, delta PowerPair) { builtin.StoragePowerActorAddr, builtin.MethodsPower.UpdateClaimedPower, &power.UpdateClaimedPowerParams{ - RawByteDelta: delta.Raw, - QualityAdjustedDelta: delta.QA, + RawByteDelta: delta, }, abi.NewTokenAmount(0), &builtin.Discard{}, @@ -2800,40 +2759,6 @@ func requestUnsealedSectorCIDs(rt Runtime, dataCommitmentInputs ...*market.Secto return unsealedCIDs } -func requestDealWeights(rt Runtime, sectors []market.SectorDeals) *market.VerifyDealsForActivationReturn { - // Short-circuit if there are no deals in any of the sectors. 
- dealCount := 0 - for _, sector := range sectors { - dealCount += len(sector.DealIDs) - } - if dealCount == 0 { - emptyResult := &market.VerifyDealsForActivationReturn{ - Sectors: make([]market.SectorWeights, len(sectors)), - } - for i := 0; i < len(sectors); i++ { - emptyResult.Sectors[i] = market.SectorWeights{ - DealSpace: 0, - DealWeight: big.Zero(), - VerifiedDealWeight: big.Zero(), - } - } - return emptyResult - } - - var dealWeights market.VerifyDealsForActivationReturn - code := rt.Send( - builtin.StorageMarketActorAddr, - builtin.MethodsMarket.VerifyDealsForActivation, - &market.VerifyDealsForActivationParams{ - Sectors: sectors, - }, - abi.NewTokenAmount(0), - &dealWeights, - ) - builtin.RequireSuccess(rt, code, "failed to verify deals and get deal weight") - return &dealWeights -} - // Requests the current epoch target block reward from the reward actor. // return value includes reward, smoothed estimate of reward, and baseline power func requestCurrentEpochBlockReward(rt Runtime) reward.ThisEpochRewardReturn { @@ -3002,35 +2927,23 @@ func validatePartitionContainsSectors(partition *Partition, sectors bitfield.Bit } func terminationPenalty(sectorSize abi.SectorSize, currEpoch abi.ChainEpoch, - rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, sectors []*SectorOnChainInfo) abi.TokenAmount { + rewardEstimate, networkPowerEstimate smoothing.FilterEstimate, sectors []*SectorOnChainInfo) abi.TokenAmount { totalFee := big.Zero() + sectorPower := SectorPower(sectorSize) for _, s := range sectors { - sectorPower := QAPowerForSector(sectorSize, s) fee := PledgePenaltyForTermination(s.ExpectedDayReward, currEpoch-s.Activation, s.ExpectedStoragePledge, - networkQAPowerEstimate, sectorPower, rewardEstimate, s.ReplacedDayReward, s.ReplacedSectorAge) + networkPowerEstimate, sectorPower, rewardEstimate, s.ReplacedDayReward, s.ReplacedSectorAge) totalFee = big.Add(fee, totalFee) } return totalFee } -func PowerForSector(sectorSize abi.SectorSize, sector *SectorOnChainInfo) PowerPair { - return PowerPair{ - Raw: big.NewIntUnsigned(uint64(sectorSize)), - QA: QAPowerForSector(sectorSize, sector), - } +func SectorPower(ssize abi.SectorSize) abi.StoragePower { + return big.NewIntUnsigned(uint64(ssize)) } -// Returns the sum of the raw byte and quality-adjusted power for sectors. 
-func PowerForSectors(ssize abi.SectorSize, sectors []*SectorOnChainInfo) PowerPair { - qa := big.Zero() - for _, s := range sectors { - qa = big.Add(qa, QAPowerForSector(ssize, s)) - } - - return PowerPair{ - Raw: big.Mul(big.NewIntUnsigned(uint64(ssize)), big.NewIntUnsigned(uint64(len(sectors)))), - QA: qa, - } +func SectorsPower(ssize abi.SectorSize, count int) abi.StoragePower { + return big.Mul(SectorPower(ssize), big.NewInt(int64(count))) } func ConsensusFaultActive(info *MinerInfo, currEpoch abi.ChainEpoch) bool { diff --git a/actors/builtin/miner/miner_commitment_test.go b/actors/builtin/miner/miner_commitment_test.go index 5df91c14d..8b9807bb7 100644 --- a/actors/builtin/miner/miner_commitment_test.go +++ b/actors/builtin/miner/miner_commitment_test.go @@ -271,7 +271,7 @@ func TestCommitments(t *testing.T) { rt.Reset() // Deals too large for sector - dealWeight := big.Mul(big.NewIntUnsigned(32<<30), big.NewInt(int64(expiration-rt.Epoch()))) + dealWeight := big.Mul(miner.SectorPower(32<<30), big.NewInt(int64(expiration-rt.Epoch()))) rt.ExpectAbortContainsMessage(exitcode.ErrIllegalArgument, "deals too large", func() { actor.preCommitSector(rt, actor.makePreCommit(0, challengeEpoch, expiration, []abi.DealID{1}), preCommitConf{ dealWeight: dealWeight, @@ -714,7 +714,7 @@ func TestProveCommit(t *testing.T) { expectedPower := big.Mul(big.NewInt(int64(actor.sectorSize)), big.Div(builtin.VerifiedDealWeightMultiplier, builtin.QualityBaseMultiplier)) qaPower := miner.QAPowerForWeight(actor.sectorSize, precommit.Info.Expiration-rt.Epoch(), precommit.DealWeight, precommit.VerifiedDealWeight) assert.Equal(t, expectedPower, qaPower) - sectorPower := miner.NewPowerPair(big.NewIntUnsigned(uint64(actor.sectorSize)), qaPower) + sectorPower := miner.SectorPower(actor.sectorSize) // expect deal weights to be transferred to on chain info assert.Equal(t, precommit.DealWeight, sector.DealWeight) @@ -755,8 +755,8 @@ func TestProveCommit(t *testing.T) { assertBitfieldEquals(t, entry.OnTimeSectors, uint64(sectorNo)) assertEmptyBitfield(t, entry.EarlySectors) assert.Equal(t, expectedInitialPledge, entry.OnTimePledge) - assert.Equal(t, sectorPower, entry.ActivePower) - assert.Equal(t, miner.NewPowerPairZero(), entry.FaultyPower) + assert.Equal(t, sectorPower, entry.ActiveCount) + assert.Equal(t, miner.NewPowerPairZero(), entry.FaultyCount) }) t.Run("prove sectors from batch pre-commit", func(t *testing.T) { @@ -1181,7 +1181,7 @@ func TestAggregateProveCommit(t *testing.T) { })) - sectorPower := miner.NewPowerPair(big.NewIntUnsigned(uint64(actor.sectorSize)), qaPower) + sectorPower := miner.SectorPower(actor.sectorSize) tenSectorsPower := miner.NewPowerPair(big.Mul(big.NewInt(10), sectorPower.Raw), big.Mul(big.NewInt(10), sectorPower.QA)) dlIdx := uint64(0) @@ -1213,8 +1213,8 @@ func TestAggregateProveCommit(t *testing.T) { assert.Equal(t, entry.OnTimeSectors, sectorNosBf) assertEmptyBitfield(t, entry.EarlySectors) assert.Equal(t, tenSectorsInitialPledge, entry.OnTimePledge) - assert.Equal(t, tenSectorsPower, entry.ActivePower) - assert.Equal(t, miner.NewPowerPairZero(), entry.FaultyPower) + assert.Equal(t, tenSectorsPower, entry.ActiveCount) + assert.Equal(t, miner.NewPowerPairZero(), entry.FaultyCount) // expect 10x locked initial pledge of sector to be the same as pledge requirement assert.Equal(t, tenSectorsInitialPledge, st.InitialPledge) diff --git a/actors/builtin/miner/miner_state.go b/actors/builtin/miner/miner_state.go index 565837d62..66fbd4e8f 100644 --- a/actors/builtin/miner/miner_state.go +++ 
b/actors/builtin/miner/miner_state.go @@ -141,11 +141,12 @@ type SectorPreCommitInfo struct { SealRandEpoch abi.ChainEpoch DealIDs []abi.DealID Expiration abi.ChainEpoch - ReplaceCapacity bool // Whether to replace a "committed capacity" no-deal sector (requires non-empty DealIDs) - // The committed capacity sector to replace, and it's deadline/partition location - ReplaceSectorDeadline uint64 - ReplaceSectorPartition uint64 - ReplaceSectorNumber abi.SectorNumber + //ReplaceCapacity bool // Whether to replace a "committed capacity" no-deal sector (requires non-empty DealIDs) + //// The committed capacity sector to replace, and it's deadline/partition location. + //// Only applicable to sectors committed before v7. + //ReplaceSectorDeadline uint64 + //ReplaceSectorPartition uint64 + //ReplaceSectorNumber abi.SectorNumber } // Information stored on-chain for a pre-committed sector. @@ -153,8 +154,8 @@ type SectorPreCommitOnChainInfo struct { Info SectorPreCommitInfo PreCommitDeposit abi.TokenAmount PreCommitEpoch abi.ChainEpoch - DealWeight abi.DealWeight // Integral of active deals over sector lifetime - VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime + //DealWeight abi.DealWeight // Integral of active deals over sector lifetime + //VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime } // Information stored on-chain for a proven sector. @@ -165,8 +166,10 @@ type SectorOnChainInfo struct { DealIDs []abi.DealID Activation abi.ChainEpoch // Epoch during which the sector proof was accepted Expiration abi.ChainEpoch // Epoch during which the sector expires - DealWeight abi.DealWeight // Integral of active deals over sector lifetime - VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime + //DealWeight abi.DealWeight // Integral of active deals over sector lifetime + //VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime + // Initial pledge and expected reward/pledge are technically derivable from the activation epoch and a history + // of network power, but retained for convenient and transparent access. InitialPledge abi.TokenAmount // Pledge collected to commit this sector ExpectedDayReward abi.TokenAmount // Expected one day projection of reward for sector computed at activation time ExpectedStoragePledge abi.TokenAmount // Expected twenty day projection of reward for sector computed at activation time @@ -528,9 +531,8 @@ func (st *State) FindSector(store adt.Store, sno abi.SectorNumber) (uint64, uint } // Assign new sectors to deadlines. -func (st *State) AssignSectorsToDeadlines( - store adt.Store, currentEpoch abi.ChainEpoch, sectors []*SectorOnChainInfo, partitionSize uint64, sectorSize abi.SectorSize, -) error { +func (st *State) AssignSectorsToDeadlines(store adt.Store, currentEpoch abi.ChainEpoch, sectors []*SectorOnChainInfo, + partitionSize uint64) error { deadlines, err := st.LoadDeadlines(store) if err != nil { return err @@ -567,7 +569,7 @@ func (st *State) AssignSectorsToDeadlines( // The power returned from AddSectors is ignored because it's not activated (proven) yet. 
proven := false - if _, err := dl.AddSectors(store, partitionSize, proven, deadlineSectors, sectorSize, quant); err != nil { + if _, err := dl.AddSectors(store, partitionSize, proven, deadlineSectors, quant); err != nil { return err } @@ -1083,12 +1085,12 @@ func (st *State) CleanUpExpiredPreCommits(store adt.Store, currEpoch abi.ChainEp type AdvanceDeadlineResult struct { PledgeDelta abi.TokenAmount - PowerDelta PowerPair - PreviouslyFaultyPower PowerPair // Power that was faulty before this advance (including recovering) - DetectedFaultyPower PowerPair // Power of new faults and failed recoveries - TotalFaultyPower PowerPair // Total faulty power after detecting faults (before expiring sectors) - // Note that failed recovery power is included in both PreviouslyFaultyPower and DetectedFaultyPower, - // so TotalFaultyPower is not simply their sum. + ActiveCountDelta int64 + PreviouslyFaultyCount uint64 // Count of sectors that were faulty before this advance (including recovering) + DetectedFaultyCount uint64 // Count of new faults and failed recoveries + TotalFaultyCount uint64 // Total faulty count after detecting faults (before expiring sectors) + // Note that failed recoveries are included in both PreviouslyFaultyCount and DetectedFaultyCount, + // so TotalFaultyCount is not simply their sum. } // AdvanceDeadline advances the deadline. It: @@ -1097,10 +1099,10 @@ type AdvanceDeadlineResult struct { // - Returns the changes to power & pledge, and faulty power (both declared and undeclared). func (st *State) AdvanceDeadline(store adt.Store, currEpoch abi.ChainEpoch) (*AdvanceDeadlineResult, error) { pledgeDelta := abi.NewTokenAmount(0) - powerDelta := NewPowerPairZero() + activeCountDelta := int64(0) - var totalFaultyPower PowerPair - detectedFaultyPower := NewPowerPairZero() + totalFaultyCount := uint64(0) + detectedFaultyCount := uint64(0) // Note: Use dlInfo.Last() rather than rt.CurrEpoch unless certain // of the desired semantics. In the past, this method would sometimes be @@ -1120,10 +1122,10 @@ func (st *State) AdvanceDeadline(store adt.Store, currEpoch abi.ChainEpoch) (*Ad if !dlInfo.PeriodStarted() { return &AdvanceDeadlineResult{ pledgeDelta, - powerDelta, - NewPowerPairZero(), - NewPowerPairZero(), - NewPowerPairZero(), + activeCountDelta, + 0, + 0, + 0, }, nil } @@ -1143,7 +1145,7 @@ func (st *State) AdvanceDeadline(store adt.Store, currEpoch abi.ChainEpoch) (*Ad return nil, xerrors.Errorf("failed to load deadline %d: %w", dlInfo.Index, err) } - previouslyFaultyPower := deadline.FaultyPower + previouslyFaultyCount := deadline.FaultySectors // No live sectors in this deadline, nothing to do. if live, err := deadline.IsLive(); err != nil { @@ -1151,10 +1153,10 @@ func (st *State) AdvanceDeadline(store adt.Store, currEpoch abi.ChainEpoch) (*Ad } else if !live { return &AdvanceDeadlineResult{ pledgeDelta, - powerDelta, - previouslyFaultyPower, - detectedFaultyPower, - deadline.FaultyPower, + activeCountDelta, + previouslyFaultyCount, + detectedFaultyCount, + deadline.FaultySectors, }, nil } @@ -1163,15 +1165,15 @@ func (st *State) AdvanceDeadline(store adt.Store, currEpoch abi.ChainEpoch) (*Ad // Detect and penalize missing proofs.
faultExpiration := dlInfo.Last() + FaultMaxAge - // detectedFaultyPower is new faults and failed recoveries - powerDelta, detectedFaultyPower, err = deadline.ProcessDeadlineEnd(store, quant, faultExpiration, st.Sectors) + // detectedFaultyCount is new faults and failed recoveries + activeCountDelta, detectedFaultyCount, err = deadline.ProcessDeadlineEnd(store, quant, faultExpiration, st.Sectors) if err != nil { return nil, xerrors.Errorf("failed to process end of deadline %d: %w", dlInfo.Index, err) } // Capture deadline's faulty power after new faults have been detected, but before it is // dropped along with faulty sectors expiring this round. - totalFaultyPower = deadline.FaultyPower + totalFaultyCount = deadline.FaultySectors } { // Expire sectors that are due, either for on-time expiration or "early" faulty-for-too-long. @@ -1190,7 +1192,7 @@ func (st *State) AdvanceDeadline(store adt.Store, currEpoch abi.ChainEpoch) (*Ad // Record reduction in power of the amount of expiring active power. // Faulty power has already been lost, so the amount expiring can be excluded from the delta. - powerDelta = powerDelta.Sub(expired.ActivePower) + activeCountDelta = activeCountDelta - int64(expired.ActiveCount) // Record deadlines with early terminations. While this // bitfield is non-empty, the miner is locked until they @@ -1219,10 +1221,10 @@ func (st *State) AdvanceDeadline(store adt.Store, currEpoch abi.ChainEpoch) (*Ad // Be very careful when changing these as any changes can affect rounding. return &AdvanceDeadlineResult{ PledgeDelta: pledgeDelta, - PowerDelta: powerDelta, - PreviouslyFaultyPower: previouslyFaultyPower, - DetectedFaultyPower: detectedFaultyPower, - TotalFaultyPower: totalFaultyPower, + ActiveCountDelta: activeCountDelta, + PreviouslyFaultyCount: previouslyFaultyCount, + DetectedFaultyCount: detectedFaultyCount, + TotalFaultyCount: totalFaultyCount, }, nil } diff --git a/actors/builtin/miner/miner_state_test.go b/actors/builtin/miner/miner_state_test.go index 471b02154..64720443c 100644 --- a/actors/builtin/miner/miner_state_test.go +++ b/actors/builtin/miner/miner_state_test.go @@ -687,8 +687,7 @@ func TestSectorAssignment(t *testing.T) { t.Run("assign sectors to deadlines", func(t *testing.T) { harness := constructStateHarness(t, abi.ChainEpoch(0)) - err := harness.s.AssignSectorsToDeadlines(harness.store, 0, sectorInfos, - partitionSectors, sectorSize) + err := harness.s.AssignSectorsToDeadlines(harness.store, 0, sectorInfos, partitionSectors) require.NoError(t, err) sectorArr := sectorsArr(t, harness.store, sectorInfos) @@ -730,14 +729,14 @@ func TestSectorAssignment(t *testing.T) { result, err := dl.RecordProvenSectors(harness.store, sectorArr, sectorSize, quantSpec, 0, postPartitions) require.NoError(t, err) - expectedPowerDelta := miner.PowerForSectors(sectorSize, selectSectors(t, sectorInfos, allSectorBf)) + expectedPowerDelta := miner.SectorsPower(sectorSize, len(allSectorNos)) assertBitfieldsEqual(t, allSectorBf, result.Sectors) assertBitfieldEmpty(t, result.IgnoredSectors) - assert.True(t, result.NewFaultyPower.Equals(miner.NewPowerPairZero())) - assert.True(t, result.PowerDelta.Equals(expectedPowerDelta)) - assert.True(t, result.RecoveredPower.Equals(miner.NewPowerPairZero())) - assert.True(t, result.RetractedRecoveryPower.Equals(miner.NewPowerPairZero())) + assert.True(t, result.NewFaultyCount.Equals(miner.NewPowerPairZero())) + assert.True(t, result.ActiveCountDelta.Equals(expectedPowerDelta)) + assert.True(t, 
result.RecoveredCount.Equals(miner.NewPowerPairZero())) + assert.True(t, result.RetractedRecoveryCount.Equals(miner.NewPowerPairZero())) return nil })) @@ -1011,8 +1010,6 @@ func newPreCommitOnChain(sectorNo abi.SectorNumber, sealed cid.Cid, deposit abi. Info: *info, PreCommitDeposit: deposit, PreCommitEpoch: epoch, - DealWeight: big.Zero(), - VerifiedDealWeight: big.Zero(), } } @@ -1025,8 +1022,6 @@ func newSectorOnChainInfo(sectorNo abi.SectorNumber, sealed cid.Cid, weight big. DealIDs: nil, Activation: activation, Expiration: abi.ChainEpoch(1), - DealWeight: weight, - VerifiedDealWeight: weight, InitialPledge: abi.NewTokenAmount(0), ExpectedDayReward: abi.NewTokenAmount(0), ExpectedStoragePledge: abi.NewTokenAmount(0), diff --git a/actors/builtin/miner/miner_test.go b/actors/builtin/miner/miner_test.go index 3b1fd22d8..7f91d014b 100644 --- a/actors/builtin/miner/miner_test.go +++ b/actors/builtin/miner/miner_test.go @@ -402,7 +402,7 @@ func TestWindowPost(t *testing.T) { actor.constructAndVerify(rt) store := rt.AdtStore() sector := actor.commitAndProveSectors(rt, 1, defaultSectorExpiration, nil, true)[0] - pwr := miner.PowerForSector(actor.sectorSize, sector) + pwr := miner.SectorPower(actor.sectorSize) // Skip to the right deadline. st := getState(rt) @@ -466,7 +466,7 @@ func TestWindowPost(t *testing.T) { actor.constructAndVerify(rt) store := rt.AdtStore() sector := actor.commitAndProveSectors(rt, 1, defaultSectorExpiration, nil, true)[0] - pwr := miner.PowerForSector(actor.sectorSize, sector) + pwr := miner.SectorPower(actor.sectorSize) // Skip to the due deadline. dlIdx, pIdx, err := getState(rt).FindSector(store, sector.SectorNumber) @@ -705,7 +705,7 @@ func TestWindowPost(t *testing.T) { actor.constructAndVerify(rt) store := rt.AdtStore() sector := actor.commitAndProveSectors(rt, 1, defaultSectorExpiration, nil, true)[0] - pwr := miner.PowerForSector(actor.sectorSize, sector) + pwr := miner.SectorPower(actor.sectorSize) // Skip to the due deadline. st := getState(rt) @@ -814,7 +814,7 @@ func TestWindowPost(t *testing.T) { {Index: 1, Skipped: bitfield.New()}, } sectorsToProve := []*miner.SectorOnChainInfo{lastSector} - pwr := miner.PowerForSectors(actor.sectorSize, sectorsToProve) + pwr := miner.SectorsPower(actor.sectorSize, len(sectorsToProve)) actor.submitWindowPoSt(rt, dlinfo, partitions, sectorsToProve, &poStConfig{ expectedPowerDelta: pwr, }) @@ -837,7 +837,7 @@ func TestWindowPost(t *testing.T) { Build(t) actor.constructAndVerify(rt) infos := actor.commitAndProveSectors(rt, 1, defaultSectorExpiration, nil, true) - pwr := miner.PowerForSectors(actor.sectorSize, infos) + pwr := miner.SectorsPower(actor.sectorSize, len(infos)) actor.applyRewards(rt, bigRewards, big.Zero()) initialLocked := actor.getLockedFunds(rt) @@ -921,7 +921,7 @@ func TestWindowPost(t *testing.T) { // Now submit PoSt with a skipped fault for first sector // First sector's power should not be activated. - powerActive := miner.PowerForSectors(actor.sectorSize, infos[1:]) + powerActive := miner.SectorsPower(actor.sectorSize, len(infos)-1) cfg := &poStConfig{ expectedPowerDelta: powerActive, } @@ -1097,7 +1097,7 @@ func TestWindowPost(t *testing.T) { actor.constructAndVerify(rt) store := rt.AdtStore() sector := actor.commitAndProveSectors(rt, 1, defaultSectorExpiration, nil, true)[0] - pwr := miner.PowerForSector(actor.sectorSize, sector) + pwr := miner.SectorPower(actor.sectorSize) // Skip to the due deadline.
st := getState(rt) @@ -1259,7 +1259,7 @@ func TestWindowPost(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, targetSectors) - pwr := miner.PowerForSectors(actor.sectorSize, targetSectors) + pwr := miner.SectorsPower(actor.sectorSize, len(targetSectors)) // And challenge the last partition. var result *poStDisputeResult @@ -1304,7 +1304,7 @@ func TestDeadlineCron(t *testing.T) { sectors := actor.commitAndProveSectors(rt, 1, defaultSectorExpiration, nil, true) // advance cron to activate power. advanceAndSubmitPoSts(rt, actor, sectors...) - activePower := miner.PowerForSectors(actor.sectorSize, sectors) + activePower := miner.SectorsPower(actor.sectorSize, len(sectors)) st := getState(rt) initialPledge := st.InitialPledge @@ -1344,7 +1344,7 @@ func TestDeadlineCron(t *testing.T) { sectors := actor.commitAndProveSectors(rt, 1, defaultSectorExpiration, nil, true) // advance cron to activate power. advanceAndSubmitPoSts(rt, actor, sectors...) - activePower := miner.PowerForSectors(actor.sectorSize, sectors) + activePower := miner.SectorsPower(actor.sectorSize, len(sectors)) st := getState(rt) initialPledge := st.InitialPledge @@ -1394,10 +1394,10 @@ func TestDeadlineCron(t *testing.T) { activeSectors := actor.commitAndProveSectors(rt, 2, defaultSectorExpiration, nil, true) // advance cron to activate power. advanceAndSubmitPoSts(rt, actor, activeSectors...) - activePower := miner.PowerForSectors(actor.sectorSize, activeSectors) + activePower := miner.SectorsPower(actor.sectorSize, len(activeSectors)) unprovenSectors := actor.commitAndProveSectors(rt, 1, defaultSectorExpiration, nil, false) - unprovenPower := miner.PowerForSectors(actor.sectorSize, unprovenSectors) + unprovenPower := miner.SectorsPower(actor.sectorSize, len(unprovenSectors)) totalPower := unprovenPower.Add(activePower) allSectors := append(activeSectors, unprovenSectors...) 
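// Editorial aside, not part of the change: a minimal sketch of why the test expectations above can
// replace PowerForSectors with SectorsPower. With quality adjustment removed, the power of a uniform
// batch of sectors is just per-sector size times count, so the two helpers introduced in miner.go agree
// by construction. The 32GiB sector size and v8 import paths below are assumptions for illustration.
package miner_test

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/specs-actors/v8/actors/builtin/miner"
)

func ExampleSectorsPower() {
	ssize := abi.SectorSize(32 << 30)     // assumed 32GiB sectors
	perSector := miner.SectorPower(ssize) // raw-byte power of one sector
	batch := miner.SectorsPower(ssize, 10) // raw-byte power of ten sectors
	fmt.Println(batch.Equals(big.Mul(perSector, big.NewInt(10))))
	// Output: true
}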
@@ -1478,7 +1478,7 @@ func TestDeadlineCron(t *testing.T) { rt.SetEpoch(dlinfo.Last()) // run cron and expect all sectors to be detected as faults (no penalty) - pwr := miner.PowerForSectors(actor.sectorSize, allSectors) + pwr := miner.SectorsPower(actor.sectorSize, len(allSectors)) // power for sectors is removed powerDeltaClaim := miner.NewPowerPair(pwr.Raw.Neg(), pwr.QA.Neg()) @@ -1652,7 +1652,7 @@ func TestDeclareFaults(t *testing.T) { rt := builder.Build(t) actor.constructAndVerify(rt) allSectors := actor.commitAndProveSectors(rt, 1, defaultSectorExpiration, nil, true) - pwr := miner.PowerForSectors(actor.sectorSize, allSectors) + pwr := miner.SectorsPower(actor.sectorSize, len(allSectors)) // add lots of funds so penalties come from vesting funds actor.applyRewards(rt, bigRewards, big.Zero()) @@ -1743,7 +1743,7 @@ func TestDeclareRecoveries(t *testing.T) { } // Can't pay during this deadline so miner goes into fee debt - ongoingPwr := miner.PowerForSectors(actor.sectorSize, oneSector) + ongoingPwr := miner.SectorPower(actor.sectorSize) ff := miner.PledgePenaltyForContinuedFault(actor.epochRewardSmooth, actor.epochQAPowerSmooth, ongoingPwr.QA) advanceDeadline(rt, actor, &cronConfig{ continuedFaultsPenalty: big.Zero(), // fee is instead added to debt @@ -1951,13 +1951,13 @@ func TestExtendSectorExpiration(t *testing.T) { _, partition := actor.getDeadlineAndPartition(rt, dlIdx, pIdx) expirationSet, err := partition.PopExpiredSectors(rt.AdtStore(), newExpiration-1, quant) require.NoError(t, err) - empty, err := expirationSet.IsEmpty() + empty := expirationSet.IsEmpty() require.NoError(t, err) assert.True(t, empty) expirationSet, err = partition.PopExpiredSectors(rt.AdtStore(), quant.QuantizeUp(newExpiration), quant) require.NoError(t, err) - empty, err = expirationSet.IsEmpty() + empty = expirationSet.IsEmpty() require.NoError(t, err) assert.False(t, empty) @@ -2099,7 +2099,7 @@ func TestExtendSectorExpiration(t *testing.T) { actor.submitWindowPoSt(rt, dlinfo, partitions, []*miner.SectorOnChainInfo{newSector}, nil) // advance one more time. No missed PoSt fees are charged. Total Power and pledge are lowered. - pwr := miner.PowerForSectors(actor.sectorSize, []*miner.SectorOnChainInfo{newSector}).Neg() + pwr := miner.SectorPower(actor.sectorSize).Neg() advanceDeadline(rt, actor, &cronConfig{ noEnrollment: true, expiredSectorsPowerDelta: &pwr, @@ -3927,7 +3927,7 @@ func (h *actorHarness) preCommitSector(rt *mock.Runtime, params *miner.PreCommit VerifiedDealWeight: conf.verifiedDealWeight, }}, } - rt.ExpectSend(builtin.StorageMarketActorAddr, builtin.MethodsMarket.VerifyDealsForActivation, &vdParams, big.Zero(), &vdReturn, exitcode.Ok) + rt.ExpectSend(builtin.StorageMarketActorAddr, builtin.MethodsMarket.Deprecated1, &vdParams, big.Zero(), &vdReturn, exitcode.Ok) } else { // Ensure the deal IDs and configured deal weight returns are consistent. 
require.Equal(h.t, abi.SectorSize(0), conf.dealSpace, "no deals but positive deal space configured") @@ -3996,7 +3996,7 @@ func (h *actorHarness) preCommitSectorBatch(rt *mock.Runtime, params *miner.PreC vdReturn := market.VerifyDealsForActivationReturn{ Sectors: sectorWeights, } - rt.ExpectSend(builtin.StorageMarketActorAddr, builtin.MethodsMarket.VerifyDealsForActivation, &vdParams, big.Zero(), &vdReturn, exitcode.Ok) + rt.ExpectSend(builtin.StorageMarketActorAddr, builtin.MethodsMarket.Deprecated1, &vdParams, big.Zero(), &vdReturn, exitcode.Ok) } st := getState(rt) // burn networkFee @@ -4192,7 +4192,7 @@ func (h *actorHarness) confirmSectorProofsValidInternal(rt *mock.Runtime, conf p if duration >= miner.MinSectorExpiration { qaPowerDelta := miner.QAPowerForWeight(h.sectorSize, duration, precommitOnChain.DealWeight, precommitOnChain.VerifiedDealWeight) expectQAPower = big.Add(expectQAPower, qaPowerDelta) - expectRawPower = big.Add(expectRawPower, big.NewIntUnsigned(uint64(h.sectorSize))) + expectRawPower = big.Add(expectRawPower, miner.SectorPower(h.sectorSize)) pledge := miner.InitialPledgeForPower(qaPowerDelta, h.baselinePower, h.epochRewardSmooth, h.epochQAPowerSmooth, rt.TotalFilCircSupply()) @@ -4565,7 +4565,7 @@ func (h *actorHarness) declareFaults(rt *mock.Runtime, faultSectorInfos ...*mine ss, err := faultSectorInfos[0].SealProof.SectorSize() require.NoError(h.t, err) - expectedRawDelta, expectedQADelta := powerForSectors(ss, faultSectorInfos) + expectedRawDelta := miner.SectorsPower(ss, len(faultSectorInfos)) expectedRawDelta = expectedRawDelta.Neg() - expectedQADelta = expectedQADelta.Neg() @@ -4689,7 +4689,7 @@ func (h *actorHarness) terminateSectors(rt *mock.Runtime, sectors bitfield.BitFi dealIDs = dealIDs[size:] } { - sectorPower = miner.PowerForSectors(h.sectorSize, sectorInfos) + sectorPower = miner.SectorsPower(h.sectorSize, len(sectorInfos)) rt.ExpectSend(builtin.StoragePowerActorAddr, builtin.MethodsPower.UpdateClaimedPower, &power.UpdateClaimedPowerParams{ - RawByteDelta: sectorPower.Raw.Neg(), - QualityAdjustedDelta: sectorPower.QA.Neg(), + RawByteDelta: sectorPower.Neg(), @@ -4930,13 +4930,12 @@ func (h *actorHarness) compactPartitions(rt *mock.Runtime, deadline uint64, part } func (h *actorHarness) continuedFaultPenalty(sectors []*miner.SectorOnChainInfo) abi.TokenAmount { - _, qa := powerForSectors(h.sectorSize, sectors) + qa := miner.SectorsPower(h.sectorSize, len(sectors)) return miner.PledgePenaltyForContinuedFault(h.epochRewardSmooth, h.epochQAPowerSmooth, qa) } -func (h *actorHarness) powerPairForSectors(sectors []*miner.SectorOnChainInfo) miner.PowerPair { - rawPower, qaPower := powerForSectors(h.sectorSize, sectors) - return miner.NewPowerPair(rawPower, qaPower) +func (h *actorHarness) powerPairForSectors(sectors []*miner.SectorOnChainInfo) abi.StoragePower { + return miner.SectorsPower(h.sectorSize, len(sectors)) } func (h *actorHarness) makePreCommit(sectorNo abi.SectorNumber, challenge, expiration abi.ChainEpoch, dealIDs []abi.DealID) *miner0.SectorPreCommitInfo { @@ -5232,15 +5231,6 @@ func sectorInfoAsBitfield(infos []*miner.SectorOnChainInfo) bitfield.BitField { return bf } -func powerForSectors(sectorSize abi.SectorSize, sectors []*miner.SectorOnChainInfo) (rawBytePower, qaPower big.Int) { - rawBytePower = big.Mul(big.NewIntUnsigned(uint64(sectorSize)), big.NewIntUnsigned(uint64(len(sectors)))) - qaPower = big.Zero() - for _, s := range sectors { - qaPower = big.Add(qaPower, miner.QAPowerForSector(sectorSize, s)) - } - return rawBytePower, qaPower -} - func assertEmptyBitfield(t
*testing.T, b bitfield.BitField) { empty, err := b.IsEmpty() require.NoError(t, err) diff --git a/actors/builtin/miner/monies.go b/actors/builtin/miner/monies.go index 23aa320f3..d13a880b6 100644 --- a/actors/builtin/miner/monies.go +++ b/actors/builtin/miner/monies.go @@ -70,16 +70,16 @@ var BasePenaltyForDisputedWindowPoSt = big.Mul(big.NewInt(20), builtin.TokenPrec // The projected block reward a sector would earn over some period. // Also known as "BR(t)". -// BR(t) = ProjectedRewardFraction(t) * SectorQualityAdjustedPower +// BR(t) = ProjectedRewardFraction(t) * SectorPower // ProjectedRewardFraction(t) is the sum of estimated reward over estimated total power // over all epochs in the projection period [t t+projectionDuration] -func ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower, projectionDuration abi.ChainEpoch) abi.TokenAmount { - networkQAPowerSmoothed := networkQAPowerEstimate.Estimate() - if networkQAPowerSmoothed.IsZero() { +func ExpectedRewardForPower(rewardEstimate, networkPowerEstimate smoothing.FilterEstimate, sectorPower abi.StoragePower, projectionDuration abi.ChainEpoch) abi.TokenAmount { + networkPowerSmoothed := networkPowerEstimate.Estimate() + if networkPowerSmoothed.IsZero() { return rewardEstimate.Estimate() } - expectedRewardForProvingPeriod := smoothing.ExtrapolatedCumSumOfRatio(projectionDuration, 0, rewardEstimate, networkQAPowerEstimate) - br128 := big.Mul(qaSectorPower, expectedRewardForProvingPeriod) // Q.0 * Q.128 => Q.128 + expectedRewardForProvingPeriod := smoothing.ExtrapolatedCumSumOfRatio(projectionDuration, 0, rewardEstimate, networkPowerEstimate) + br128 := big.Mul(sectorPower, expectedRewardForProvingPeriod) // Q.0 * Q.128 => Q.128 br := big.Rsh(br128, math.Precision128) return big.Max(br, big.Zero()) @@ -88,8 +88,8 @@ func ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate smoothing.Fil // BR but zero values are clamped at 1 attofil // Some uses of BR (PCD, IP) require a strictly positive value for BR derived values so // accounting variables can be used as succinct indicators of miner activity. -func ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower, projectionDuration abi.ChainEpoch) abi.TokenAmount { - br := ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, projectionDuration) +func ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkPowerEstimate smoothing.FilterEstimate, sectorPower abi.StoragePower, projectionDuration abi.ChainEpoch) abi.TokenAmount { + br := ExpectedRewardForPower(rewardEstimate, networkPowerEstimate, sectorPower, projectionDuration) if br.LessThanEqual(big.Zero()) { br = abi.NewTokenAmount(1) } @@ -99,15 +99,15 @@ func ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkQAPowerEstima // The penalty for a sector continuing faulty for another proving period. // It is a projection of the expected reward earned by the sector. 
// Also known as "FF(t)" -func PledgePenaltyForContinuedFault(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower) abi.TokenAmount { - return ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, ContinuedFaultProjectionPeriod) +func PledgePenaltyForContinuedFault(rewardEstimate, networkPowerEstimate smoothing.FilterEstimate, sectorPower abi.StoragePower) abi.TokenAmount { + return ExpectedRewardForPower(rewardEstimate, networkPowerEstimate, sectorPower, ContinuedFaultProjectionPeriod) } // Lower bound on the penalty for a terminating sector. // It is a projection of the expected reward earned by the sector. // Also known as "SP(t)" -func PledgePenaltyForTerminationLowerBound(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower) abi.TokenAmount { - return ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, TerminationPenaltyLowerBoundProjectionPeriod) +func PledgePenaltyForTerminationLowerBound(rewardEstimate, networkPowerEstimate smoothing.FilterEstimate, sectorPower abi.StoragePower) abi.TokenAmount { + return ExpectedRewardForPower(rewardEstimate, networkPowerEstimate, sectorPower, TerminationPenaltyLowerBoundProjectionPeriod) } // Penalty to locked pledge collateral for the termination of a sector before scheduled expiry. @@ -115,8 +115,8 @@ func PledgePenaltyForTerminationLowerBound(rewardEstimate, networkQAPowerEstimat // replacedDayReward and replacedSectorAge are the day reward and age of the replaced sector in a capacity upgrade. // They must be zero if no upgrade occurred. func PledgePenaltyForTermination(dayReward abi.TokenAmount, sectorAge abi.ChainEpoch, - twentyDayRewardAtActivation abi.TokenAmount, networkQAPowerEstimate smoothing.FilterEstimate, - qaSectorPower abi.StoragePower, rewardEstimate smoothing.FilterEstimate, replacedDayReward abi.TokenAmount, + twentyDayRewardAtActivation abi.TokenAmount, networkPowerEstimate smoothing.FilterEstimate, + sectorPower abi.StoragePower, rewardEstimate smoothing.FilterEstimate, replacedDayReward abi.TokenAmount, replacedSectorAge abi.ChainEpoch) abi.TokenAmount { // max(SP(t), BR(StartEpoch, 20d) + BR(StartEpoch, 1d) * terminationRewardFactor * min(SectorAgeInDays, 140)) // and sectorAgeInDays = sectorAge / EpochsInDay @@ -131,7 +131,7 @@ func PledgePenaltyForTermination(dayReward abi.TokenAmount, sectorAge abi.ChainE penalizedReward := big.Mul(expectedReward, TerminationRewardFactor.Numerator) return big.Max( - PledgePenaltyForTerminationLowerBound(rewardEstimate, networkQAPowerEstimate, qaSectorPower), + PledgePenaltyForTerminationLowerBound(rewardEstimate, networkPowerEstimate, sectorPower), big.Add( twentyDayRewardAtActivation, big.Div( @@ -140,20 +140,21 @@ func PledgePenaltyForTermination(dayReward abi.TokenAmount, sectorAge abi.ChainE } // The penalty for optimistically proving a sector with an invalid window PoSt. 
-func PledgePenaltyForInvalidWindowPoSt(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower) abi.TokenAmount { +func PledgePenaltyForInvalidWindowPoSt(rewardEstimate, networkPowerEstimate smoothing.FilterEstimate, sectorPower abi.StoragePower) abi.TokenAmount { return big.Add( - ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, InvalidWindowPoStProjectionPeriod), + ExpectedRewardForPower(rewardEstimate, networkPowerEstimate, sectorPower, InvalidWindowPoStProjectionPeriod), BasePenaltyForDisputedWindowPoSt, ) } -// Computes the PreCommit deposit given sector qa weight and current network conditions. +// Computes the PreCommit deposit given sector power and current network conditions. // PreCommit Deposit = BR(PreCommitDepositProjectionPeriod) -func PreCommitDepositForPower(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower) abi.TokenAmount { - return ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkQAPowerEstimate, qaSectorPower, PreCommitDepositProjectionPeriod) +// FIXME: recompute only for consensus reward +func PreCommitDepositForPower(rewardEstimate, networkPowerEstimate smoothing.FilterEstimate, sectorPower abi.StoragePower) abi.TokenAmount { + return ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkPowerEstimate, sectorPower, PreCommitDepositProjectionPeriod) } -// Computes the pledge requirement for committing new quality-adjusted power to the network, given the current +// Computes the pledge requirement for committing new power to the network, given the current // network total and baseline power, per-epoch reward, and circulating token supply. // The pledge comprises two parts: // - storage pledge, aka IP base: a multiple of the reward expected to be earned by newly-committed power @@ -163,21 +164,21 @@ func PreCommitDepositForPower(rewardEstimate, networkQAPowerEstimate smoothing.F // IPBase(t) = BR(t, InitialPledgeProjectionPeriod) // AdditionalIP(t) = LockTarget(t)*PledgeShare(t) // LockTarget = (LockTargetFactorNum / LockTargetFactorDenom) * FILCirculatingSupply(t) -// PledgeShare(t) = sectorQAPower / max(BaselinePower(t), NetworkQAPower(t)) -func InitialPledgeForPower(qaPower, baselinePower abi.StoragePower, rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, circulatingSupply abi.TokenAmount) abi.TokenAmount { - ipBase := ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkQAPowerEstimate, qaPower, InitialPledgeProjectionPeriod) +// PledgeShare(t) = sectorPower / max(BaselinePower(t), NetworkPower(t)) +func InitialPledgeForPower(power, baselinePower abi.StoragePower, rewardEstimate, networkPowerEstimate smoothing.FilterEstimate, circulatingSupply abi.TokenAmount) abi.TokenAmount { + ipBase := ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkPowerEstimate, power, InitialPledgeProjectionPeriod) lockTargetNum := big.Mul(InitialPledgeLockTarget.Numerator, circulatingSupply) lockTargetDenom := InitialPledgeLockTarget.Denominator - pledgeShareNum := qaPower - networkQAPower := networkQAPowerEstimate.Estimate() - pledgeShareDenom := big.Max(big.Max(networkQAPower, baselinePower), qaPower) // use qaPower in case others are 0 + pledgeShareNum := power + networkPower := networkPowerEstimate.Estimate() + pledgeShareDenom := big.Max(big.Max(networkPower, baselinePower), power) // use power in case others are 0 additionalIPNum := big.Mul(lockTargetNum, pledgeShareNum) additionalIPDenom := 
big.Mul(lockTargetDenom, pledgeShareDenom) additionalIP := big.Div(additionalIPNum, additionalIPDenom) nominalPledge := big.Add(ipBase, additionalIP) - spaceRacePledgeCap := big.Mul(InitialPledgeMaxPerByte, qaPower) + spaceRacePledgeCap := big.Mul(InitialPledgeMaxPerByte, power) return big.Min(nominalPledge, spaceRacePledgeCap) } diff --git a/actors/builtin/miner/partition_state.go b/actors/builtin/miner/partition_state.go index 620f96154..2563cf137 100644 --- a/actors/builtin/miner/partition_state.go +++ b/actors/builtin/miner/partition_state.go @@ -40,15 +40,6 @@ type Partition struct { // canceled but effective power has already been adjusted. // Not quantized. EarlyTerminated cid.Cid // AMT[ChainEpoch]BitField - - // Power of not-yet-terminated sectors (incl faulty & unproven). - LivePower PowerPair - // Power of yet-to-be-proved sectors (never faulty). - UnprovenPower PowerPair - // Power of currently-faulty sectors. FaultyPower <= LivePower. - FaultyPower PowerPair - // Power of expected-to-recover sectors. RecoveringPower <= FaultyPower. - RecoveringPower PowerPair } // Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data. @@ -80,10 +71,10 @@ func ConstructPartition(store adt.Store) (*Partition, error) { Terminated: bitfield.New(), ExpirationsEpochs: emptyExpirationArrayRoot, EarlyTerminated: emptyEarlyTerminationArrayRoot, - LivePower: NewPowerPairZero(), - UnprovenPower: NewPowerPairZero(), - FaultyPower: NewPowerPairZero(), - RecoveringPower: NewPowerPairZero(), + //LivePower: NewPowerPairZero(), + //UnprovenPower: NewPowerPairZero(), + //FaultyPower: NewPowerPairZero(), + //RecoveringPower: NewPowerPairZero(), }, nil } @@ -114,117 +105,111 @@ func (p *Partition) ActiveSectors() (bitfield.BitField, error) { return active, err } -// Active power is power of non-faulty sectors. -func (p *Partition) ActivePower() PowerPair { - return p.LivePower.Sub(p.FaultyPower).Sub(p.UnprovenPower) -} +//// Active power is power of non-faulty sectors. +//func (p *Partition) ActivePower() PowerPair { +// return p.LivePower.Sub(p.FaultyPower).Sub(p.UnprovenPower) +//} // AddSectors adds new sectors to the partition. // The sectors are "live", neither faulty, recovering, nor terminated. // Each new sector's expiration is scheduled shortly after its target expiration epoch. // If proven is false, the sectors are added to the partition's unproven set. -// Returns the total power of the added sectors. 
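// Editorial aside, not part of the change: a hedged sketch of the pledge arithmetic documented in
// monies.go above, i.e. IP = IPBase + LockTarget * PledgeShare with PledgeShare = sectorPower /
// max(baseline, networkPower), now computed entirely in raw-byte terms. The estimates, circulating
// supply, and v8 import paths below are placeholders; only the call shape comes from this diff.
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/specs-actors/v8/actors/builtin/miner"
	"github.com/filecoin-project/specs-actors/v8/actors/util/smoothing"
)

func main() {
	sectorPower := miner.SectorPower(abi.SectorSize(32 << 30)) // one 32GiB sector, raw bytes only

	baseline := abi.NewStoragePower(10 << 50)                                       // placeholder baseline power
	rewardEst := smoothing.TestingConstantEstimate(abi.NewTokenAmount(1e18))        // placeholder per-epoch reward estimate
	powerEst := smoothing.TestingConstantEstimate(abi.NewStoragePower(20 << 50))    // placeholder network raw-byte power estimate
	circSupply := big.Mul(big.NewInt(400_000_000), big.NewInt(1e18))                // placeholder circulating supply

	pledge := miner.InitialPledgeForPower(sectorPower, baseline, rewardEst, powerEst, circSupply)
	fmt.Println(pledge) // nominal pledge, capped at InitialPledgeMaxPerByte * sectorPower
}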
-func (p *Partition) AddSectors( - store adt.Store, proven bool, sectors []*SectorOnChainInfo, ssize abi.SectorSize, quant builtin.QuantSpec, -) (PowerPair, error) { +func (p *Partition) AddSectors(store adt.Store, proven bool, sectors []*SectorOnChainInfo, quant builtin.QuantSpec) error { expirations, err := LoadExpirationQueue(store, p.ExpirationsEpochs, quant, PartitionExpirationAmtBitwidth) if err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to load sector expirations: %w", err) + return xerrors.Errorf("failed to load sector expirations: %w", err) } - snos, power, _, err := expirations.AddActiveSectors(sectors, ssize) + snos, _, err := expirations.AddActiveSectors(sectors) if err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to record new sector expirations: %w", err) + return xerrors.Errorf("failed to record new sector expirations: %w", err) } if p.ExpirationsEpochs, err = expirations.Root(); err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to store sector expirations: %w", err) + return xerrors.Errorf("failed to store sector expirations: %w", err) } if contains, err := util.BitFieldContainsAny(p.Sectors, snos); err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to check if any new sector was already in the partition: %w", err) + return xerrors.Errorf("failed to check if any new sector was already in the partition: %w", err) } else if contains { - return NewPowerPairZero(), xerrors.Errorf("not all added sectors are new") + return xerrors.Errorf("not all added sectors are new") } // Update other metadata using the calculated totals. if p.Sectors, err = bitfield.MergeBitFields(p.Sectors, snos); err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to record new sector numbers: %w", err) + return xerrors.Errorf("failed to record new sector numbers: %w", err) } - p.LivePower = p.LivePower.Add(power) if !proven { if p.Unproven, err = bitfield.MergeBitFields(p.Unproven, snos); err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to update unproven sectors bitfield: %w", err) + return xerrors.Errorf("failed to update unproven sectors bitfield: %w", err) } - p.UnprovenPower = p.UnprovenPower.Add(power) } // check invariants if err := p.ValidateState(); err != nil { - return NewPowerPairZero(), err + return err } // No change to faults, recoveries, or terminations. // No change to faulty or recovering power. 
- return power, nil + return nil } // marks a set of sectors faulty func (p *Partition) addFaults( store adt.Store, sectorNos bitfield.BitField, sectors []*SectorOnChainInfo, faultExpiration abi.ChainEpoch, ssize abi.SectorSize, quant builtin.QuantSpec, -) (powerDelta, newFaultyPower PowerPair, err error) { +) (activeCountDelta int64, newFaultCount uint64, err error) { // Load expiration queue queue, err := LoadExpirationQueue(store, p.ExpirationsEpochs, quant, PartitionExpirationAmtBitwidth) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to load partition queue: %w", err) + return 0, 0, xerrors.Errorf("failed to load partition queue: %w", err) } // Reschedule faults - newFaultyPower, err = queue.RescheduleAsFaults(faultExpiration, sectors, ssize) - if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to add faults to partition queue: %w", err) + if err = queue.RescheduleAsFaults(faultExpiration, sectors); err != nil { + return 0, 0, xerrors.Errorf("failed to add faults to partition queue: %w", err) } // Save expiration queue if p.ExpirationsEpochs, err = queue.Root(); err != nil { - return NewPowerPairZero(), NewPowerPairZero(), err + return 0, 0, err } // Update partition metadata if p.Faults, err = bitfield.MergeBitFields(p.Faults, sectorNos); err != nil { - return NewPowerPairZero(), NewPowerPairZero(), err + return 0, 0, err } // The sectors must not have been previously faulty or recovering. // No change to recoveries or terminations. - p.FaultyPower = p.FaultyPower.Add(newFaultyPower) + //p.FaultyPower = p.FaultyPower.Add(newFaultyPower) // Once marked faulty, sectors are moved out of the unproven set. unproven, err := bitfield.IntersectBitField(sectorNos, p.Unproven) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to intersect faulty sector IDs with unproven sector IDs: %w", err) + return 0, 0, xerrors.Errorf("failed to intersect faulty sector IDs with unproven sector IDs: %w", err) } p.Unproven, err = bitfield.SubtractBitField(p.Unproven, unproven) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to subtract faulty sectors from unproven sector IDs: %w", err) + return 0, 0, xerrors.Errorf("failed to subtract faulty sectors from unproven sector IDs: %w", err) } - powerDelta = newFaultyPower.Neg() + newFaultCount = uint64(len(sectors)) + activeCountDelta = -int64(newFaultCount) if unprovenInfos, err := selectSectors(sectors, unproven); err != nil { - return NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to select unproven sectors: %w", err) + return 0, 0, xerrors.Errorf("failed to select unproven sectors: %w", err) } else if len(unprovenInfos) > 0 { - lostUnprovenPower := PowerForSectors(ssize, unprovenInfos) - p.UnprovenPower = p.UnprovenPower.Sub(lostUnprovenPower) - powerDelta = powerDelta.Add(lostUnprovenPower) + lostUnprovenCount := int64(len(unprovenInfos)) + activeCountDelta = activeCountDelta + lostUnprovenCount } // check invariants if err := p.ValidateState(); err != nil { - return NewPowerPairZero(), NewPowerPairZero(), err + return 0, 0, err } // No change to live or recovering power. - return powerDelta, newFaultyPower, nil + return activeCountDelta, newFaultCount, nil } // Declares a set of sectors faulty. 
Already faulty sectors are ignored, @@ -238,121 +223,119 @@ func (p *Partition) addFaults( func (p *Partition) RecordFaults( store adt.Store, sectors Sectors, sectorNos bitfield.BitField, faultExpirationEpoch abi.ChainEpoch, ssize abi.SectorSize, quant builtin.QuantSpec, -) (newFaults bitfield.BitField, powerDelta, newFaultyPower PowerPair, err error) { +) (newFaults bitfield.BitField, activeCountDelta int64, newFaultCount uint64, err error) { err = validatePartitionContainsSectors(p, sectorNos) if err != nil { - return bitfield.BitField{}, NewPowerPairZero(), NewPowerPairZero(), xc.ErrIllegalArgument.Wrapf("failed fault declaration: %w", err) + return bitfield.BitField{}, 0, 0, xc.ErrIllegalArgument.Wrapf("failed fault declaration: %w", err) } // Split declarations into declarations of new faults, and retraction of declared recoveries. retractedRecoveries, err := bitfield.IntersectBitField(p.Recoveries, sectorNos) if err != nil { - return bitfield.BitField{}, NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to intersect sectors with recoveries: %w", err) + return bitfield.BitField{}, 0, 0, xerrors.Errorf("failed to intersect sectors with recoveries: %w", err) } newFaults, err = bitfield.SubtractBitField(sectorNos, retractedRecoveries) if err != nil { - return bitfield.BitField{}, NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to subtract recoveries from sectors: %w", err) + return bitfield.BitField{}, 0, 0, xerrors.Errorf("failed to subtract recoveries from sectors: %w", err) } // Ignore any terminated sectors and previously declared or detected faults newFaults, err = bitfield.SubtractBitField(newFaults, p.Terminated) if err != nil { - return bitfield.BitField{}, NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to subtract terminations from faults: %w", err) + return bitfield.BitField{}, 0, 0, xerrors.Errorf("failed to subtract terminations from faults: %w", err) } newFaults, err = bitfield.SubtractBitField(newFaults, p.Faults) if err != nil { - return bitfield.BitField{}, NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to subtract existing faults from faults: %w", err) + return bitfield.BitField{}, 0, 0, xerrors.Errorf("failed to subtract existing faults from faults: %w", err) } // Add new faults to state. - newFaultyPower = NewPowerPairZero() - powerDelta = NewPowerPairZero() + newFaultCount = 0 + activeCountDelta = 0 if newFaultSectors, err := sectors.Load(newFaults); err != nil { - return bitfield.BitField{}, NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to load fault sectors: %w", err) + return bitfield.BitField{}, 0, 0, xerrors.Errorf("failed to load fault sectors: %w", err) } else if len(newFaultSectors) > 0 { - powerDelta, newFaultyPower, err = p.addFaults(store, newFaults, newFaultSectors, faultExpirationEpoch, ssize, quant) + activeCountDelta, newFaultCount, err = p.addFaults(store, newFaults, newFaultSectors, faultExpirationEpoch, ssize, quant) if err != nil { - return bitfield.BitField{}, NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to add faults: %w", err) + return bitfield.BitField{}, 0, 0, xerrors.Errorf("failed to add faults: %w", err) } } // Remove faulty recoveries from state. 
if retractedRecoverySectors, err := sectors.Load(retractedRecoveries); err != nil { - return bitfield.BitField{}, NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to load recovery sectors: %w", err) + return bitfield.BitField{}, 0, 0, xerrors.Errorf("failed to load recovery sectors: %w", err) } else if len(retractedRecoverySectors) > 0 { - retractedRecoveryPower := PowerForSectors(ssize, retractedRecoverySectors) - err = p.removeRecoveries(retractedRecoveries, retractedRecoveryPower) + err = p.removeRecoveries(retractedRecoveries) if err != nil { - return bitfield.BitField{}, NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to remove recoveries: %w", err) + return bitfield.BitField{}, 0, 0, xerrors.Errorf("failed to remove recoveries: %w", err) } } // check invariants if err := p.ValidateState(); err != nil { - return bitfield.BitField{}, NewPowerPairZero(), NewPowerPairZero(), err + return bitfield.BitField{}, 0, 0, err } - return newFaults, powerDelta, newFaultyPower, nil + return newFaults, activeCountDelta, newFaultCount, nil } // Removes sector numbers from faults and thus from recoveries. // The sectors are removed from the Faults and Recovering bitfields, and FaultyPower and RecoveringPower reduced. // The sectors are re-scheduled for expiration shortly after their target expiration epoch. -// Returns the power of the now-recovered sectors. -func (p *Partition) RecoverFaults(store adt.Store, sectors Sectors, ssize abi.SectorSize, quant builtin.QuantSpec) (PowerPair, error) { +// Returns the count of the now-recovered sectors. +func (p *Partition) RecoverFaults(store adt.Store, sectors Sectors, ssize abi.SectorSize, quant builtin.QuantSpec) (uint64, error) { // Process recoveries, assuming the proof will be successful. // This similarly updates state. recoveredSectors, err := sectors.Load(p.Recoveries) if err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to load recovered sectors: %w", err) + return 0, xerrors.Errorf("failed to load recovered sectors: %w", err) } // Load expiration queue queue, err := LoadExpirationQueue(store, p.ExpirationsEpochs, quant, PartitionExpirationAmtBitwidth) if err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to load partition queue: %w", err) + return 0, xerrors.Errorf("failed to load partition queue: %w", err) } // Reschedule recovered - power, err := queue.RescheduleRecovered(recoveredSectors, ssize) + power, err := queue.RescheduleRecovered(recoveredSectors) if err != nil { - return NewPowerPairZero(), xerrors.Errorf("failed to reschedule faults in partition queue: %w", err) + return 0, xerrors.Errorf("failed to reschedule faults in partition queue: %w", err) } // Save expiration queue if p.ExpirationsEpochs, err = queue.Root(); err != nil { - return NewPowerPairZero(), err + return 0, err } // Update partition metadata if newFaults, err := bitfield.SubtractBitField(p.Faults, p.Recoveries); err != nil { - return NewPowerPairZero(), err + return 0, err } else { p.Faults = newFaults } p.Recoveries = bitfield.New() - // No change to live power. - // No change to unproven sectors. - p.FaultyPower = p.FaultyPower.Sub(power) - p.RecoveringPower = p.RecoveringPower.Sub(power) - // check invariants if err := p.ValidateState(); err != nil { - return NewPowerPairZero(), err + return 0, err } return power, err } // Activates unproven sectors, returning the activated power. 
-func (p *Partition) ActivateUnproven() PowerPair { - newPower := p.UnprovenPower - p.UnprovenPower = NewPowerPairZero() +func (p *Partition) ActivateUnproven() (uint64, error) { + newCount, err := p.Unproven.Count() + if err != nil { + return 0, xerrors.Errorf("failed to count unproven sectors: %w", err) + } + + //p.UnprovenPower = NewPowerPairZero() p.Unproven = bitfield.New() - return newPower + return newCount, nil } // Declares sectors as recovering. Non-faulty and already recovering sectors will be skipped. -func (p *Partition) DeclareFaultsRecovered(sectors Sectors, ssize abi.SectorSize, sectorNos bitfield.BitField) (err error) { +func (p *Partition) DeclareFaultsRecovered(sectorNos bitfield.BitField) (err error) { // Check that the declared sectors are actually assigned to the partition. err = validatePartitionContainsSectors(p, sectorNos) if err != nil { @@ -370,19 +353,11 @@ func (p *Partition) DeclareFaultsRecovered(sectors Sectors, ssize abi.SectorSize } // Record the new recoveries for processing at Window PoSt or deadline cron. - recoverySectors, err := sectors.Load(recoveries) - if err != nil { - return xerrors.Errorf("failed to load recovery sectors: %w", err) - } - p.Recoveries, err = bitfield.MergeBitFields(p.Recoveries, recoveries) if err != nil { return err } - power := PowerForSectors(ssize, recoverySectors) - p.RecoveringPower = p.RecoveringPower.Add(power) - // check invariants if err := p.ValidateState(); err != nil { return err @@ -394,8 +369,8 @@ func (p *Partition) DeclareFaultsRecovered(sectors Sectors, ssize abi.SectorSize return nil } -// Removes sectors from recoveries and recovering power. Assumes sectors are currently faulty and recovering.. -func (p *Partition) removeRecoveries(sectorNos bitfield.BitField, power PowerPair) (err error) { +// Removes sectors from recoveries. Assumes sectors are currently faulty and recovering.. +func (p *Partition) removeRecoveries(sectorNos bitfield.BitField) (err error) { empty, err := sectorNos.IsEmpty() if err != nil { return err @@ -407,7 +382,6 @@ func (p *Partition) removeRecoveries(sectorNos bitfield.BitField, power PowerPai if err != nil { return err } - p.RecoveringPower = p.RecoveringPower.Sub(power) // No change to faults, or terminations. // No change to faulty power. // No change to unproven or unproven power. @@ -418,50 +392,48 @@ func (p *Partition) removeRecoveries(sectorNos bitfield.BitField, power PowerPai // The old sectors must not be faulty, terminated, or unproven. // If the same sector is both removed and added, this permits rescheduling *with a change in power*, // unlike RescheduleExpirations. -// Returns the delta to power and pledge requirement. -func (p *Partition) ReplaceSectors(store adt.Store, oldSectors, newSectors []*SectorOnChainInfo, - ssize abi.SectorSize, quant builtin.QuantSpec) (PowerPair, abi.TokenAmount, error) { +// Returns the delta pledge requirement. 
+func (p *Partition) ReplaceSectors(store adt.Store, oldSectors, newSectors []*SectorOnChainInfo, quant builtin.QuantSpec) (abi.TokenAmount, error) { expirations, err := LoadExpirationQueue(store, p.ExpirationsEpochs, quant, PartitionExpirationAmtBitwidth) if err != nil { - return NewPowerPairZero(), big.Zero(), xerrors.Errorf("failed to load sector expirations: %w", err) + return big.Zero(), xerrors.Errorf("failed to load sector expirations: %w", err) } - oldSnos, newSnos, powerDelta, pledgeDelta, err := expirations.ReplaceSectors(oldSectors, newSectors, ssize) + oldSnos, newSnos, pledgeDelta, err := expirations.ReplaceSectors(oldSectors, newSectors) if err != nil { - return NewPowerPairZero(), big.Zero(), xerrors.Errorf("failed to replace sector expirations: %w", err) + return big.Zero(), xerrors.Errorf("failed to replace sector expirations: %w", err) } if p.ExpirationsEpochs, err = expirations.Root(); err != nil { - return NewPowerPairZero(), big.Zero(), xerrors.Errorf("failed to save sector expirations: %w", err) + return big.Zero(), xerrors.Errorf("failed to save sector expirations: %w", err) } // Check the sectors being removed are active (alive, not faulty). active, err := p.ActiveSectors() if err != nil { - return NewPowerPairZero(), big.Zero(), err + return big.Zero(), err } allActive, err := util.BitFieldContainsAll(active, oldSnos) if err != nil { - return NewPowerPairZero(), big.Zero(), xerrors.Errorf("failed to check for active sectors: %w", err) + return big.Zero(), xerrors.Errorf("failed to check for active sectors: %w", err) } else if !allActive { - return NewPowerPairZero(), big.Zero(), xerrors.Errorf("refusing to replace inactive sectors in %v (active: %v)", oldSnos, active) + return big.Zero(), xerrors.Errorf("refusing to replace inactive sectors in %v (active: %v)", oldSnos, active) } // Update partition metadata. if p.Sectors, err = bitfield.SubtractBitField(p.Sectors, oldSnos); err != nil { - return NewPowerPairZero(), big.Zero(), xerrors.Errorf("failed to remove replaced sectors: %w", err) + return big.Zero(), xerrors.Errorf("failed to remove replaced sectors: %w", err) } if p.Sectors, err = bitfield.MergeBitFields(p.Sectors, newSnos); err != nil { - return NewPowerPairZero(), big.Zero(), xerrors.Errorf("failed to add replaced sectors: %w", err) + return big.Zero(), xerrors.Errorf("failed to add replaced sectors: %w", err) } - p.LivePower = p.LivePower.Add(powerDelta) // check invariants if err := p.ValidateState(); err != nil { - return NewPowerPairZero(), big.Zero(), err + return big.Zero(), err } // No change to faults, recoveries, or terminations. // No change to faulty or recovering power. - return powerDelta, pledgeDelta, nil + return pledgeDelta, nil } // Record the epoch of any sectors expiring early, for termination fee calculation later. @@ -482,9 +454,8 @@ func (p *Partition) recordEarlyTermination(store adt.Store, epoch abi.ChainEpoch // Marks a collection of sectors as terminated. // The sectors are removed from Faults and Recoveries. // The epoch of termination is recorded for future termination fee calculation. 
-func (p *Partition) TerminateSectors( - store adt.Store, sectors Sectors, epoch abi.ChainEpoch, sectorNos bitfield.BitField, - ssize abi.SectorSize, quant builtin.QuantSpec) (*ExpirationSet, error) { +func (p *Partition) TerminateSectors(store adt.Store, sectors Sectors, epoch abi.ChainEpoch, sectorNos bitfield.BitField, + quant builtin.QuantSpec) (*ExpirationSet, error) { liveSectors, err := p.LiveSectors() if err != nil { return nil, err @@ -503,7 +474,7 @@ func (p *Partition) TerminateSectors( if err != nil { return nil, xerrors.Errorf("failed to load sector expirations: %w", err) } - removed, removedRecovering, err := expirations.RemoveSectors(sectorInfos, p.Faults, p.Recoveries, ssize) + removed, err := expirations.RemoveSectors(sectorInfos, p.Faults) if err != nil { return nil, xerrors.Errorf("failed to remove sector expirations: %w", err) } @@ -541,22 +512,26 @@ func (p *Partition) TerminateSectors( return nil, xerrors.Errorf("failed to remove unproven sectors: %w", err) } - p.LivePower = p.LivePower.Sub(removed.ActivePower).Sub(removed.FaultyPower) - p.FaultyPower = p.FaultyPower.Sub(removed.FaultyPower) - p.RecoveringPower = p.RecoveringPower.Sub(removedRecovering) - if unprovenInfos, err := selectSectors(sectorInfos, unprovenNos); err != nil { - return nil, xerrors.Errorf("failed to select unproven sectors: %w", err) - } else { - removedUnprovenPower := PowerForSectors(ssize, unprovenInfos) - p.UnprovenPower = p.UnprovenPower.Sub(removedUnprovenPower) - removed.ActivePower = removed.ActivePower.Sub(removedUnprovenPower) + //p.LivePower = p.LivePower.Sub(removed.ActiveCount).Sub(removed.FaultyCount) + //p.FaultyPower = p.FaultyPower.Sub(removed.FaultyCount) + //p.RecoveringPower = p.RecoveringPower.Sub(removedRecovering) + //if unprovenInfos, err := selectSectors(sectorInfos, unprovenNos); err != nil { + // return nil, xerrors.Errorf("failed to select unproven sectors: %w", err) + //} else { + //removedUnprovenPower := SectorsPower(ssize, len(unprovenInfos)) + //p.UnprovenPower = p.UnprovenPower.Sub(removedUnprovenPower) + //removed.ActiveCount = removed.ActiveCount.Sub(removedUnprovenPower) + //} + removedUnprovenCount, err := unprovenNos.Count() + if err != nil { + return nil, xerrors.Errorf("failed to count removed unproven sectors: %w", err) } + removed.ActiveCount -= removedUnprovenCount // check invariants if err := p.ValidateState(); err != nil { return nil, err } - return removed, nil } @@ -602,9 +577,6 @@ func (p *Partition) PopExpiredSectors(store adt.Store, until abi.ChainEpoch, qua } else if !noRecoveries { return nil, xerrors.Errorf("unexpected recoveries while processing expirations") } - if !p.RecoveringPower.IsZero() { - return nil, xerrors.Errorf("unexpected recovering power while processing expirations") - } // Nothing expiring now should have already terminated. alreadyTerminated, err := util.BitFieldContainsAny(p.Terminated, expiredSectors) if err != nil { @@ -620,8 +592,6 @@ func (p *Partition) PopExpiredSectors(store adt.Store, until abi.ChainEpoch, qua if p.Faults, err = bitfield.SubtractBitField(p.Faults, expiredSectors); err != nil { return nil, err } - p.LivePower = p.LivePower.Sub(popped.ActivePower.Add(popped.FaultyPower)) - p.FaultyPower = p.FaultyPower.Sub(popped.FaultyPower) // Record the epoch of any sectors expiring early, for termination fee calculation later. 
err = p.recordEarlyTermination(store, until, popped.EarlySectors) @@ -642,49 +612,65 @@ func (p *Partition) PopExpiredSectors(store adt.Store, until abi.ChainEpoch, qua // Returns the power delta, power that should be penalized (new faults + failed recoveries), and newly faulty power. func (p *Partition) RecordMissedPost( store adt.Store, faultExpiration abi.ChainEpoch, quant builtin.QuantSpec, -) (powerDelta, penalizedPower, newFaultyPower PowerPair, err error) { +) (activeCountDelta int64, penalizedCount, newFaultCount uint64, err error) { // Collapse tail of queue into the last entry, and mark all power faulty. // Load expiration queue queue, err := LoadExpirationQueue(store, p.ExpirationsEpochs, quant, PartitionExpirationAmtBitwidth) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to load partition queue: %w", err) + return 0, 0, 0, xerrors.Errorf("failed to load partition queue: %w", err) } if err = queue.RescheduleAllAsFaults(faultExpiration); err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), xerrors.Errorf("failed to reschedule all as faults: %w", err) + return 0, 0, 0, xerrors.Errorf("failed to reschedule all as faults: %w", err) } // Save expiration queue if p.ExpirationsEpochs, err = queue.Root(); err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), err + return 0, 0, 0, err } - // Compute power changes. - - // New faulty power is the total power minus already faulty. - newFaultyPower = p.LivePower.Sub(p.FaultyPower) - // Penalized power is the newly faulty power, plus the failed recovery power. - penalizedPower = p.RecoveringPower.Add(newFaultyPower) + // Compute sector state changes. + liveSectors, err := p.LiveSectors() + if err != nil { + return 0, 0, 0, xerrors.Errorf("failed to load live sectors: %w", err) + } + liveCount, err := liveSectors.Count() + if err != nil { + return 0, 0, 0, xerrors.Errorf("failed to count live sectors: %w", err) + } + faultCount, err := p.Faults.Count() + if err != nil { + return 0, 0, 0, xerrors.Errorf("failed to count faults: %w", err) + } + recoveringCount, err := p.Recoveries.Count() + if err != nil { + return 0, 0, 0, xerrors.Errorf("failed to count recoveries: %w", err) + } + unprovenCount, err := p.Unproven.Count() + if err != nil { + return 0, 0, 0, xerrors.Errorf("failed to count unproven: %w", err) + } + // New faulty sectors are the live sectors minus already faulty. + newFaultCount = liveCount - faultCount + // Penalized sectors are the newly faulty ones, plus the failed recoveries. + penalizedCount = recoveringCount + newFaultCount // The power delta is -(newFaultyPower-unproven), because unproven power // was never activated in the first place. 
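+	// In count terms: activeCountDelta = -(newFaultCount - unprovenCount), because unproven
+	// sectors were never counted as active and so must not reduce the active count again.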
- powerDelta = newFaultyPower.Sub(p.UnprovenPower).Neg() + activeCountDelta = -int64(newFaultCount - unprovenCount) // Update partition metadata allFaults, err := p.LiveSectors() if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), err + return 0, 0, 0, err } p.Faults = allFaults p.Recoveries = bitfield.New() p.Unproven = bitfield.New() - p.FaultyPower = p.LivePower - p.RecoveringPower = NewPowerPairZero() - p.UnprovenPower = NewPowerPairZero() // check invariants if err := p.ValidateState(); err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), err + return 0, 0, 0, err } - return powerDelta, penalizedPower, newFaultyPower, nil + return activeCountDelta, penalizedCount, newFaultCount, nil } func (p *Partition) PopEarlyTerminations(store adt.Store, maxSectors uint64) (result TerminationResult, hasMore bool, err error) { @@ -780,103 +766,70 @@ func (p *Partition) PopEarlyTerminations(store adt.Store, maxSectors uint64) (re // - Skipped faults that are already declared (but not delcared recovered) are ignored. func (p *Partition) RecordSkippedFaults( store adt.Store, sectors Sectors, ssize abi.SectorSize, quant builtin.QuantSpec, faultExpiration abi.ChainEpoch, skipped bitfield.BitField, -) (powerDelta, newFaultPower, retractedRecoveryPower PowerPair, hasNewFaults bool, err error) { +) (activeCountDelta int64, newFaultCount, retractedRecoveryCount uint64, hasNewFaults bool, err error) { empty, err := skipped.IsEmpty() if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, xc.ErrIllegalArgument.Wrapf("failed to check if skipped sectors is empty: %w", err) + return 0, 0, 0, false, xc.ErrIllegalArgument.Wrapf("failed to check if skipped sectors is empty: %w", err) } if empty { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, nil + return 0, 0, 0, false, nil } // Check that the declared sectors are actually in the partition. contains, err := util.BitFieldContainsAll(p.Sectors, skipped) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, xerrors.Errorf("failed to check if skipped faults are in partition: %w", err) + return 0, 0, 0, false, xerrors.Errorf("failed to check if skipped faults are in partition: %w", err) } else if !contains { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, xc.ErrIllegalArgument.Wrapf("skipped faults contains sectors outside partition") + return 0, 0, 0, false, xc.ErrIllegalArgument.Wrapf("skipped faults contains sectors outside partition") } // Find all skipped faults that have been labeled recovered retractedRecoveries, err := bitfield.IntersectBitField(p.Recoveries, skipped) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, xerrors.Errorf("failed to intersect sectors with recoveries: %w", err) + return 0, 0, 0, false, xerrors.Errorf("failed to intersect sectors with recoveries: %w", err) } retractedRecoverySectors, err := sectors.Load(retractedRecoveries) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, xerrors.Errorf("failed to load sectors: %w", err) + return 0, 0, 0, false, xerrors.Errorf("failed to load sectors: %w", err) } - retractedRecoveryPower = PowerForSectors(ssize, retractedRecoverySectors) + retractedRecoveryCount = uint64(len(retractedRecoverySectors)) // Ignore skipped faults that are already faults or terminated. 
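+	// newFaults = skipped, minus already-terminated sectors, minus already-declared faults.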
newFaults, err := bitfield.SubtractBitField(skipped, p.Terminated) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, xerrors.Errorf("failed to subtract terminations from skipped: %w", err) + return 0, 0, 0, false, xerrors.Errorf("failed to subtract terminations from skipped: %w", err) } newFaults, err = bitfield.SubtractBitField(newFaults, p.Faults) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, xerrors.Errorf("failed to subtract existing faults from skipped: %w", err) + return 0, 0, 0, false, xerrors.Errorf("failed to subtract existing faults from skipped: %w", err) } newFaultSectors, err := sectors.Load(newFaults) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, xerrors.Errorf("failed to load sectors: %w", err) + return 0, 0, 0, false, xerrors.Errorf("failed to load sectors: %w", err) } // Record new faults - powerDelta, newFaultPower, err = p.addFaults(store, newFaults, newFaultSectors, faultExpiration, ssize, quant) + activeCountDelta, newFaultCount, err = p.addFaults(store, newFaults, newFaultSectors, faultExpiration, ssize, quant) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, xerrors.Errorf("failed to add skipped faults: %w", err) + return 0, 0, 0, false, xerrors.Errorf("failed to add skipped faults: %w", err) } // Remove faulty recoveries - err = p.removeRecoveries(retractedRecoveries, retractedRecoveryPower) + err = p.removeRecoveries(retractedRecoveries) if err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, xerrors.Errorf("failed to remove recoveries: %w", err) + return 0, 0, 0, false, xerrors.Errorf("failed to remove recoveries: %w", err) } // check invariants if err := p.ValidateState(); err != nil { - return NewPowerPairZero(), NewPowerPairZero(), NewPowerPairZero(), false, err + return 0, 0, 0, false, err } - return powerDelta, newFaultPower, retractedRecoveryPower, len(newFaultSectors) > 0, nil + return activeCountDelta, newFaultCount, retractedRecoveryCount, len(newFaultSectors) > 0, nil } -// Test that invariants about partition power hold -func (p *Partition) ValidatePowerState() error { - if p.LivePower.Raw.LessThan(big.Zero()) || p.LivePower.QA.LessThan(big.Zero()) { - return xerrors.Errorf("Partition left with negative live power: %v", p) - } - - if p.UnprovenPower.Raw.LessThan(big.Zero()) || p.UnprovenPower.QA.LessThan(big.Zero()) { - return xerrors.Errorf("Partition left with negative unproven power: %v", p) - } - - if p.FaultyPower.Raw.LessThan(big.Zero()) || p.FaultyPower.QA.LessThan(big.Zero()) { - return xerrors.Errorf("Partition left with negative faulty power: %v", p) - } - - if p.RecoveringPower.Raw.LessThan(big.Zero()) || p.RecoveringPower.QA.LessThan(big.Zero()) { - return xerrors.Errorf("Partition left with negative recovering power: %v", p) - } - - if p.UnprovenPower.Raw.GreaterThan(p.LivePower.Raw) { - return xerrors.Errorf("Partition left with invalid unproven power: %v", p) - } - - if p.FaultyPower.Raw.GreaterThan(p.LivePower.Raw) { - return xerrors.Errorf("Partition left with invalid faulty power: %v", p) - } - - if p.RecoveringPower.Raw.GreaterThan(p.LivePower.Raw) || p.RecoveringPower.Raw.GreaterThan(p.FaultyPower.Raw) { - return xerrors.Errorf("Partition left with invalid recovering power: %v", p) - } - - return nil -} - -// Test that invariants about sector bitfields hold -func (p *Partition) ValidateBFState() error { +// Test all 
invariants hold +func (p *Partition) ValidateState() error { // Merge unproven and faults for checks merge, err := bitfield.MultiMerge(p.Unproven, p.Faults) if err != nil { @@ -913,24 +866,11 @@ func (p *Partition) ValidateBFState() error { return nil } -// Test all invariants hold -func (p *Partition) ValidateState() error { - var err error - if err = p.ValidatePowerState(); err != nil { - return err - } - - if err = p.ValidateBFState(); err != nil { - return err - } - - return nil -} - // // PowerPair // +// FIXME delete func NewPowerPairZero() PowerPair { return NewPowerPair(big.Zero(), big.Zero()) } diff --git a/actors/builtin/miner/partition_state_test.go b/actors/builtin/miner/partition_state_test.go index c2ac3e9c2..801f69213 100644 --- a/actors/builtin/miner/partition_state_test.go +++ b/actors/builtin/miner/partition_state_test.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/exitcode" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -36,9 +37,9 @@ func TestPartitions(t *testing.T) { store := ipld.NewADTStore(context.Background()) partition := emptyPartition(t, store) - power, err := partition.AddSectors(store, false, sectors, sectorSize, quantSpec) + err := partition.AddSectors(store, false, sectors, quantSpec) require.NoError(t, err) - expectedPower := miner.PowerForSectors(sectorSize, sectors) + expectedPower := miner.SectorsPower(sectorSize, len(sectors)) assert.True(t, expectedPower.Equals(power)) return store, partition @@ -49,7 +50,7 @@ func TestPartitions(t *testing.T) { power := partition.ActivateUnproven() - expectedPower := miner.PowerForSectors(sectorSize, sectors) + expectedPower := miner.SectorsPower(sectorSize, len(sectors)) assert.True(t, expectedPower.Equals(power)) return store, partition @@ -59,7 +60,7 @@ func TestPartitions(t *testing.T) { _, partition := setupUnproven(t) power := partition.ActivateUnproven() - expectedPower := miner.PowerForSectors(sectorSize, sectors) + expectedPower := miner.SectorsPower(sectorSize, len(sectors)) assert.True(t, expectedPower.Equals(power)) }) @@ -79,7 +80,7 @@ func TestPartitions(t *testing.T) { t.Run("doesn't add sectors twice", func(t *testing.T) { store, partition := setup(t) - _, err := partition.AddSectors(store, false, sectors[:1], sectorSize, quantSpec) + err := partition.AddSectors(store, false, sectors[:1], quantSpec) require.EqualError(t, err, "not all added sectors are new") }) @@ -131,7 +132,7 @@ func TestPartitions(t *testing.T) { _, powerDelta, newFaultyPower, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(7), sectorSize, quantSpec) require.NoError(t, err) - expectedFaultyPower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, faultSet)) + expectedFaultyPower := miner.SectorsPower(sectorSize, 2) assert.True(t, expectedFaultyPower.Equals(newFaultyPower)) assert.True(t, powerDelta.Equals(expectedFaultyPower.Neg())) @@ -139,7 +140,7 @@ func TestPartitions(t *testing.T) { newFaults, powerDelta, newFaultyPower, err := partition.RecordFaults(store, sectorArr, faultSet, abi.ChainEpoch(3), sectorSize, quantSpec) require.NoError(t, err) assertBitfieldEquals(t, newFaults, 6) - expectedFaultyPower = miner.PowerForSectors(sectorSize, selectSectors(t, sectors, bf(6))) + expectedFaultyPower = miner.SectorsPower(sectorSize, 1) assert.True(t, expectedFaultyPower.Equals(newFaultyPower)) 
assert.True(t, powerDelta.Equals(expectedFaultyPower.Neg())) @@ -173,7 +174,7 @@ func TestPartitions(t *testing.T) { // add 4 and 5 as recoveries recoverSet := bf(4, 5) - err = partition.DeclareFaultsRecovered(sectorArr, sectorSize, recoverSet) + err = partition.DeclareFaultsRecovered(recoverSet) require.NoError(t, err) assertPartitionState(t, store, partition, quantSpec, sectorSize, sectors, bf(1, 2, 3, 4, 5, 6), bf(4, 5, 6), bf(4, 5), bf(), bf()) @@ -190,7 +191,7 @@ func TestPartitions(t *testing.T) { // add 4 and 5 as recoveries recoverSet := bf(4, 5) - err = partition.DeclareFaultsRecovered(sectorArr, sectorSize, recoverSet) + err = partition.DeclareFaultsRecovered(recoverSet) require.NoError(t, err) // declaring no faults doesn't do anything. @@ -219,8 +220,8 @@ func TestPartitions(t *testing.T) { // add 4 and 5 as recoveries recoverSet := bf(4, 5) - recoveryPower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, recoverSet)) - err = partition.DeclareFaultsRecovered(sectorArr, sectorSize, recoverSet) + recoveryPower := miner.SectorsPower(sectorSize, 2) + err = partition.DeclareFaultsRecovered(recoverSet) require.NoError(t, err) // mark recoveries as recovered recover sectors @@ -252,11 +253,11 @@ func TestPartitions(t *testing.T) { // add 3, 4 and 5 as recoveries. 3 is not faulty so it's skipped recoverSet := bf(3, 4, 5) - err = partition.DeclareFaultsRecovered(sectorArr, sectorSize, recoverSet) + err = partition.DeclareFaultsRecovered(recoverSet) require.NoError(t, err) - recoveringPower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, faultSet)) - err = partition.DeclareFaultsRecovered(sectorArr, sectorSize, faultSet) + recoveringPower := miner.SectorsPower(sectorSize, 3) + err = partition.DeclareFaultsRecovered(faultSet) require.NoError(t, err) assert.True(t, partition.RecoveringPower.Equals(recoveringPower)) @@ -268,7 +269,7 @@ func TestPartitions(t *testing.T) { sectorArr := sectorsArr(t, store, sectors) // try to add 99 as a recovery but it's not in the partition - err := partition.DeclareFaultsRecovered(sectorArr, sectorSize, bf(99)) + err := partition.DeclareFaultsRecovered(bf(99)) require.Error(t, err) assert.Contains(t, err.Error(), "not all sectors are assigned to the partition") }) @@ -278,7 +279,7 @@ func TestPartitions(t *testing.T) { // remove 3 sectors starting with 2 oldSectors := sectors[1:4] - oldSectorPower := miner.PowerForSectors(sectorSize, oldSectors) + oldSectorPower := miner.SectorsPower(sectorSize, len(oldSectors)) oldSectorPledge := int64(1001 + 1002 + 1003) // replace 1 and add 2 new sectors @@ -290,7 +291,7 @@ func TestPartitions(t *testing.T) { newSectorPower := miner.PowerForSectors(sectorSize, newSectors) newSectorPledge := int64(3000 + 3001 + 3002) - powerDelta, pledgeDelta, err := partition.ReplaceSectors(store, oldSectors, newSectors, sectorSize, quantSpec) + pledgeDelta, err := partition.ReplaceSectors(store, oldSectors, newSectors, quantSpec) require.NoError(t, err) expectedPowerDelta := newSectorPower.Sub(oldSectorPower) @@ -327,7 +328,7 @@ func TestPartitions(t *testing.T) { testSector(10, 2, 150, 260, 3000), } - _, _, err = partition.ReplaceSectors(store, oldSectors, newSectors, sectorSize, quantSpec) + _, err = partition.ReplaceSectors(store, oldSectors, newSectors, quantSpec) require.Error(t, err) assert.Contains(t, err.Error(), "refusing to replace inactive sectors") }) @@ -343,7 +344,7 @@ func TestPartitions(t *testing.T) { testSector(10, 2, 150, 260, 3000), } - _, _, err := partition.ReplaceSectors(store, 
oldSectors, newSectors, sectorSize, quantSpec) + _, err := partition.ReplaceSectors(store, oldSectors, newSectors, quantSpec) require.Error(t, err) assert.Contains(t, err.Error(), "refusing to replace inactive sectors") }) @@ -356,13 +357,9 @@ func TestPartitions(t *testing.T) { sectorArr := sectorsArr(t, store, allSectors) // Add an unproven sector. - power, err := partition.AddSectors( - store, false, - []*miner.SectorOnChainInfo{unprovenSector}, - sectorSize, quantSpec, - ) + err := partition.AddSectors(store, false, []*miner.SectorOnChainInfo{unprovenSector}, quantSpec) require.NoError(t, err) - expectedPower := miner.PowerForSectors(sectorSize, []*miner.SectorOnChainInfo{unprovenSector}) + expectedPower := miner.SectorPower(sectorSize) assert.True(t, expectedPower.Equals(power)) // fault sector 3, 4, 5 and 6 @@ -372,19 +369,19 @@ func TestPartitions(t *testing.T) { // mark 4and 5 as a recoveries recoverSet := bf(4, 5) - err = partition.DeclareFaultsRecovered(sectorArr, sectorSize, recoverSet) + err = partition.DeclareFaultsRecovered(recoverSet) require.NoError(t, err) // now terminate 1, 3, 5, and 7 terminations := bf(1, 3, 5, 7) terminationEpoch := abi.ChainEpoch(3) - removed, err := partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, sectorSize, quantSpec) + removed, err := partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, quantSpec) require.NoError(t, err) expectedActivePower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, bf(1))) - assert.True(t, expectedActivePower.Equals(removed.ActivePower)) + assert.True(t, expectedActivePower.Equals(removed.ActiveCount)) expectedFaultyPower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, bf(3, 5))) - assert.True(t, expectedFaultyPower.Equals(removed.FaultyPower)) + assert.True(t, expectedFaultyPower.Equals(removed.FaultyCount)) // expect partition state to no longer reflect power and pledge from terminated sectors and terminations to contain new sectors assertPartitionState(t, store, partition, quantSpec, sectorSize, sectors, bf(1, 2, 3, 4, 5, 6, 7), bf(4, 6), bf(4), terminations, bf()) @@ -410,7 +407,7 @@ func TestPartitions(t *testing.T) { terminations := bf(99) terminationEpoch := abi.ChainEpoch(3) - _, err := partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, sectorSize, quantSpec) + _, err := partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, quantSpec) require.EqualError(t, err, "can only terminate live sectors") }) @@ -422,17 +419,17 @@ func TestPartitions(t *testing.T) { terminationEpoch := abi.ChainEpoch(3) // First termination works. 
- removed, err := partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, sectorSize, quantSpec) + removed, err := partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, quantSpec) require.NoError(t, err) - expectedActivePower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, bf(1))) - assert.True(t, expectedActivePower.Equals(removed.ActivePower)) - assert.True(t, removed.FaultyPower.Equals(miner.NewPowerPairZero())) + expectedActivePower := miner.SectorPower(sectorSize) + assert.True(t, expectedActivePower.Equals(removed.ActiveCount)) + assert.True(t, removed.FaultyCount.Equals(miner.NewPowerPairZero())) count, err := removed.Count() require.NoError(t, err) assert.EqualValues(t, 1, count) // Second termination fails - _, err = partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, sectorSize, quantSpec) + _, err = partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, quantSpec) require.EqualError(t, err, "can only terminate live sectors") }) @@ -444,7 +441,7 @@ func TestPartitions(t *testing.T) { terminationEpoch := abi.ChainEpoch(3) // Termination works. - _, err := partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, sectorSize, quantSpec) + _, err := partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, quantSpec) require.NoError(t, err) // Fault declaration for terminated sectors fails. @@ -474,10 +471,10 @@ func TestPartitions(t *testing.T) { assert.Equal(t, abi.NewTokenAmount(1000+1001), expset.OnTimePledge) // active power only contains power from non-faulty sectors - assert.True(t, expset.ActivePower.Equals(miner.PowerForSectors(sectorSize, sectors[:2]))) + assert.True(t, expset.ActiveCount.Equals(miner.PowerForSectors(sectorSize, sectors[:2]))) // faulty power comes from early termination - assert.True(t, expset.FaultyPower.Equals(miner.PowerForSectors(sectorSize, sectors[3:4]))) + assert.True(t, expset.FaultyCount.Equals(miner.PowerForSectors(sectorSize, sectors[3:4]))) // expect sectors to be moved to terminations assertPartitionState(t, store, partition, quantSpec, sectorSize, sectors, bf(1, 2, 3, 4, 5, 6), bf(), bf(), bf(1, 2, 4), bf()) @@ -506,7 +503,7 @@ func TestPartitions(t *testing.T) { require.NoError(t, err) // add a recovery - err = partition.DeclareFaultsRecovered(sectorArr, sectorSize, bf(5)) + err = partition.DeclareFaultsRecovered(bf(5)) require.NoError(t, err) // pop first expiration set @@ -534,13 +531,9 @@ func TestPartitions(t *testing.T) { sectorArr := sectorsArr(t, store, allSectors) // Add an unproven sector. 
- power, err := partition.AddSectors( - store, false, - []*miner.SectorOnChainInfo{unprovenSector}, - sectorSize, quantSpec, - ) + err := partition.AddSectors(store, false, []*miner.SectorOnChainInfo{unprovenSector}, quantSpec) require.NoError(t, err) - expectedPower := miner.PowerForSectors(sectorSize, []*miner.SectorOnChainInfo{unprovenSector}) + expectedPower := miner.SectorPower(sectorSize) assert.True(t, expectedPower.Equals(power)) // make 4, 5 and 6 faulty @@ -550,7 +543,7 @@ func TestPartitions(t *testing.T) { // add 4 and 5 as recoveries recoverSet := bf(4, 5) - err = partition.DeclareFaultsRecovered(sectorArr, sectorSize, recoverSet) + err = partition.DeclareFaultsRecovered(recoverSet) require.NoError(t, err) // record entire partition as faulted @@ -561,8 +554,7 @@ func TestPartitions(t *testing.T) { assert.True(t, expectedNewFaultPower.Equals(newFaultyPower)) // 6 has always been faulty, so we shouldn't be penalized for it (except ongoing). - expectedPenalizedPower := miner.PowerForSectors(sectorSize, allSectors). - Sub(miner.PowerForSector(sectorSize, allSectors[5])) + expectedPenalizedPower := miner.SectorsPower(sectorSize, len(allSectors)) assert.True(t, expectedPenalizedPower.Equals(penalizedPower)) // We should lose power for sectors 1-3. @@ -590,13 +582,13 @@ func TestPartitions(t *testing.T) { // mark 4and 5 as a recoveries recoverSet := bf(4, 5) - err = partition.DeclareFaultsRecovered(sectorArr, sectorSize, recoverSet) + err = partition.DeclareFaultsRecovered(recoverSet) require.NoError(t, err) // now terminate 1, 3 and 5 terminations := bf(1, 3, 5) terminationEpoch := abi.ChainEpoch(3) - _, err = partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, sectorSize, quantSpec) + _, err = partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, quantSpec) require.NoError(t, err) // pop first termination @@ -653,9 +645,9 @@ func TestPartitions(t *testing.T) { } sectorNos := bf(ids...) 
- power, err := partition.AddSectors(store, false, manySectors, sectorSize, builtin.NoQuantization) + err := partition.AddSectors(store, false, manySectors, builtin.NoQuantization) require.NoError(t, err) - expectedPower := miner.PowerForSectors(sectorSize, manySectors) + expectedPower := miner.SectorsPower(sectorSize, len(manySectors)) assert.True(t, expectedPower.Equals(power)) assertPartitionState( @@ -700,9 +692,9 @@ func TestRecordSkippedFaults(t *testing.T) { store := ipld.NewADTStore(context.Background()) partition := emptyPartition(t, store) - power, err := partition.AddSectors(store, true, sectors, sectorSize, quantSpec) + err := partition.AddSectors(store, true, sectors, quantSpec) require.NoError(t, err) - expectedPower := miner.PowerForSectors(sectorSize, sectors) + expectedPower := miner.SectorsPower(sectorSize, len(sectors)) assert.True(t, expectedPower.Equals(power)) return store, partition @@ -732,7 +724,7 @@ func TestRecordSkippedFaults(t *testing.T) { // terminate 1 AND 2 terminations := bf(1, 2) terminationEpoch := abi.ChainEpoch(3) - _, err := partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, sectorSize, quantSpec) + _, err := partition.TerminateSectors(store, sectorArr, terminationEpoch, terminations, quantSpec) require.NoError(t, err) assertPartitionState(t, store, partition, quantSpec, sectorSize, sectors, bf(1, 2, 3, 4, 5, 6), bf(), bf(), terminations, bf()) @@ -747,7 +739,7 @@ func TestRecordSkippedFaults(t *testing.T) { powerDelta, newFaultPower, retractedPower, newFaults, err := partition.RecordSkippedFaults(store, sectorArr, sectorSize, quantSpec, exp, skipped) require.NoError(t, err) require.EqualValues(t, miner.NewPowerPairZero(), retractedPower) - expectedFaultyPower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, bf(3))) + expectedFaultyPower := miner.SectorPower(sectorSize) require.EqualValues(t, expectedFaultyPower, newFaultPower) require.EqualValues(t, powerDelta, newFaultPower.Neg()) require.True(t, newFaults) @@ -766,7 +758,7 @@ func TestRecordSkippedFaults(t *testing.T) { // add 4 and 5 as recoveries recoverSet := bf(4, 5) - err = partition.DeclareFaultsRecovered(sectorArr, sectorSize, recoverSet) + err = partition.DeclareFaultsRecovered(recoverSet) require.NoError(t, err) assertPartitionState(t, store, partition, quantSpec, sectorSize, sectors, bf(1, 2, 3, 4, 5, 6), bf(4, 5, 6), bf(4, 5), bf(), bf()) @@ -778,12 +770,12 @@ func TestRecordSkippedFaults(t *testing.T) { require.True(t, newFaults) // only 1 is marked for fault power as 4 & 5 are recovering - expectedFaultyPower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, bf(1))) + expectedFaultyPower := miner.SectorPower(sectorSize) require.EqualValues(t, expectedFaultyPower, newFaultPower) require.EqualValues(t, expectedFaultyPower.Neg(), powerDelta) // 4 & 5 are marked for recovery power - expectedRecoveryPower := miner.PowerForSectors(sectorSize, selectSectors(t, sectors, bf(4, 5))) + expectedRecoveryPower := miner.SectorsPower(sectorSize, 2) require.EqualValues(t, expectedRecoveryPower, recoveryPower) assertPartitionState(t, store, partition, quantSpec, sectorSize, sectors, bf(1, 2, 3, 4, 5, 6), bf(1, 4, 5, 6), bf(), bf(), bf()) @@ -845,7 +837,7 @@ func assertPartitionState(t *testing.T, assertBitfieldsEqual(t, allSectorIds, partition.Sectors) msgs := &builtin.MessageAccumulator{} - _ = miner.CheckPartitionStateInvariants(partition, store, quant, sectorSize, sectorsAsMap(sectors), msgs) + _ = miner.CheckPartitionStateInvariants(partition, 
store, quant, sectorsAsMap(sectors), msgs) assert.True(t, msgs.IsEmpty(), strings.Join(msgs.Messages(), "\n")) } diff --git a/actors/builtin/miner/policy.go b/actors/builtin/miner/policy.go index 14e60ecca..05e435ec7 100644 --- a/actors/builtin/miner/policy.go +++ b/actors/builtin/miner/policy.go @@ -218,49 +218,6 @@ const DealLimitDenominator = 134217728 // PARAM_SPEC // for permissioned actor methods and winning block elections. const ConsensusFaultIneligibilityDuration = ChainFinality -// DealWeight and VerifiedDealWeight are spacetime occupied by regular deals and verified deals in a sector. -// Sum of DealWeight and VerifiedDealWeight should be less than or equal to total SpaceTime of a sector. -// Sectors full of VerifiedDeals will have a SectorQuality of VerifiedDealWeightMultiplier/QualityBaseMultiplier. -// Sectors full of Deals will have a SectorQuality of DealWeightMultiplier/QualityBaseMultiplier. -// Sectors with neither will have a SectorQuality of QualityBaseMultiplier/QualityBaseMultiplier. -// SectorQuality of a sector is a weighted average of multipliers based on their proportions. -func QualityForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.SectorQuality { - // sectorSpaceTime = size * duration - sectorSpaceTime := big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(duration))) - // totalDealSpaceTime = dealWeight + verifiedWeight - totalDealSpaceTime := big.Add(dealWeight, verifiedWeight) - - // Base - all size * duration of non-deals - // weightedBaseSpaceTime = (sectorSpaceTime - totalDealSpaceTime) * QualityBaseMultiplier - weightedBaseSpaceTime := big.Mul(big.Sub(sectorSpaceTime, totalDealSpaceTime), builtin.QualityBaseMultiplier) - // Deal - all deal size * deal duration * 10 - // weightedDealSpaceTime = dealWeight * DealWeightMultiplier - weightedDealSpaceTime := big.Mul(dealWeight, builtin.DealWeightMultiplier) - // Verified - all verified deal size * verified deal duration * 100 - // weightedVerifiedSpaceTime = verifiedWeight * VerifiedDealWeightMultiplier - weightedVerifiedSpaceTime := big.Mul(verifiedWeight, builtin.VerifiedDealWeightMultiplier) - // Sum - sum of all spacetime - // weightedSumSpaceTime = weightedBaseSpaceTime + weightedDealSpaceTime + weightedVerifiedSpaceTime - weightedSumSpaceTime := big.Sum(weightedBaseSpaceTime, weightedDealSpaceTime, weightedVerifiedSpaceTime) - // scaledUpWeightedSumSpaceTime = weightedSumSpaceTime * 2^20 - scaledUpWeightedSumSpaceTime := big.Lsh(weightedSumSpaceTime, builtin.SectorQualityPrecision) - - // Average of weighted space time: (scaledUpWeightedSumSpaceTime / sectorSpaceTime * 10) - return big.Div(big.Div(scaledUpWeightedSumSpaceTime, sectorSpaceTime), builtin.QualityBaseMultiplier) -} - -// The power for a sector size, committed duration, and weight. -func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { - quality := QualityForWeight(size, duration, dealWeight, verifiedWeight) - return big.Rsh(big.Mul(big.NewIntUnsigned(uint64(size)), quality), builtin.SectorQualityPrecision) -} - -// The quality-adjusted power for a sector. 
-func QAPowerForSector(size abi.SectorSize, sector *SectorOnChainInfo) abi.StoragePower { - duration := sector.Expiration - sector.Activation - return QAPowerForWeight(size, duration, sector.DealWeight, sector.VerifiedDealWeight) -} - // Determine maximum number of deal miner's sector can hold func SectorDealsMax(size abi.SectorSize) uint64 { return max64(256, uint64(size/DealLimitDenominator)) @@ -295,7 +252,7 @@ func RewardForConsensusSlashReport(epochReward abi.TokenAmount) abi.TokenAmount } // The reward given for successfully disputing a window post. -func RewardForDisputedWindowPoSt(proofType abi.RegisteredPoStProof, disputedPower PowerPair) abi.TokenAmount { +func RewardForDisputedWindowPoSt(proofType abi.RegisteredPoStProof, disputedPower abi.StoragePower) abi.TokenAmount { // This is currently just the base. In the future, the fee may scale based on the disputed power. return BaseRewardForDisputedWindowPoSt } diff --git a/actors/builtin/miner/policy_test.go b/actors/builtin/miner/policy_test.go index bca6f8da0..477471879 100644 --- a/actors/builtin/miner/policy_test.go +++ b/actors/builtin/miner/policy_test.go @@ -135,7 +135,7 @@ func TestPower(t *testing.T) { } func weight(size abi.SectorSize, duration abi.ChainEpoch) big.Int { - return big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(duration))) + return big.Mul(miner.SectorPower(size), big.NewInt(int64(duration))) } func assertEqual(t *testing.T, a, b big.Int) { diff --git a/actors/builtin/miner/testing.go b/actors/builtin/miner/testing.go index 01c36cd4e..9a933f037 100644 --- a/actors/builtin/miner/testing.go +++ b/actors/builtin/miner/testing.go @@ -17,24 +17,25 @@ type DealSummary struct { } type StateSummary struct { - LivePower PowerPair - ActivePower PowerPair - FaultyPower PowerPair + LiveCount uint64 + ActiveCount uint64 + FaultyCount uint64 Deals map[abi.DealID]DealSummary WindowPoStProofType abi.RegisteredPoStProof DeadlineCronActive bool + SectorSize abi.SectorSize } // Checks internal invariants of init state. func CheckStateInvariants(st *State, store adt.Store, balance abi.TokenAmount) (*StateSummary, *builtin.MessageAccumulator) { acc := &builtin.MessageAccumulator{} - sectorSize := abi.SectorSize(0) minerSummary := &StateSummary{ - LivePower: NewPowerPairZero(), - ActivePower: NewPowerPairZero(), - FaultyPower: NewPowerPairZero(), + LiveCount: 0, + ActiveCount: 0, + FaultyCount: 0, WindowPoStProofType: 0, DeadlineCronActive: st.DeadlineCronActive, + SectorSize: abi.SectorSize(0), } // Load data from linked structures. 
@@ -44,7 +45,7 @@ func CheckStateInvariants(st *State, store adt.Store, balance abi.TokenAmount) ( return minerSummary, acc } else { minerSummary.WindowPoStProofType = info.WindowPoStProofType - sectorSize = info.SectorSize + minerSummary.SectorSize = info.SectorSize CheckMinerInfo(info, acc) } @@ -103,11 +104,11 @@ func CheckStateInvariants(st *State, store adt.Store, balance abi.TokenAmount) ( err = deadlines.ForEach(store, func(dlIdx uint64, dl *Deadline) error { acc := acc.WithPrefix("deadline %d: ", dlIdx) // Shadow quant := st.QuantSpecForDeadline(dlIdx) - dlSummary := CheckDeadlineStateInvariants(dl, store, quant, sectorSize, allSectors, acc) + dlSummary := CheckDeadlineStateInvariants(dl, store, quant, allSectors, acc) - minerSummary.LivePower = minerSummary.LivePower.Add(dlSummary.LivePower) - minerSummary.ActivePower = minerSummary.ActivePower.Add(dlSummary.ActivePower) - minerSummary.FaultyPower = minerSummary.FaultyPower.Add(dlSummary.FaultyPower) + minerSummary.LiveCount += dlSummary.LiveCount + minerSummary.ActiveCount += dlSummary.ActiveCount + minerSummary.FaultyCount += dlSummary.FaultyCount return nil }) acc.RequireNoError(err, "error iterating deadlines") @@ -123,12 +124,12 @@ type DeadlineStateSummary struct { RecoveringSectors bitfield.BitField UnprovenSectors bitfield.BitField TerminatedSectors bitfield.BitField - LivePower PowerPair - ActivePower PowerPair - FaultyPower PowerPair + LiveCount uint64 + ActiveCount uint64 + FaultyCount uint64 } -func CheckDeadlineStateInvariants(deadline *Deadline, store adt.Store, quant builtin.QuantSpec, ssize abi.SectorSize, +func CheckDeadlineStateInvariants(deadline *Deadline, store adt.Store, quant builtin.QuantSpec, sectors map[abi.SectorNumber]*SectorOnChainInfo, acc *builtin.MessageAccumulator) *DeadlineStateSummary { // Load linked structures. @@ -143,9 +144,9 @@ func CheckDeadlineStateInvariants(deadline *Deadline, store adt.Store, quant bui RecoveringSectors: bitfield.New(), UnprovenSectors: bitfield.New(), TerminatedSectors: bitfield.New(), - LivePower: NewPowerPairZero(), - ActivePower: NewPowerPairZero(), - FaultyPower: NewPowerPairZero(), + LiveCount: 0, + ActiveCount: 0, + FaultyCount: 0, } } @@ -155,9 +156,9 @@ func CheckDeadlineStateInvariants(deadline *Deadline, store adt.Store, quant bui var allRecoveringSectors []bitfield.BitField var allUnprovenSectors []bitfield.BitField var allTerminatedSectors []bitfield.BitField - allLivePower := NewPowerPairZero() - allActivePower := NewPowerPairZero() - allFaultyPower := NewPowerPairZero() + allLiveCount := uint64(0) + allActiveCount := uint64(0) + allFaultyCount := uint64(0) // Check partitions. 
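+	// Accumulate per-partition live/active/faulty sector counts; the deadline's FaultySectors
+	// total is checked against these sums below.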
partitionsWithExpirations := map[abi.ChainEpoch][]uint64{} @@ -171,7 +172,7 @@ func CheckDeadlineStateInvariants(deadline *Deadline, store adt.Store, quant bui partitionCount++ acc := acc.WithPrefix("partition %d: ", pIdx) // Shadow - summary := CheckPartitionStateInvariants(&partition, store, quant, ssize, sectors, acc) + summary := CheckPartitionStateInvariants(&partition, store, quant, sectors, acc) if contains, err := util.BitFieldContainsAny(allSectors, summary.AllSectors); err != nil { acc.Addf("error checking bitfield contains: %v", err) @@ -196,9 +197,9 @@ func CheckDeadlineStateInvariants(deadline *Deadline, store adt.Store, quant bui allRecoveringSectors = append(allRecoveringSectors, summary.RecoveringSectors) allUnprovenSectors = append(allUnprovenSectors, summary.UnprovenSectors) allTerminatedSectors = append(allTerminatedSectors, summary.TerminatedSectors) - allLivePower = allLivePower.Add(summary.LivePower) - allActivePower = allActivePower.Add(summary.ActivePower) - allFaultyPower = allFaultyPower.Add(summary.FaultyPower) + allLiveCount += summary.LiveCount + allActiveCount += summary.ActiveCount + allFaultyCount += summary.FaultyCount return nil }) acc.RequireNoError(err, "error iterating partitions") @@ -222,14 +223,14 @@ func CheckDeadlineStateInvariants(deadline *Deadline, store adt.Store, quant bui err = partitionsSnapshot.ForEach(&partition, func(i int64) error { acc := acc.WithPrefix("partition snapshot %d: ", i) // Shadow - acc.Require(partition.RecoveringPower.IsZero(), "snapshot partition has recovering power") + //acc.Require(partition.RecoveringPower.IsZero(), "snapshot partition has recovering power") if noRecoveries, err := partition.Recoveries.IsEmpty(); err != nil { acc.Addf("error counting recoveries: %v", err) } else { acc.Require(noRecoveries, "snapshot partition has pending recoveries") } - acc.Require(partition.UnprovenPower.IsZero(), "snapshot partition has unproven power") + //acc.Require(partition.UnprovenPower.IsZero(), "snapshot partition has unproven power") if noUnproven, err := partition.Unproven.IsEmpty(); err != nil { acc.Addf("error counting unproven: %v", err) } else { @@ -296,7 +297,7 @@ func CheckDeadlineStateInvariants(deadline *Deadline, store adt.Store, quant bui terminated = bitfield.New() } - acc.Require(deadline.FaultyPower.Equals(allFaultyPower), "deadline faulty power %v != partitions total %v", deadline.FaultyPower, allFaultyPower) + acc.Require(deadline.FaultySectors == allFaultyCount, "deadline faulty count %v != partitions total %v", deadline.FaultySectors, allFaultyCount) { // Validate partition expiration queue contains an entry for each partition and epoch with an expiration. @@ -335,9 +336,9 @@ func CheckDeadlineStateInvariants(deadline *Deadline, store adt.Store, quant bui RecoveringSectors: recovering, UnprovenSectors: unproven, TerminatedSectors: terminated, - LivePower: allLivePower, - ActivePower: allActivePower, - FaultyPower: allFaultyPower, + LiveCount: allLiveCount, + ActiveCount: allActiveCount, + FaultyCount: allFaultyCount, } } @@ -348,10 +349,10 @@ type PartitionStateSummary struct { RecoveringSectors bitfield.BitField UnprovenSectors bitfield.BitField TerminatedSectors bitfield.BitField - LivePower PowerPair - ActivePower PowerPair - FaultyPower PowerPair - RecoveringPower PowerPair + LiveCount uint64 + ActiveCount uint64 + FaultyCount uint64 + RecoveringCount uint64 ExpirationEpochs []abi.ChainEpoch // Epochs at which some sector is scheduled to expire. 
 	EarlyTerminationCount int
 }
 
@@ -360,7 +361,6 @@ func CheckPartitionStateInvariants(
 	partition *Partition,
 	store adt.Store,
 	quant builtin.QuantSpec,
-	sectorSize abi.SectorSize,
 	sectors map[abi.SectorNumber]*SectorOnChainInfo,
 	acc *builtin.MessageAccumulator,
 ) *PartitionStateSummary {
@@ -370,11 +370,37 @@ func CheckPartitionStateInvariants(
 		acc.Addf("error computing live sectors: %v", err)
 		irrecoverable = true
 	}
+	liveCount, err := live.Count()
+	if err != nil {
+		acc.Addf("error counting live sectors: %v", err)
+		irrecoverable = true
+	}
+	faultyCount, err := partition.Faults.Count()
+	if err != nil {
+		acc.Addf("error counting faulty sectors: %v", err)
+		irrecoverable = true
+	}
+	recoveringCount, err := partition.Recoveries.Count()
+	if err != nil {
+		acc.Addf("error counting recovering sectors: %v", err)
+		irrecoverable = true
+	}
+	unprovenCount, err := partition.Unproven.Count()
+	if err != nil {
+		acc.Addf("error counting unproven sectors: %v", err)
+		irrecoverable = true
+	}
+
 	active, err := partition.ActiveSectors()
 	if err != nil {
 		acc.Addf("error computing active sectors: %v", err)
 		irrecoverable = true
 	}
+	activeCount, err := active.Count()
+	if err != nil {
+		acc.Addf("error counting active sectors: %v", err)
+		irrecoverable = true
+	}
 
 	if irrecoverable {
 		return &PartitionStateSummary{
@@ -384,10 +408,10 @@ func CheckPartitionStateInvariants(
 			RecoveringSectors:     partition.Recoveries,
 			UnprovenSectors:       partition.Unproven,
 			TerminatedSectors:     partition.Terminated,
-			LivePower:             partition.LivePower,
-			ActivePower:           partition.ActivePower(),
-			FaultyPower:           partition.FaultyPower,
-			RecoveringPower:       partition.RecoveringPower,
+			LiveCount:             liveCount,
+			ActiveCount:           activeCount,
+			FaultyCount:           faultyCount,
+			RecoveringCount:       recoveringCount,
 			ExpirationEpochs:      nil,
 			EarlyTerminationCount: 0,
 		}
@@ -423,56 +447,40 @@ func CheckPartitionStateInvariants(
 	// Validate power
 	var liveSectors map[abi.SectorNumber]*SectorOnChainInfo
 	var missing []abi.SectorNumber
-	livePower := NewPowerPairZero()
-	faultyPower := NewPowerPairZero()
-	unprovenPower := NewPowerPairZero()
 
 	if liveSectors, missing, err = selectSectorsMap(sectors, live); err != nil {
 		acc.Addf("error selecting live sectors: %v", err)
 	} else if len(missing) > 0 {
 		acc.Addf("live sectors missing from all sectors: %v", missing)
-	} else {
-		livePower = powerForSectors(liveSectors, sectorSize)
-		acc.Require(partition.LivePower.Equals(livePower), "live power was %v, expected %v", partition.LivePower, livePower)
 	}
 
-	if unprovenSectors, missing, err := selectSectorsMap(sectors, partition.Unproven); err != nil {
+	if _, missing, err := selectSectorsMap(sectors, partition.Unproven); err != nil {
 		acc.Addf("error selecting unproven sectors: %v", err)
 	} else if len(missing) > 0 {
 		acc.Addf("unproven sectors missing from all sectors: %v", missing)
-	} else {
-		unprovenPower = powerForSectors(unprovenSectors, sectorSize)
-		acc.Require(partition.UnprovenPower.Equals(unprovenPower), "unproven power was %v, expected %v", partition.UnprovenPower, unprovenPower)
 	}
 
-	if faultySectors, missing, err := selectSectorsMap(sectors, partition.Faults); err != nil {
+	if _, missing, err := selectSectorsMap(sectors, partition.Faults); err != nil {
 		acc.Addf("error selecting faulty sectors: %v", err)
 	} else if len(missing) > 0 {
 		acc.Addf("faulty sectors missing from all sectors: %v", missing)
-	} else {
-		faultyPower = powerForSectors(faultySectors, sectorSize)
-		acc.Require(partition.FaultyPower.Equals(faultyPower), "faulty power was %v, expected %v", partition.FaultyPower, faultyPower)
 	}
 
-	if recoveringSectors, missing, err := selectSectorsMap(sectors, partition.Recoveries); err != nil {
+	if _, missing, err := 
selectSectorsMap(sectors, partition.Recoveries); err != nil { acc.Addf("error selecting recovering sectors: %v", err) } else if len(missing) > 0 { acc.Addf("recovering sectors missing from all sectors: %v", missing) - } else { - recoveringPower := powerForSectors(recoveringSectors, sectorSize) - acc.Require(partition.RecoveringPower.Equals(recoveringPower), "recovering power was %v, expected %v", partition.RecoveringPower, recoveringPower) } - activePower := livePower.Sub(faultyPower).Sub(unprovenPower) - partitionActivePower := partition.ActivePower() - acc.Require(partitionActivePower.Equals(activePower), "active power was %v, expected %v", partitionActivePower, activePower) + expectedActiveCount := liveCount - faultyCount - unprovenCount + acc.Require(activeCount == expectedActiveCount, "active count %v doesn't match expected %v", activeCount, expectedActiveCount) // Validate the expiration queue. var expirationEpochs []abi.ChainEpoch if expQ, err := LoadExpirationQueue(store, partition.ExpirationsEpochs, quant, PartitionExpirationAmtBitwidth); err != nil { acc.Addf("error loading expiration queue: %v", err) } else if liveSectors != nil { - qsummary := CheckExpirationQueue(expQ, liveSectors, partition.Faults, quant, sectorSize, acc) + qsummary := CheckExpirationQueue(expQ, liveSectors, partition.Faults, quant, acc) expirationEpochs = qsummary.ExpirationEpochs // Check the queue is compatible with partition fields @@ -498,10 +506,10 @@ func CheckPartitionStateInvariants( RecoveringSectors: partition.Recoveries, UnprovenSectors: partition.Unproven, TerminatedSectors: partition.Terminated, - LivePower: livePower, - ActivePower: activePower, - FaultyPower: partition.FaultyPower, - RecoveringPower: partition.RecoveringPower, + LiveCount: uint64(len(liveSectors)), + ActiveCount: activeCount, + FaultyCount: faultyCount, + RecoveringCount: recoveringCount, ExpirationEpochs: expirationEpochs, EarlyTerminationCount: earlyTerminationCount, } @@ -510,15 +518,15 @@ func CheckPartitionStateInvariants( type ExpirationQueueStateSummary struct { OnTimeSectors bitfield.BitField EarlySectors bitfield.BitField - ActivePower PowerPair - FaultyPower PowerPair + ActiveCount uint64 + FaultyCount uint64 OnTimePledge abi.TokenAmount ExpirationEpochs []abi.ChainEpoch } // Checks the expiration queue for consistency. 
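+// The queue now records per-set sector counts (ActiveCount/FaultyCount) instead of power,
+// so the sector size parameter is no longer needed.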
func CheckExpirationQueue(expQ ExpirationQueue, liveSectors map[abi.SectorNumber]*SectorOnChainInfo, - partitionFaults bitfield.BitField, quant builtin.QuantSpec, sectorSize abi.SectorSize, acc *builtin.MessageAccumulator) *ExpirationQueueStateSummary { + partitionFaults bitfield.BitField, quant builtin.QuantSpec, acc *builtin.MessageAccumulator) *ExpirationQueueStateSummary { partitionFaultsMap, err := partitionFaults.AllMap(1 << 30) if err != nil { acc.Addf("error loading partition faults map: %v", err) @@ -529,8 +537,8 @@ func CheckExpirationQueue(expQ ExpirationQueue, liveSectors map[abi.SectorNumber var allOnTime []bitfield.BitField var allEarly []bitfield.BitField var expirationEpochs []abi.ChainEpoch - allActivePower := NewPowerPairZero() - allFaultyPower := NewPowerPairZero() + allActiveCount := uint64(0) + allFaultyCount := uint64(0) allOnTimePledge := big.Zero() firstQueueEpoch := abi.ChainEpoch(-1) var exp ExpirationSet @@ -622,19 +630,19 @@ func CheckExpirationQueue(expQ ExpirationQueue, liveSectors map[abi.SectorNumber } if activeSectors != nil && faultySectors != nil { - activeSectorsPower := powerForSectors(activeSectors, sectorSize) - acc.Require(exp.ActivePower.Equals(activeSectorsPower), "active power recorded %v doesn't match computed %v", exp.ActivePower, activeSectorsPower) + activeCount := uint64(len(activeSectors)) + acc.Require(exp.ActiveCount == activeCount, "active power recorded %v doesn't match computed %v", exp.ActiveCount, activeCount) - faultySectorsPower := powerForSectors(faultySectors, sectorSize) - acc.Require(exp.FaultyPower.Equals(faultySectorsPower), "faulty power recorded %v doesn't match computed %v", exp.FaultyPower, faultySectorsPower) + faultyCount := uint64(len(faultySectors)) + acc.Require(exp.FaultyCount == faultyCount, "faulty power recorded %v doesn't match computed %v", exp.FaultyCount, faultyCount) } acc.Require(exp.OnTimePledge.Equals(onTimeSectorsPledge), "on time pledge recorded %v doesn't match computed %v", exp.OnTimePledge, onTimeSectorsPledge) allOnTime = append(allOnTime, exp.OnTimeSectors) allEarly = append(allEarly, exp.EarlySectors) - allActivePower = allActivePower.Add(exp.ActivePower) - allFaultyPower = allFaultyPower.Add(exp.FaultyPower) + allActiveCount += exp.ActiveCount + allFaultyCount += exp.FaultyCount allOnTimePledge = big.Add(allOnTimePledge, exp.OnTimePledge) return nil }) @@ -653,8 +661,8 @@ func CheckExpirationQueue(expQ ExpirationQueue, liveSectors map[abi.SectorNumber return &ExpirationQueueStateSummary{ OnTimeSectors: unionOnTime, EarlySectors: unionEarly, - ActivePower: allActivePower, - FaultyPower: allFaultyPower, + ActiveCount: allActiveCount, + FaultyCount: allFaultyCount, OnTimePledge: allOnTimePledge, ExpirationEpochs: expirationEpochs, } @@ -821,18 +829,6 @@ func selectSectorsMap(sectors map[abi.SectorNumber]*SectorOnChainInfo, include b return included, missing, nil } -func powerForSectors(sectors map[abi.SectorNumber]*SectorOnChainInfo, ssize abi.SectorSize) PowerPair { - qa := big.Zero() - for _, s := range sectors { // nolint:nomaprange - qa = big.Add(qa, QAPowerForSector(ssize, s)) - } - - return PowerPair{ - Raw: big.Mul(big.NewIntUnsigned(uint64(ssize)), big.NewIntUnsigned(uint64(len(sectors)))), - QA: qa, - } -} - func requireContainsAll(superset, subset bitfield.BitField, acc *builtin.MessageAccumulator, msg string) { if contains, err := util.BitFieldContainsAll(superset, subset); err != nil { acc.Addf("error in BitfieldContainsAll(): %v", err) diff --git a/actors/builtin/network.go 
b/actors/builtin/network.go index 8f86e544a..984de7cfa 100644 --- a/actors/builtin/network.go +++ b/actors/builtin/network.go @@ -48,15 +48,15 @@ var TokenPrecision = big.NewIntUnsigned(1_000_000_000_000_000_000) var TotalFilecoin = big.Mul(big.NewIntUnsigned(2_000_000_000), TokenPrecision) // Quality multiplier for committed capacity (no deals) in a sector -var QualityBaseMultiplier = big.NewInt(10) +//var QualityBaseMultiplier = big.NewInt(10) // Quality multiplier for unverified deals in a sector -var DealWeightMultiplier = big.NewInt(10) +//var DealWeightMultiplier = big.NewInt(10) // Quality multiplier for verified deals in a sector -var VerifiedDealWeightMultiplier = big.NewInt(100) +//var VerifiedDealWeightMultiplier = big.NewInt(100) -// Precision used for making QA power calculations +// Precision used for making power calculations const SectorQualityPrecision = 20 // 1 NanoFIL diff --git a/actors/builtin/power/cbor_gen.go b/actors/builtin/power/cbor_gen.go index 27c7b3dcd..58857b147 100644 --- a/actors/builtin/power/cbor_gen.go +++ b/actors/builtin/power/cbor_gen.go @@ -14,7 +14,7 @@ import ( var _ = xerrors.Errorf -var lengthBufState = []byte{143} +var lengthBufState = []byte{140} func (t *State) MarshalCBOR(w io.Writer) error { if t == nil { @@ -37,16 +37,6 @@ func (t *State) MarshalCBOR(w io.Writer) error { return err } - // t.TotalQualityAdjPower (big.Int) (struct) - if err := t.TotalQualityAdjPower.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalQABytesCommitted (big.Int) (struct) - if err := t.TotalQABytesCommitted.MarshalCBOR(w); err != nil { - return err - } - // t.TotalPledgeCollateral (big.Int) (struct) if err := t.TotalPledgeCollateral.MarshalCBOR(w); err != nil { return err @@ -57,18 +47,13 @@ func (t *State) MarshalCBOR(w io.Writer) error { return err } - // t.ThisEpochQualityAdjPower (big.Int) (struct) - if err := t.ThisEpochQualityAdjPower.MarshalCBOR(w); err != nil { - return err - } - // t.ThisEpochPledgeCollateral (big.Int) (struct) if err := t.ThisEpochPledgeCollateral.MarshalCBOR(w); err != nil { return err } - // t.ThisEpochQAPowerSmoothed (smoothing.FilterEstimate) (struct) - if err := t.ThisEpochQAPowerSmoothed.MarshalCBOR(w); err != nil { + // t.ThisEpochRawBytePowerSmoothed (smoothing.FilterEstimate) (struct) + if err := t.ThisEpochRawBytePowerSmoothed.MarshalCBOR(w); err != nil { return err } @@ -146,7 +131,7 @@ func (t *State) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 15 { + if extra != 12 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -167,24 +152,6 @@ func (t *State) UnmarshalCBOR(r io.Reader) error { return xerrors.Errorf("unmarshaling t.TotalBytesCommitted: %w", err) } - } - // t.TotalQualityAdjPower (big.Int) (struct) - - { - - if err := t.TotalQualityAdjPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalQualityAdjPower: %w", err) - } - - } - // t.TotalQABytesCommitted (big.Int) (struct) - - { - - if err := t.TotalQABytesCommitted.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalQABytesCommitted: %w", err) - } - } // t.TotalPledgeCollateral (big.Int) (struct) @@ -203,15 +170,6 @@ func (t *State) UnmarshalCBOR(r io.Reader) error { return xerrors.Errorf("unmarshaling t.ThisEpochRawBytePower: %w", err) } - } - // t.ThisEpochQualityAdjPower (big.Int) (struct) - - { - - if err := t.ThisEpochQualityAdjPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling 
t.ThisEpochQualityAdjPower: %w", err) - } - } // t.ThisEpochPledgeCollateral (big.Int) (struct) @@ -222,12 +180,12 @@ func (t *State) UnmarshalCBOR(r io.Reader) error { } } - // t.ThisEpochQAPowerSmoothed (smoothing.FilterEstimate) (struct) + // t.ThisEpochRawBytePowerSmoothed (smoothing.FilterEstimate) (struct) { - if err := t.ThisEpochQAPowerSmoothed.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ThisEpochQAPowerSmoothed: %w", err) + if err := t.ThisEpochRawBytePowerSmoothed.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochRawBytePowerSmoothed: %w", err) } } @@ -355,7 +313,7 @@ func (t *State) UnmarshalCBOR(r io.Reader) error { return nil } -var lengthBufClaim = []byte{131} +var lengthBufClaim = []byte{130} func (t *Claim) MarshalCBOR(w io.Writer) error { if t == nil { @@ -383,11 +341,6 @@ func (t *Claim) MarshalCBOR(w io.Writer) error { if err := t.RawBytePower.MarshalCBOR(w); err != nil { return err } - - // t.QualityAdjPower (big.Int) (struct) - if err := t.QualityAdjPower.MarshalCBOR(w); err != nil { - return err - } return nil } @@ -405,7 +358,7 @@ func (t *Claim) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 3 { + if extra != 2 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -442,15 +395,6 @@ func (t *Claim) UnmarshalCBOR(r io.Reader) error { return xerrors.Errorf("unmarshaling t.RawBytePower: %w", err) } - } - // t.QualityAdjPower (big.Int) (struct) - - { - - if err := t.QualityAdjPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjPower: %w", err) - } - } return nil } @@ -742,7 +686,55 @@ func (t *CreateMinerParams) UnmarshalCBOR(r io.Reader) error { return nil } -var lengthBufCurrentTotalPowerReturn = []byte{132} +var lengthBufUpdateClaimedPowerParams = []byte{129} + +func (t *UpdateClaimedPowerParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufUpdateClaimedPowerParams); err != nil { + return err + } + + // t.RawByteDelta (big.Int) (struct) + if err := t.RawByteDelta.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *UpdateClaimedPowerParams) UnmarshalCBOR(r io.Reader) error { + *t = UpdateClaimedPowerParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.RawByteDelta (big.Int) (struct) + + { + + if err := t.RawByteDelta.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.RawByteDelta: %w", err) + } + + } + return nil +} + +var lengthBufCurrentTotalPowerReturn = []byte{131} func (t *CurrentTotalPowerReturn) MarshalCBOR(w io.Writer) error { if t == nil { @@ -758,18 +750,13 @@ func (t *CurrentTotalPowerReturn) MarshalCBOR(w io.Writer) error { return err } - // t.QualityAdjPower (big.Int) (struct) - if err := t.QualityAdjPower.MarshalCBOR(w); err != nil { - return err - } - // t.PledgeCollateral (big.Int) (struct) if err := t.PledgeCollateral.MarshalCBOR(w); err != nil { return err } - // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) - if err := t.QualityAdjPowerSmoothed.MarshalCBOR(w); err != nil { + // t.RawBytePowerSmoothed (smoothing.FilterEstimate) (struct) + if err := 
t.RawBytePowerSmoothed.MarshalCBOR(w); err != nil { return err } return nil @@ -789,7 +776,7 @@ func (t *CurrentTotalPowerReturn) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 4 { + if extra != 3 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -801,15 +788,6 @@ func (t *CurrentTotalPowerReturn) UnmarshalCBOR(r io.Reader) error { return xerrors.Errorf("unmarshaling t.RawBytePower: %w", err) } - } - // t.QualityAdjPower (big.Int) (struct) - - { - - if err := t.QualityAdjPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjPower: %w", err) - } - } // t.PledgeCollateral (big.Int) (struct) @@ -820,12 +798,12 @@ func (t *CurrentTotalPowerReturn) UnmarshalCBOR(r io.Reader) error { } } - // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) + // t.RawBytePowerSmoothed (smoothing.FilterEstimate) (struct) { - if err := t.QualityAdjPowerSmoothed.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjPowerSmoothed: %w", err) + if err := t.RawBytePowerSmoothed.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.RawBytePowerSmoothed: %w", err) } } diff --git a/actors/builtin/power/power_actor.go b/actors/builtin/power/power_actor.go index 223357497..9faea6283 100644 --- a/actors/builtin/power/power_actor.go +++ b/actors/builtin/power/power_actor.go @@ -135,7 +135,7 @@ func (a Actor) CreateMiner(rt Runtime, params *CreateMinerParams) *CreateMinerRe claims, err := adt.AsMap(adt.AsStore(rt), st.Claims, builtin.DefaultHamtBitwidth) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load claims") - err = setClaim(claims, addresses.IDAddress, &Claim{params.WindowPoStProofType, abi.NewStoragePower(0), abi.NewStoragePower(0)}) + err = setClaim(claims, addresses.IDAddress, &Claim{params.WindowPoStProofType, abi.NewStoragePower(0)}) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put power in claimed table while creating miner") st.MinerCount += 1 @@ -153,11 +153,11 @@ func (a Actor) CreateMiner(rt Runtime, params *CreateMinerParams) *CreateMinerRe } } -//type UpdateClaimedPowerParams struct { -// RawByteDelta abi.StoragePower -// QualityAdjustedDelta abi.StoragePower -//} -type UpdateClaimedPowerParams = power0.UpdateClaimedPowerParams +// Changed in v8: +// - Removed QualityAdjustedDelta +type UpdateClaimedPowerParams struct { + RawByteDelta abi.StoragePower +} // Adds or removes claimed power for the calling actor. // May only be invoked by a miner actor. 
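With QualityAdjustedDelta removed, callers report only a raw-byte delta. A minimal caller-side sketch (illustrative only, not part of this diff; it mirrors how the miner actor already sends power updates to the power actor, and the 32 GiB value is just an example):

// Illustrative sketch: reporting a raw-byte power delta under the v8 params.
// Negative deltas subtract power.
delta := &power.UpdateClaimedPowerParams{
	RawByteDelta: abi.NewStoragePower(32 << 30), // example: one 32 GiB sector added
}
code := rt.Send(
	builtin.StoragePowerActorAddr,
	builtin.MethodsPower.UpdateClaimedPower,
	delta,
	abi.NewTokenAmount(0),
	&builtin.Discard{},
)
builtin.RequireSuccess(rt, code, "failed to update claimed power")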
@@ -169,8 +169,8 @@ func (a Actor) UpdateClaimedPower(rt Runtime, params *UpdateClaimedPowerParams) claims, err := adt.AsMap(adt.AsStore(rt), st.Claims, builtin.DefaultHamtBitwidth) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load claims") - err = st.addToClaim(claims, minerAddr, params.RawByteDelta, params.QualityAdjustedDelta) - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update power raw %s, qa %s", params.RawByteDelta, params.QualityAdjustedDelta) + err = st.addToClaim(claims, minerAddr, params.RawByteDelta) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update power %s", params.RawByteDelta) st.Claims, err = claims.Root() builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush claims") @@ -229,9 +229,8 @@ func (a Actor) OnEpochTickEnd(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue { // update next epoch's power and pledge values // this must come before the next epoch's rewards are calculated // so that next epoch reward reflects power added this epoch - rawBytePower, qaPower := CurrentTotalPower(&st) + rawBytePower := CurrentTotalPower(&st) st.ThisEpochPledgeCollateral = st.TotalPledgeCollateral - st.ThisEpochQualityAdjPower = qaPower st.ThisEpochRawBytePower = rawBytePower // we can now assume delta is one since cron is invoked on every epoch. st.updateSmoothedEstimate(abi.ChainEpoch(1)) @@ -305,13 +304,15 @@ func (a Actor) SubmitPoRepForBulkVerify(rt Runtime, sealInfo *proof.SealVerifyIn return nil } -// Changed since v0: -// - QualityAdjPowerSmoothed is not a pointer +// Changed in v8: +// - Removed QualityAdjPower and QualityAdjPowerSmoothed +// - Added RawBytePowerSmoothed type CurrentTotalPowerReturn struct { - RawBytePower abi.StoragePower - QualityAdjPower abi.StoragePower - PledgeCollateral abi.TokenAmount - QualityAdjPowerSmoothed smoothing.FilterEstimate + RawBytePower abi.StoragePower + //QualityAdjPower abi.StoragePower + PledgeCollateral abi.TokenAmount + //QualityAdjPowerSmoothed smoothing.FilterEstimate + RawBytePowerSmoothed smoothing.FilterEstimate } // Returns the total power and pledge recorded by the power actor. 
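The regenerated lengthBuf constants earlier in this diff follow directly from the new field counts: for tuple-encoded structs with fewer than 24 fields, cbor-gen writes a single-byte CBOR array header equal to 0x80 (major type 4) OR'd with the field count. A standalone sanity check of the values used above (illustrative only):

package main

import "fmt"

func main() {
	// State: 15 -> 12 fields, Claim: 3 -> 2, UpdateClaimedPowerParams: 1 field,
	// CurrentTotalPowerReturn: 4 -> 3.
	for _, n := range []int{12, 2, 1, 3} {
		fmt.Printf("%2d fields -> header byte %d\n", n, 0x80|n) // 140, 130, 129, 131
	}
}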
@@ -324,10 +325,9 @@ func (a Actor) CurrentTotalPower(rt Runtime, _ *abi.EmptyValue) *CurrentTotalPow rt.StateReadonly(&st) return &CurrentTotalPowerReturn{ - RawBytePower: st.ThisEpochRawBytePower, - QualityAdjPower: st.ThisEpochQualityAdjPower, - PledgeCollateral: st.ThisEpochPledgeCollateral, - QualityAdjPowerSmoothed: st.ThisEpochQAPowerSmoothed, + RawBytePower: st.ThisEpochRawBytePower, + PledgeCollateral: st.ThisEpochPledgeCollateral, + RawBytePowerSmoothed: st.ThisEpochRawBytePowerSmoothed, } } @@ -451,10 +451,10 @@ func (a Actor) processBatchProofVerifies(rt Runtime, rewret reward.ThisEpochRewa m, builtin.MethodsMiner.ConfirmSectorProofsValid, &builtin.ConfirmSectorProofsParams{ - Sectors: successful, - RewardSmoothed: rewret.ThisEpochRewardSmoothed, - RewardBaselinePower: rewret.ThisEpochBaselinePower, - QualityAdjPowerSmoothed: st.ThisEpochQAPowerSmoothed}, + Sectors: successful, + RewardSmoothed: rewret.ThisEpochRewardSmoothed, + RewardBaselinePower: rewret.ThisEpochBaselinePower, + RawBytePowerSmoothed: st.ThisEpochRawBytePowerSmoothed}, abi.NewTokenAmount(0), &builtin.Discard{}, ) @@ -515,7 +515,7 @@ func (a Actor) processDeferredCronEvents(rt Runtime, rewret reward.ThisEpochRewa params := builtin.DeferredCronEventParams{ EventPayload: event.CallbackPayload, RewardSmoothed: rewret.ThisEpochRewardSmoothed, - QualityAdjPowerSmoothed: st.ThisEpochQAPowerSmoothed, + RawBytePowerSmoothed: st.ThisEpochRawBytePowerSmoothed, } code := rt.Send( diff --git a/actors/builtin/power/power_state.go b/actors/builtin/power/power_state.go index 20762b915..3655c860a 100644 --- a/actors/builtin/power/power_state.go +++ b/actors/builtin/power/power_state.go @@ -37,18 +37,14 @@ const ProofValidationBatchAmtBitwidth = 4 type State struct { TotalRawBytePower abi.StoragePower // TotalBytesCommitted includes claims from miners below min power threshold - TotalBytesCommitted abi.StoragePower - TotalQualityAdjPower abi.StoragePower - // TotalQABytesCommitted includes claims from miners below min power threshold - TotalQABytesCommitted abi.StoragePower + TotalBytesCommitted abi.StoragePower TotalPledgeCollateral abi.TokenAmount // These fields are set once per epoch in the previous cron tick and used // for consistent values across a single epoch's state transition. - ThisEpochRawBytePower abi.StoragePower - ThisEpochQualityAdjPower abi.StoragePower + ThisEpochRawBytePower abi.StoragePower ThisEpochPledgeCollateral abi.TokenAmount - ThisEpochQAPowerSmoothed smoothing.FilterEstimate + ThisEpochRawBytePowerSmoothed smoothing.FilterEstimate MinerCount int64 // Number of miners having proven the minimum consensus power. @@ -73,9 +69,6 @@ type Claim struct { // Sum of raw byte power for a miner's sectors. RawBytePower abi.StoragePower - - // Sum of quality adjusted power for a miner's sectors. 
- QualityAdjPower abi.StoragePower } type CronEvent struct { @@ -94,20 +87,17 @@ func ConstructState(store adt.Store) (*State, error) { } return &State{ - TotalRawBytePower: abi.NewStoragePower(0), - TotalBytesCommitted: abi.NewStoragePower(0), - TotalQualityAdjPower: abi.NewStoragePower(0), - TotalQABytesCommitted: abi.NewStoragePower(0), - TotalPledgeCollateral: abi.NewTokenAmount(0), - ThisEpochRawBytePower: abi.NewStoragePower(0), - ThisEpochQualityAdjPower: abi.NewStoragePower(0), + TotalRawBytePower: abi.NewStoragePower(0), + TotalBytesCommitted: abi.NewStoragePower(0), + TotalPledgeCollateral: abi.NewTokenAmount(0), + ThisEpochRawBytePower: abi.NewStoragePower(0), ThisEpochPledgeCollateral: abi.NewTokenAmount(0), - ThisEpochQAPowerSmoothed: smoothing.NewEstimate(InitialQAPowerEstimatePosition, InitialQAPowerEstimateVelocity), - FirstCronEpoch: 0, - CronEventQueue: emptyCronQueueMMapCid, - Claims: emptyClaimsMapCid, - MinerCount: 0, - MinerAboveMinPowerCount: 0, + ThisEpochRawBytePowerSmoothed: smoothing.NewEstimate(InitialQAPowerEstimatePosition, InitialQAPowerEstimateVelocity), + FirstCronEpoch: 0, + CronEventQueue: emptyCronQueueMMapCid, + Claims: emptyClaimsMapCid, + MinerCount: 0, + MinerAboveMinPowerCount: 0, }, nil } @@ -150,13 +140,13 @@ func (st *State) MinerNominalPowerMeetsConsensusMinimum(s adt.Store, miner addr. } // Parameters may be negative to subtract. -func (st *State) AddToClaim(s adt.Store, miner addr.Address, power abi.StoragePower, qapower abi.StoragePower) error { +func (st *State) AddToClaim(s adt.Store, miner addr.Address, power abi.StoragePower) error { claims, err := adt.AsMap(s, st.Claims, builtin.DefaultHamtBitwidth) if err != nil { return xerrors.Errorf("failed to load claims: %w", err) } - if err := st.addToClaim(claims, miner, power, qapower); err != nil { + if err := st.addToClaim(claims, miner, power); err != nil { return xerrors.Errorf("failed to add claim: %w", err) } @@ -176,7 +166,7 @@ func (st *State) GetClaim(s adt.Store, a addr.Address) (*Claim, bool, error) { return getClaim(claims, a) } -func (st *State) addToClaim(claims *adt.Map, miner addr.Address, power abi.StoragePower, qapower abi.StoragePower) error { +func (st *State) addToClaim(claims *adt.Map, miner addr.Address, power abi.StoragePower) error { oldClaim, ok, err := getClaim(claims, miner) if err != nil { return fmt.Errorf("failed to get claim: %w", err) @@ -186,13 +176,11 @@ func (st *State) addToClaim(claims *adt.Map, miner addr.Address, power abi.Stora } // TotalBytes always update directly - st.TotalQABytesCommitted = big.Add(st.TotalQABytesCommitted, qapower) st.TotalBytesCommitted = big.Add(st.TotalBytesCommitted, power) newClaim := Claim{ WindowPoStProofType: oldClaim.WindowPoStProofType, RawBytePower: big.Add(oldClaim.RawBytePower, power), - QualityAdjPower: big.Add(oldClaim.QualityAdjPower, qapower), } minPower, err := builtin.ConsensusMinerMinPower(oldClaim.WindowPoStProofType) @@ -206,25 +194,19 @@ func (st *State) addToClaim(claims *adt.Map, miner addr.Address, power abi.Stora if prevBelow && !stillBelow { // just passed min miner size st.MinerAboveMinPowerCount++ - st.TotalQualityAdjPower = big.Add(st.TotalQualityAdjPower, newClaim.QualityAdjPower) st.TotalRawBytePower = big.Add(st.TotalRawBytePower, newClaim.RawBytePower) } else if !prevBelow && stillBelow { // just went below min miner size st.MinerAboveMinPowerCount-- - st.TotalQualityAdjPower = big.Sub(st.TotalQualityAdjPower, oldClaim.QualityAdjPower) st.TotalRawBytePower = big.Sub(st.TotalRawBytePower, 
oldClaim.RawBytePower) } else if !prevBelow && !stillBelow { // Was above the threshold, still above - st.TotalQualityAdjPower = big.Add(st.TotalQualityAdjPower, qapower) st.TotalRawBytePower = big.Add(st.TotalRawBytePower, power) } if newClaim.RawBytePower.LessThan(big.Zero()) { return xerrors.Errorf("negative claimed raw byte power: %v", newClaim.RawBytePower) } - if newClaim.QualityAdjPower.LessThan(big.Zero()) { - return xerrors.Errorf("negative claimed quality adjusted power: %v", newClaim.QualityAdjPower) - } if st.MinerAboveMinPowerCount < 0 { return xerrors.Errorf("negative number of miners larger than min: %v", st.MinerAboveMinPowerCount) } @@ -255,7 +237,7 @@ func (st *State) deleteClaim(claims *adt.Map, miner addr.Address) (bool, error) } // subtract from stats as if we were simply removing power - err = st.addToClaim(claims, miner, oldClaim.RawBytePower.Neg(), oldClaim.QualityAdjPower.Neg()) + err = st.addToClaim(claims, miner, oldClaim.RawBytePower.Neg()) if err != nil { return false, fmt.Errorf("failed to subtract miner power before deleting claim: %w", err) } @@ -294,8 +276,8 @@ func (st *State) appendCronEvent(events *adt.Multimap, epoch abi.ChainEpoch, eve } func (st *State) updateSmoothedEstimate(delta abi.ChainEpoch) { - filterQAPower := smoothing.LoadFilter(st.ThisEpochQAPowerSmoothed, smoothing.DefaultAlpha, smoothing.DefaultBeta) - st.ThisEpochQAPowerSmoothed = filterQAPower.NextEstimate(st.ThisEpochQualityAdjPower, delta) + filterQAPower := smoothing.LoadFilter(st.ThisEpochRawBytePowerSmoothed, smoothing.DefaultAlpha, smoothing.DefaultBeta) + st.ThisEpochRawBytePowerSmoothed = filterQAPower.NextEstimate(st.ThisEpochRawBytePower, delta) } func loadCronEvents(mmap *adt.Multimap, epoch abi.ChainEpoch) ([]CronEvent, error) { @@ -312,9 +294,6 @@ func setClaim(claims *adt.Map, a addr.Address, claim *Claim) error { if claim.RawBytePower.LessThan(big.Zero()) { return xerrors.Errorf("negative claim raw power %v", claim.RawBytePower) } - if claim.QualityAdjPower.LessThan(big.Zero()) { - return xerrors.Errorf("negative claim quality-adjusted power %v", claim.QualityAdjPower) - } if err := claims.Put(abi.AddrKey(a), claim); err != nil { return xerrors.Errorf("failed to put claim with address %s power %v: %w", a, claim, err) } @@ -323,11 +302,11 @@ func setClaim(claims *adt.Map, a addr.Address, claim *Claim) error { // CurrentTotalPower returns current power values accounting for minimum miner // and minimum power -func CurrentTotalPower(st *State) (abi.StoragePower, abi.StoragePower) { +func CurrentTotalPower(st *State) abi.StoragePower { if st.MinerAboveMinPowerCount < ConsensusMinerMinMiners { - return st.TotalBytesCommitted, st.TotalQABytesCommitted + return st.TotalBytesCommitted } - return st.TotalRawBytePower, st.TotalQualityAdjPower + return st.TotalRawBytePower } func epochKey(e abi.ChainEpoch) abi.Keyer { diff --git a/actors/builtin/power/testing.go b/actors/builtin/power/testing.go index 90614930c..7da25b627 100644 --- a/actors/builtin/power/testing.go +++ b/actors/builtin/power/testing.go @@ -31,18 +31,10 @@ func CheckStateInvariants(st *State, store adt.Store) (*StateSummary, *builtin.M // basic invariants around recorded power acc.Require(st.TotalRawBytePower.GreaterThanEqual(big.Zero()), "total raw power is negative %v", st.TotalRawBytePower) - acc.Require(st.TotalQualityAdjPower.GreaterThanEqual(big.Zero()), "total qa power is negative %v", st.TotalQualityAdjPower) acc.Require(st.TotalBytesCommitted.GreaterThanEqual(big.Zero()), "total raw power committed is 
negative %v", st.TotalBytesCommitted) - acc.Require(st.TotalQABytesCommitted.GreaterThanEqual(big.Zero()), "total qa power committed is negative %v", st.TotalQABytesCommitted) - acc.Require(st.TotalRawBytePower.LessThanEqual(st.TotalQualityAdjPower), - "total raw power %v is greater than total quality adjusted power %v", st.TotalRawBytePower, st.TotalQualityAdjPower) - acc.Require(st.TotalBytesCommitted.LessThanEqual(st.TotalQABytesCommitted), - "committed raw power %v is greater than committed quality adjusted power %v", st.TotalBytesCommitted, st.TotalQABytesCommitted) acc.Require(st.TotalRawBytePower.LessThanEqual(st.TotalBytesCommitted), "total raw power %v is greater than raw power committed %v", st.TotalRawBytePower, st.TotalBytesCommitted) - acc.Require(st.TotalQualityAdjPower.LessThanEqual(st.TotalQABytesCommitted), - "total qa power %v is greater than qa power committed %v", st.TotalQualityAdjPower, st.TotalQABytesCommitted) crons := CheckCronInvariants(st, store, acc) claims := CheckClaimInvariants(st, store, acc) @@ -98,9 +90,7 @@ func CheckClaimInvariants(st *State, store adt.Store, acc *builtin.MessageAccumu } committedRawPower := abi.NewStoragePower(0) - committedQAPower := abi.NewStoragePower(0) rawPower := abi.NewStoragePower(0) - qaPower := abi.NewStoragePower(0) claimsWithSufficientPowerCount := int64(0) var claim Claim err = claims.ForEach(&claim, func(key string) error { @@ -110,7 +100,6 @@ func CheckClaimInvariants(st *State, store adt.Store, acc *builtin.MessageAccumu } byAddress[addr] = claim committedRawPower = big.Add(committedRawPower, claim.RawBytePower) - committedQAPower = big.Add(committedQAPower, claim.QualityAdjPower) minPower, err := builtin.ConsensusMinerMinPower(claim.WindowPoStProofType) acc.Require(err == nil, "could not get consensus miner min power for miner %v: %v", addr, err) @@ -121,7 +110,6 @@ func CheckClaimInvariants(st *State, store adt.Store, acc *builtin.MessageAccumu if claim.RawBytePower.GreaterThanEqual(minPower) { claimsWithSufficientPowerCount += 1 rawPower = big.Add(rawPower, claim.RawBytePower) - qaPower = big.Add(qaPower, claim.QualityAdjPower) } return nil }) @@ -130,9 +118,6 @@ func CheckClaimInvariants(st *State, store adt.Store, acc *builtin.MessageAccumu acc.Require(committedRawPower.Equals(st.TotalBytesCommitted), "sum of raw power in claims %v does not match recorded bytes committed %v", committedRawPower, st.TotalBytesCommitted) - acc.Require(committedQAPower.Equals(st.TotalQABytesCommitted), - "sum of qa power in claims %v does not match recorded qa power committed %v", - committedQAPower, st.TotalQABytesCommitted) acc.Require(claimsWithSufficientPowerCount == st.MinerAboveMinPowerCount, "claims with sufficient power %d does not match MinerAboveMinPowerCount %d", @@ -140,8 +125,6 @@ func CheckClaimInvariants(st *State, store adt.Store, acc *builtin.MessageAccumu acc.Require(st.TotalRawBytePower.Equals(rawPower), "recorded raw power %v does not match raw power in claims %v", st.TotalRawBytePower, rawPower) - acc.Require(st.TotalQualityAdjPower.Equals(qaPower), - "recorded qa power %v does not match qa power in claims %v", st.TotalQualityAdjPower, qaPower) return byAddress } diff --git a/actors/builtin/reward/reward_actor.go b/actors/builtin/reward/reward_actor.go index 63b940d39..4ad2e7feb 100644 --- a/actors/builtin/reward/reward_actor.go +++ b/actors/builtin/reward/reward_actor.go @@ -25,6 +25,7 @@ func (a Actor) Exports() []interface{} { 2: a.AwardBlockReward, 3: a.ThisEpochReward, 4: a.UpdateNetworkKPI, + 5: 
a.ClaimVerifiedDealReward, } } @@ -178,3 +179,18 @@ func (a Actor) UpdateNetworkKPI(rt runtime.Runtime, currRealizedPower *abi.Stora }) return nil } + +func (a Actor) ClaimVerifiedDealReward(rt runtime.Runtime, currTotalVerifiedSpace *abi.StoragePower) *abi.TokenAmount { + // Note: this should be opened up to a whitelist after the FVM supports alternative markets. + rt.ValidateImmediateCallerIs(builtin.StorageMarketActorAddr) + if currTotalVerifiedSpace == nil { + rt.Abortf(exitcode.ErrIllegalArgument, "argument should not be nil") + } + + var st State + rt.StateTransaction(&st, func() { + // FIXME send the verified deal reward for the elapsed epoch and record the current total verified space + // for calculation of the next one + }) + return nil +} diff --git a/actors/builtin/shared.go b/actors/builtin/shared.go index d39df2ac6..ab434c7fc 100644 --- a/actors/builtin/shared.go +++ b/actors/builtin/shared.go @@ -96,19 +96,21 @@ type MinerAddrs struct { ControlAddrs []addr.Address } +// Changed in v8: +// - Replaced QualityAdjPowerSmoothed with RawBytePowerSmoothed type DeferredCronEventParams struct { - EventPayload []byte - RewardSmoothed smoothing.FilterEstimate - QualityAdjPowerSmoothed smoothing.FilterEstimate + EventPayload []byte + RewardSmoothed smoothing.FilterEstimate + RawBytePowerSmoothed smoothing.FilterEstimate } -// Note: we could move this alias back to the mutually-importing packages that use it, now that they -// can instead both alias the v2 version. +// Changed in v8: +// - Replaced QualityAdjPowerSmoothed with RawBytePowerSmoothed type ConfirmSectorProofsParams struct { - Sectors []abi.SectorNumber - RewardSmoothed smoothing.FilterEstimate - RewardBaselinePower abi.StoragePower - QualityAdjPowerSmoothed smoothing.FilterEstimate + Sectors []abi.SectorNumber + RewardSmoothed smoothing.FilterEstimate + RewardBaselinePower abi.StoragePower + RawBytePowerSmoothed smoothing.FilterEstimate } // ResolveToIDAddr resolves the given address to it's ID address form. 
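ClaimVerifiedDealReward is only a stub at this point, but its signature already fixes the call site: the market actor passes the current total verified space and receives a token amount. A hedged sketch of that eventual call (only the export number 5, the caller restriction, the parameter, and the return type come from this diff; the method constant name and the currTotalVerifiedSpace variable are assumptions):

// Illustrative only: how the market actor might claim the verified deal reward
// once it tracks total verified space (see the FIXME in PublishStorageDeals).
var reward abi.TokenAmount
code := rt.Send(
	builtin.RewardActorAddr,
	builtin.MethodsReward.ClaimVerifiedDealReward, // assumed constant for method 5
	&currTotalVerifiedSpace,                       // abi.StoragePower tracked by the market actor
	abi.NewTokenAmount(0),
	&reward,
)
builtin.RequireSuccess(rt, code, "failed to claim verified deal reward")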
diff --git a/actors/states/check.go b/actors/states/check.go index f435ebade..ca160c3a0 100644 --- a/actors/states/check.go +++ b/actors/states/check.go @@ -163,9 +163,10 @@ func CheckMinersAgainstPower(acc *builtin.MessageAccumulator, minerSummaries map claim, ok := powerSummary.Claims[addr] acc.Require(ok, "miner %v has no power claim", addr) if ok { - claimPower := miner.NewPowerPair(claim.RawBytePower, claim.QualityAdjPower) - acc.Require(minerSummary.ActivePower.Equals(claimPower), - "miner %v computed active power %v does not match claim %v", addr, minerSummary.ActivePower, claimPower) + claimPower := claim.RawBytePower + minerPower := big.Mul(big.NewIntUnsigned(uint64(minerSummary.SectorSize)), big.NewIntUnsigned(minerSummary.ActiveCount)) + acc.Require(minerPower.Equals(claimPower), + "miner %v computed active power %v does not match claim %v", addr, minerPower, claimPower) acc.Require(minerSummary.WindowPoStProofType == claim.WindowPoStProofType, "miner seal proof type %d does not match claim proof type %d", minerSummary.WindowPoStProofType, claim.WindowPoStProofType) } diff --git a/actors/states/election.go b/actors/states/election.go index 2cba615bb..9cee0e7ff 100644 --- a/actors/states/election.go +++ b/actors/states/election.go @@ -22,7 +22,7 @@ func MinerEligibleForElection(store adt.Store, mstate *miner.State, pstate *powe return false, err } else if !found { return false, err - } else if claim.QualityAdjPower.LessThanEqual(big.Zero()) { + } else if claim.RawBytePower.LessThanEqual(big.Zero()) { return false, err } diff --git a/actors/test/commit_post_test.go b/actors/test/commit_post_test.go index 335cf9b95..2e31af557 100644 --- a/actors/test/commit_post_test.go +++ b/actors/test/commit_post_test.go @@ -172,7 +172,7 @@ func TestCommitPoStFlow(t *testing.T) { Index: pIdx, Skipped: bitfield.New(), }} - sectorPower := miner.PowerForSector(sectorSize, sector) + sectorPower := miner.SectorPower(sectorSize) submitWindowPoSt(t, tv, worker, minerAddrs.IDAddress, dlInfo, partitions, sectorPower) // miner still has initial pledge @@ -525,7 +525,7 @@ func TestBatchOnboarding(t *testing.T) { Index: pIdx, Skipped: bitfield.New(), }} - newPower := miner.PowerForSector(sectorSize, sector).Mul(big.NewInt(int64(provenCount))) + newPower := miner.SectorsPower(sectorSize, provenCount) submitWindowPoSt(t, v, worker, minerAddrs.IDAddress, dlInfo, partitions, newPower) // Miner has initial pledge diff --git a/gen/gen.go b/gen/gen.go index 95f01621d..91b513f36 100644 --- a/gen/gen.go +++ b/gen/gen.go @@ -37,8 +37,8 @@ func main() { if err := gen.WriteTupleEncodersToFile("./actors/builtin/cbor_gen.go", "builtin", builtin.MinerAddrs{}, - builtin.ConfirmSectorProofsParams{}, - builtin.DeferredCronEventParams{}, + builtin.ConfirmSectorProofsParams{}, // Changed in v8 + builtin.DeferredCronEventParams{}, // Changed in v8 // builtin.ApplyRewardParams{}, // Aliased from v2 ); err != nil { panic(err) @@ -140,7 +140,7 @@ func main() { power.CreateMinerParams{}, //power.CreateMinerReturn{}, // Aliased from v0 //power.EnrollCronEventParams{}, // Aliased from v0 - //power.UpdateClaimedPowerParams{}, // Aliased from v0 + power.UpdateClaimedPowerParams{}, // Changed in v8 power.CurrentTotalPowerReturn{}, // other types power.MinerConstructorParams{}, @@ -156,8 +156,6 @@ func main() { // market.PublishStorageDealsParams{}, // Aliased from v0 market.PublishStorageDealsReturn{}, //market.ActivateDealsParams{}, // Aliased from v0 - market.VerifyDealsForActivationParams{}, - 
market.VerifyDealsForActivationReturn{}, market.SectorDataSpec{}, market.ComputeDataCommitmentParams{}, market.ComputeDataCommitmentReturn{}, @@ -165,8 +163,6 @@ func main() { // other types //market.DealProposal{}, // Aliased from v0 //market.ClientDealProposal{}, - market.SectorDeals{}, - market.SectorWeights{}, market.DealState{}, ); err != nil { panic(err) diff --git a/support/vm/testing.go b/support/vm/testing.go index 81df74a5d..d0e42d1e3 100644 --- a/support/vm/testing.go +++ b/support/vm/testing.go @@ -476,7 +476,7 @@ func GetMinerBalances(t *testing.T, vm *VM, minerIdAddr address.Address) MinerBa } } -func PowerForMinerSector(t *testing.T, vm *VM, minerIdAddr address.Address, sectorNumber abi.SectorNumber) miner.PowerPair { +func PowerForMinerSector(t *testing.T, vm *VM, minerIdAddr address.Address, sectorNumber abi.SectorNumber) abi.StoragePower { var state miner.State err := vm.GetState(minerIdAddr, &state) require.NoError(t, err) @@ -487,7 +487,7 @@ func PowerForMinerSector(t *testing.T, vm *VM, minerIdAddr address.Address, sect sectorSize, err := sector.SealProof.SectorSize() require.NoError(t, err) - return miner.PowerForSector(sectorSize, sector) + return miner.SectorPower(sectorSize) } func MinerPower(t *testing.T, vm *VM, minerIdAddr address.Address) miner.PowerPair { @@ -499,7 +499,7 @@ func MinerPower(t *testing.T, vm *VM, minerIdAddr address.Address) miner.PowerPa require.NoError(t, err) require.True(t, found) - return miner.NewPowerPair(claim.RawBytePower, claim.QualityAdjPower) + return miner.NewPowerPair(claim.RawBytePower, big.Zero()) } type NetworkStats struct { @@ -539,11 +539,11 @@ func GetNetworkStats(t *testing.T, vm *VM) NetworkStats { return NetworkStats{ TotalRawBytePower: powerState.TotalRawBytePower, TotalBytesCommitted: powerState.TotalBytesCommitted, - TotalQualityAdjPower: powerState.TotalQualityAdjPower, - TotalQABytesCommitted: powerState.TotalQABytesCommitted, + TotalQualityAdjPower: big.Zero(), + TotalQABytesCommitted: big.Zero(), TotalPledgeCollateral: powerState.TotalPledgeCollateral, ThisEpochRawBytePower: powerState.ThisEpochRawBytePower, - ThisEpochQualityAdjPower: powerState.ThisEpochQualityAdjPower, + ThisEpochQualityAdjPower: big.Zero(), ThisEpochPledgeCollateral: powerState.ThisEpochPledgeCollateral, MinerCount: powerState.MinerCount, MinerAboveMinPowerCount: powerState.MinerAboveMinPowerCount,
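The test changes above rely on miner.SectorPower and miner.SectorsPower, which replace PowerForSector now that there is no quality adjustment; their definitions live in the miner package elsewhere in this change set. An assumed sketch of what they reduce to (raw-byte power is simply the sector size, independent of deal content; the count parameter type is also an assumption):

// Assumed shape, for illustration only; not taken from this diff.
func SectorPower(size abi.SectorSize) abi.StoragePower {
	return big.NewIntUnsigned(uint64(size))
}

func SectorsPower(size abi.SectorSize, count int) abi.StoragePower {
	return big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(count)))
}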