Fix F3 JSON RPC error pass through across API boundary
Fix the issue by instantiating the sentinel F3 error values as pointers, and
assert via an integration test that the errors indeed pass through the RPC API
boundary.

Fixes #12630
masih committed Oct 24, 2024
1 parent c6fdf95 commit 8361906
Showing 2 changed files with 43 additions and 19 deletions.
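
The fix hinges on a type mismatch: go-jsonrpc keys its registered errors by reflect.Type, and a value sentinel never has the pointer type that api/api_errors.go registers for it. Below is a minimal, self-contained sketch of that mismatch; the reflect.Type-keyed registry and the RPCErrors.Register(code, new(*errF3Disabled)) registration style are assumptions drawn from the surrounding lotus code, not shown in this diff.

package main

import (
	"fmt"
	"reflect"
)

// errF3Disabled mirrors the unexported sentinel error type in lotus's api
// package: a zero-size struct with an Error method.
type errF3Disabled struct{}

func (errF3Disabled) Error() string { return "f3 is disabled" }

func main() {
	// go-jsonrpc registrations of the form Register(code, new(*errF3Disabled))
	// key the error by the pointer type *errF3Disabled.
	registered := reflect.TypeOf(new(*errF3Disabled)).Elem()

	// Old shape: the exported sentinel was a value. Its dynamic type can
	// never match the registered pointer type, so the typed error did not
	// survive the trip across the RPC boundary.
	var oldSentinel error = errF3Disabled{}
	fmt.Println(reflect.TypeOf(oldSentinel) == registered) // false

	// New shape: the sentinel is a pointer, so its type lines up with what
	// the RPC layer registers and reconstructs, which is what the new
	// integration test asserts via errors.Is.
	var newSentinel error = &errF3Disabled{}
	fmt.Println(reflect.TypeOf(newSentinel) == registered) // true
}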
api/api_errors.go: 14 changes (7 additions, 7 deletions)
@@ -23,22 +23,22 @@ var (
 	RPCErrors = jsonrpc.NewErrors()
 
 	// ErrF3Disabled signals that F3 consensus process is disabled.
-	ErrF3Disabled = errF3Disabled{}
+	ErrF3Disabled = &errF3Disabled{}
 	// ErrF3ParticipationTicketInvalid signals that F3ParticipationTicket cannot be decoded.
-	ErrF3ParticipationTicketInvalid = errF3ParticipationTicketInvalid{}
+	ErrF3ParticipationTicketInvalid = &errF3ParticipationTicketInvalid{}
 	// ErrF3ParticipationTicketExpired signals that the current GPBFT instance has surpassed the expiry of the ticket.
-	ErrF3ParticipationTicketExpired = errF3ParticipationTicketExpired{}
+	ErrF3ParticipationTicketExpired = &errF3ParticipationTicketExpired{}
 	// ErrF3ParticipationIssuerMismatch signals that the ticket is not issued by the current node.
-	ErrF3ParticipationIssuerMismatch = errF3ParticipationIssuerMismatch{}
+	ErrF3ParticipationIssuerMismatch = &errF3ParticipationIssuerMismatch{}
 	// ErrF3ParticipationTooManyInstances signals that participation ticket cannot be
 	// issued because it asks for too many instances.
-	ErrF3ParticipationTooManyInstances = errF3ParticipationTooManyInstances{}
+	ErrF3ParticipationTooManyInstances = &errF3ParticipationTooManyInstances{}
 	// ErrF3ParticipationTicketStartBeforeExisting signals that participation ticket
 	// is before the start instance of an existing lease held by the miner.
-	ErrF3ParticipationTicketStartBeforeExisting = errF3ParticipationTicketStartBeforeExisting{}
+	ErrF3ParticipationTicketStartBeforeExisting = &errF3ParticipationTicketStartBeforeExisting{}
 	// ErrF3NotReady signals that the F3 instance isn't ready for participation yet. The caller
 	// should back off and try again later.
-	ErrF3NotReady = errF3NotReady{}
+	ErrF3NotReady = &errF3NotReady{}
 
 	_ error = (*ErrOutOfGas)(nil)
 	_ error = (*ErrActorNotFound)(nil)
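
The hunk above only touches the sentinel declarations; each typed error is also tied to a JSON-RPC error code so the client can rebuild it. A sketch of that registration pattern follows; the constant names and their ordering are illustrative assumptions, since the init wiring sits outside the lines shown in this diff.

package api

import "github.com/filecoin-project/go-jsonrpc"

// Illustrative error codes; the real values and ordering in api_errors.go
// may differ.
const (
	EF3Disabled = jsonrpc.FirstUserCode + iota
	EF3ParticipationTicketInvalid
	// ... one code per F3 sentinel ...
)

func init() {
	// Note the pointer types: new(*errF3Disabled) registers *errF3Disabled,
	// which is the concrete type the pointer sentinels above now share.
	RPCErrors.Register(EF3Disabled, new(*errF3Disabled))
	RPCErrors.Register(EF3ParticipationTicketInvalid, new(*errF3ParticipationTicketInvalid))
	// ... and so on for the remaining F3 errors.
}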
itests/f3_test.go: 48 changes (36 additions, 12 deletions)
@@ -6,6 +6,7 @@ import (
 	"testing"
 	"time"
 
+	lotus_api "github.com/filecoin-project/lotus/api"
 	"github.com/ipfs/go-cid"
 	"github.com/libp2p/go-libp2p"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
@@ -157,6 +158,27 @@ func TestF3_Bootstrap(t *testing.T) {
 	e.requireAllMinersParticipate()
 }
 
+func TestF3_JsonRPCErrorsPassThrough(t *testing.T) {
+	const blocktime = 100 * time.Millisecond
+	e := setup(t, blocktime, kit.ThroughRPC())
+	n := e.nodes[0].FullNode
+
+	lease, err := n.F3Participate(e.testCtx, []byte("fish"))
+	require.ErrorIs(t, err, lotus_api.ErrF3ParticipationTicketInvalid)
+	require.Zero(t, lease)
+
+	addr, err := address.NewIDAddress(1413)
+	require.NoError(t, err)
+
+	ticket, err := n.F3GetOrRenewParticipationTicket(e.testCtx, addr, nil, 100)
+	require.ErrorIs(t, err, lotus_api.ErrF3ParticipationTooManyInstances)
+	require.Zero(t, ticket)
+
+	cert, err := n.F3GetCertificate(e.testCtx, 9000)
+	require.ErrorIs(t, err, lotus_api.ErrF3NotReady)
+	require.Nil(t, cert)
+}
+
 func (e *testEnv) waitTillF3Rebootstrap(timeout time.Duration) {
 	e.waitFor(func(n *kit.TestFullNode) bool {
 		// the prev epoch yet, check if we already bootstrapped and from
@@ -289,8 +311,8 @@ func (e *testEnv) waitFor(f func(n *kit.TestFullNode) bool, timeout time.Duratio
 // The first node returned by the function is directly connected to a miner,
 // and the second full-node is an observer that is not directly connected to
 // a miner. The last return value is the manifest sender for the network.
-func setup(t *testing.T, blocktime time.Duration) *testEnv {
-	return setupWithStaticManifest(t, newTestManifest(BaseNetworkName+"/1", DefaultBootstrapEpoch, blocktime), false)
+func setup(t *testing.T, blocktime time.Duration, opts ...kit.NodeOpt) *testEnv {
+	return setupWithStaticManifest(t, newTestManifest(BaseNetworkName+"/1", DefaultBootstrapEpoch, blocktime), false, opts...)
 }
 
 func newTestManifest(networkName gpbft.NetworkName, bootstrapEpoch int64, blocktime time.Duration) *manifest.Manifest {
@@ -328,7 +350,7 @@ func newTestManifest(networkName gpbft.NetworkName, bootstrapEpoch int64, blockt
 	}
 }
 
-func setupWithStaticManifest(t *testing.T, manif *manifest.Manifest, testBootstrap bool) *testEnv {
+func setupWithStaticManifest(t *testing.T, manif *manifest.Manifest, testBootstrap bool, extraOpts ...kit.NodeOpt) *testEnv {
 	ctx, stopServices := context.WithCancel(context.Background())
 	errgrp, ctx := errgroup.WithContext(ctx)
 
@@ -351,22 +373,24 @@ setupWithStaticManifest(t *testing.T, manif *manifest.Manifest, testBootstr
 		AllowDynamicFinalize: !testBootstrap,
 	}
 
-	f3NOpt := kit.F3Enabled(cfg)
-	f3MOpt := kit.ConstructorOpts(node.Override(node.F3Participation, modules.F3Participation))
+	nodeOpts := []kit.NodeOpt{kit.WithAllSubsystems(), kit.F3Enabled(cfg)}
+	nodeOpts = append(nodeOpts, extraOpts...)
+	minerOpts := []kit.NodeOpt{kit.WithAllSubsystems(), kit.ConstructorOpts(node.Override(node.F3Participation, modules.F3Participation))}
+	minerOpts = append(minerOpts, extraOpts...)
 
 	var (
 		n1, n2, n3     kit.TestFullNode
 		m1, m2, m3, m4 kit.TestMiner
 	)
 
 	ens := kit.NewEnsemble(t, kit.MockProofs()).
-		FullNode(&n1, kit.WithAllSubsystems(), f3NOpt).
-		FullNode(&n2, kit.WithAllSubsystems(), f3NOpt).
-		FullNode(&n3, kit.WithAllSubsystems(), f3NOpt).
-		Miner(&m1, &n1, kit.WithAllSubsystems(), f3MOpt).
-		Miner(&m2, &n2, kit.WithAllSubsystems(), f3MOpt).
-		Miner(&m3, &n3, kit.WithAllSubsystems(), f3MOpt).
-		Miner(&m4, &n3, kit.WithAllSubsystems(), f3MOpt).
+		FullNode(&n1, nodeOpts...).
+		FullNode(&n2, nodeOpts...).
+		FullNode(&n3, nodeOpts...).
+		Miner(&m1, &n1, minerOpts...).
+		Miner(&m2, &n2, minerOpts...).
+		Miner(&m3, &n3, minerOpts...).
+		Miner(&m4, &n3, minerOpts...).
 		Start()
 
 	ens.InterconnectAll().BeginMining(blocktime)
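With typed errors surviving the boundary, RPC consumers can branch on the sentinels exactly as the new test does. A hypothetical caller-side sketch follows: the F3Participate signature mirrors the test above, while the package name, ticket type, and backoff policy are assumptions for illustration.

package f3client

import (
	"context"
	"errors"
	"time"

	"github.com/filecoin-project/lotus/api"
)

// tryParticipate keeps requesting F3 participation until it succeeds or hits
// a non-retryable error. Only the errors.Is checks come from this change;
// the retry loop itself is an illustrative policy, not prescribed by lotus.
func tryParticipate(ctx context.Context, node api.FullNode, ticket api.F3ParticipationTicket) (api.F3ParticipationLease, error) {
	for {
		lease, err := node.F3Participate(ctx, ticket)
		switch {
		case err == nil:
			return lease, nil
		case errors.Is(err, api.ErrF3NotReady):
			// Per the sentinel's doc comment: back off and try again later.
			select {
			case <-ctx.Done():
				return api.F3ParticipationLease{}, ctx.Err()
			case <-time.After(time.Second):
			}
		default:
			// Invalid/expired tickets, issuer mismatch, F3 disabled, etc.
			// now arrive as typed errors rather than opaque strings.
			return api.F3ParticipationLease{}, err
		}
	}
}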
