From 7296141faa14d284f33058c95a7dbc7c29e45df0 Mon Sep 17 00:00:00 2001 From: Arsene <tochemey@hotmail.com> Date: Sun, 27 Oct 2024 11:37:04 +0000 Subject: [PATCH] refactor: cleanup the code to allow easy extensibility (#505) --- actors/actor_system.go | 260 ++- actors/actor_system_test.go | 2912 ++++++++++++++------------- actors/api.go | 312 --- actors/api_test.go | 2284 +++++---------------- actors/errors.go | 2 + actors/option.go | 152 +- actors/option_test.go | 1 + actors/pid.go | 236 ++- actors/pid_option.go | 7 + actors/pid_test.go | 346 +++- actors/receive_context.go | 8 +- actors/receive_context_test.go | 595 ++++-- actors/redistribution.go | 98 +- actors/remoting.go | 359 ++++ actors/remoting_test.go | 1524 ++++++++++++++ actors/scheduler.go | 158 +- actors/scheduler_test.go | 171 +- bench/benchmarkpb/benchmark.pb.go | 114 +- client/client.go | 179 +- client/client_test.go | 315 ++- client/least_load_test.go | 8 +- client/node.go | 87 +- client/round_robin_test.go | 8 +- goaktpb/goakt.pb.go | 290 +-- internal/errorschain/errorschain.go | 6 + internal/http/http.go | 8 +- internal/internalpb/actor.pb.go | 26 +- internal/internalpb/cluster.pb.go | 92 +- internal/internalpb/nats.pb.go | 26 +- internal/internalpb/peers.pb.go | 48 +- internal/internalpb/remoting.pb.go | 290 +-- protos/internal/peers.proto | 1 + test/data/testpb/test.pb.go | 642 ++---- 33 files changed, 6056 insertions(+), 5509 deletions(-) create mode 100644 actors/remoting.go create mode 100644 actors/remoting_test.go diff --git a/actors/actor_system.go b/actors/actor_system.go index 3d674318..0ad73793 100644 --- a/actors/actor_system.go +++ b/actors/actor_system.go @@ -29,7 +29,7 @@ import ( "errors" "fmt" "net" - stdhttp "net/http" + nethttp "net/http" "os" "regexp" "strconv" @@ -40,6 +40,8 @@ import ( "connectrpc.com/connect" "github.com/google/uuid" "go.uber.org/atomic" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" "golang.org/x/sync/errgroup" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" @@ -49,7 +51,6 @@ import ( "github.com/tochemey/goakt/v2/hash" "github.com/tochemey/goakt/v2/internal/cluster" "github.com/tochemey/goakt/v2/internal/eventstream" - "github.com/tochemey/goakt/v2/internal/http" "github.com/tochemey/goakt/v2/internal/internalpb" "github.com/tochemey/goakt/v2/internal/internalpb/internalpbconnect" "github.com/tochemey/goakt/v2/internal/tcp" @@ -182,12 +183,14 @@ type actorSystem struct { // Specifies whether remoting is enabled. 
// This allows to handle remote messaging remotingEnabled atomic.Bool + remoting *Remoting // Specifies the remoting port port int32 // Specifies the remoting host host string // Specifies the remoting server - remotingServer *stdhttp.Server + server *nethttp.Server + listener net.Listener // cluster settings clusterEnabled atomic.Bool @@ -282,7 +285,7 @@ func NewActorSystem(name string, opts ...Option) (ActorSystem, error) { } } - system.scheduler = newScheduler(system.logger, system.shutdownTimeout, withSchedulerCluster(system.cluster)) + system.scheduler = newScheduler(system.logger, system.shutdownTimeout, withSchedulerCluster(system.cluster), withSchedulerRemoting(NewRemoting())) return system, nil } @@ -681,12 +684,14 @@ func (x *actorSystem) Stop(ctx context.Context) error { defer cancel() if x.remotingEnabled.Load() { - if err := x.remotingServer.Shutdown(ctx); err != nil { + x.remoting.Close() + if err := x.shutdownHTTPServer(ctx); err != nil { return err } x.remotingEnabled.Store(false) - x.remotingServer = nil + x.server = nil + x.listener = nil } if x.clusterEnabled.Load() { @@ -833,42 +838,46 @@ func (x *actorSystem) RemoteTell(ctx context.Context, stream *connect.ClientStre eg, ctx := errgroup.WithContext(ctx) eg.SetLimit(2) - eg.Go(func() error { - defer close(requestc) - for stream.Receive() { - select { - case requestc <- stream.Msg(): - case <-ctx.Done(): - logger.Error(ctx.Err()) - return connect.NewError(connect.CodeCanceled, ctx.Err()) + eg.Go( + func() error { + defer close(requestc) + for stream.Receive() { + select { + case requestc <- stream.Msg(): + case <-ctx.Done(): + logger.Error(ctx.Err()) + return connect.NewError(connect.CodeCanceled, ctx.Err()) + } } - } - - if err := stream.Err(); err != nil { - logger.Error(err) - return connect.NewError(connect.CodeUnknown, err) - } - return nil - }) - - eg.Go(func() error { - for request := range requestc { - receiver := request.GetRemoteMessage().GetReceiver() - addr := address.New(receiver.GetName(), x.Name(), receiver.GetHost(), int(receiver.GetPort())) - pid, exist := x.actors.Get(addr) - if !exist { - logger.Error(ErrAddressNotFound(addr.String()).Error()) - return ErrAddressNotFound(addr.String()) + if err := stream.Err(); err != nil { + logger.Error(err) + return connect.NewError(connect.CodeUnknown, err) } - if err := x.handleRemoteTell(ctx, pid, request.GetRemoteMessage()); err != nil { - logger.Error(ErrRemoteSendFailure(err)) - return ErrRemoteSendFailure(err) + return nil + }, + ) + + eg.Go( + func() error { + for request := range requestc { + receiver := request.GetRemoteMessage().GetReceiver() + addr := address.New(receiver.GetName(), x.Name(), receiver.GetHost(), int(receiver.GetPort())) + pid, exist := x.actors.Get(addr) + if !exist { + logger.Error(ErrAddressNotFound(addr.String()).Error()) + return ErrAddressNotFound(addr.String()) + } + + if err := x.handleRemoteTell(ctx, pid, request.GetRemoteMessage()); err != nil { + logger.Error(ErrRemoteSendFailure(err)) + return ErrRemoteSendFailure(err) + } } - } - return nil - }) + return nil + }, + ) if err := eg.Wait(); err != nil { return nil, err @@ -953,8 +962,10 @@ func (x *actorSystem) RemoteSpawn(ctx context.Context, request *connect.Request[ actor, err := x.reflection.ActorFrom(msg.GetActorType()) if err != nil { - logger.Errorf("failed to create actor=[(%s) of type (%s)] on [host=%s, port=%d]: reason: (%v)", - msg.GetActorName(), msg.GetActorType(), msg.GetHost(), msg.GetPort(), err) + logger.Errorf( + "failed to create actor=[(%s) of type (%s)] on 
[host=%s, port=%d]: reason: (%v)", + msg.GetActorName(), msg.GetActorType(), msg.GetHost(), msg.GetPort(), err, + ) if errors.Is(err, ErrTypeNotRegistered) { return nil, connect.NewError(connect.CodeFailedPrecondition, ErrTypeNotRegistered) @@ -986,10 +997,12 @@ func (x *actorSystem) GetNodeMetric(_ context.Context, request *connect.Request[ } actorCount := x.actors.Size() - return connect.NewResponse(&internalpb.GetNodeMetricResponse{ - NodeRemoteAddress: remoteAddr, - ActorsCount: uint64(actorCount), - }), nil + return connect.NewResponse( + &internalpb.GetNodeMetricResponse{ + NodeRemoteAddress: remoteAddr, + ActorsCount: uint64(actorCount), + }, + ), nil } // GetKinds returns the cluster kinds @@ -1084,9 +1097,11 @@ func (x *actorSystem) enableClustering(ctx context.Context) error { } bootstrapChan := make(chan struct{}, 1) - timer := time.AfterFunc(time.Second, func() { - bootstrapChan <- struct{}{} - }) + timer := time.AfterFunc( + time.Second, func() { + bootstrapChan <- struct{}{} + }, + ) <-bootstrapChan timer.Stop() @@ -1126,23 +1141,28 @@ func (x *actorSystem) enableRemoting(ctx context.Context) { remotingServicePath, remotingServiceHandler := internalpbconnect.NewRemotingServiceHandler(x) clusterServicePath, clusterServiceHandler := internalpbconnect.NewClusterServiceHandler(x) - mux := stdhttp.NewServeMux() + mux := nethttp.NewServeMux() mux.Handle(remotingServicePath, remotingServiceHandler) mux.Handle(clusterServicePath, clusterServiceHandler) - server := http.NewServer(ctx, x.host, remotingPort, mux) + + x.locker.Lock() + // configure the appropriate server + if err := x.configureServer(ctx, mux); err != nil { + x.locker.Unlock() + x.logger.Panic(fmt.Errorf("failed enable remoting: %w", err)) + return + } + x.locker.Unlock() go func() { - if err := server.ListenAndServe(); err != nil { - if !errors.Is(err, stdhttp.ErrServerClosed) { + if err := x.startHTTPServer(); err != nil { + if !errors.Is(err, nethttp.ErrServerClosed) { x.logger.Panic(fmt.Errorf("failed to start remoting service: %w", err)) } } }() - x.locker.Lock() - x.remotingServer = server - x.locker.Unlock() - + x.remoting = NewRemoting() x.logger.Info("remoting enabled...:)") } @@ -1235,37 +1255,41 @@ func (x *actorSystem) peersStateLoop() { peersChan := make(chan *cluster.Peer) - eg.Go(func() error { - defer close(peersChan) - peers, err := x.cluster.Peers(ctx) - if err != nil { - return err - } - - for _, peer := range peers { - select { - case peersChan <- peer: - case <-ctx.Done(): - return ctx.Err() + eg.Go( + func() error { + defer close(peersChan) + peers, err := x.cluster.Peers(ctx) + if err != nil { + return err } - } - return nil - }) - eg.Go(func() error { - for peer := range peersChan { - if err := x.processPeerState(ctx, peer); err != nil { - return err + for _, peer := range peers { + select { + case peersChan <- peer: + case <-ctx.Done(): + return ctx.Err() + } } - select { - case <-ctx.Done(): - return ctx.Err() - default: - // pass + return nil + }, + ) + + eg.Go( + func() error { + for peer := range peersChan { + if err := x.processPeerState(ctx, peer); err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + default: + // pass + } } - } - return nil - }) + return nil + }, + ) if err := eg.Wait(); err != nil { x.logger.Error(err) @@ -1365,10 +1389,12 @@ func (x *actorSystem) configPID(ctx context.Context, name string, actor Actor, o pidOpts = append(pidOpts, withPassivationAfter(x.expireActorAfter)) } - pid, err := newPID(ctx, + pid, err := newPID( + ctx, addr, actor, - 
pidOpts...) + pidOpts..., + ) if err != nil { return nil, err @@ -1379,17 +1405,21 @@ func (x *actorSystem) configPID(ctx context.Context, name string, actor Actor, o // getSystemActorName returns the system supervisor name func (x *actorSystem) getSystemActorName(nameType nameType) string { if x.remotingEnabled.Load() { - return fmt.Sprintf("%s%s%s-%d-%d", + return fmt.Sprintf( + "%s%s%s-%d-%d", systemNames[nameType], strings.ToTitle(x.name), x.host, x.port, - time.Now().UnixNano()) + time.Now().UnixNano(), + ) } - return fmt.Sprintf("%s%s-%d", + return fmt.Sprintf( + "%s%s-%d", systemNames[nameType], strings.ToTitle(x.name), - time.Now().UnixNano()) + time.Now().UnixNano(), + ) } func isSystemName(name string) bool { @@ -1400,3 +1430,59 @@ func isSystemName(name string) bool { func (x *actorSystem) actorAddress(name string) *address.Address { return address.New(name, x.name, x.host, int(x.port)) } + +// startHTTPServer starts the appropriate http server +func (x *actorSystem) startHTTPServer() error { + return x.server.Serve(x.listener) +} + +// shutdownHTTPServer stops the appropriate http server +func (x *actorSystem) shutdownHTTPServer(ctx context.Context) error { + return x.server.Shutdown(ctx) +} + +// configureServer configure the various http server and listeners based upon the various settings +func (x *actorSystem) configureServer(ctx context.Context, mux *nethttp.ServeMux) error { + hostPort := net.JoinHostPort(x.host, strconv.Itoa(int(x.port))) + httpServer := getServer(ctx, hostPort) + // create a tcp listener + lnr, err := net.Listen("tcp", hostPort) + if err != nil { + return err + } + + // set the http server + x.server = httpServer + // For gRPC clients, it's convenient to support HTTP/2 without TLS. + x.server.Handler = h2c.NewHandler( + mux, &http2.Server{ + IdleTimeout: 1200 * time.Second, + }, + ) + // set the non-secure http server + x.listener = lnr + return nil +} + +// getServer creates an instance of http server +func getServer(ctx context.Context, address string) *nethttp.Server { + return &nethttp.Server{ + Addr: address, + // The maximum duration for reading the entire request, including the body. + // It’s implemented in net/http by calling SetReadDeadline immediately after Accept + // ReadTimeout := handler_timeout + ReadHeaderTimeout + wiggle_room + ReadTimeout: 3 * time.Second, + // ReadHeaderTimeout is the amount of time allowed to read request headers + ReadHeaderTimeout: time.Second, + // WriteTimeout is the maximum duration before timing out writes of the response. + // It is reset whenever a new request’s header is read. + // This effectively covers the lifetime of the ServeHTTP handler stack + WriteTimeout: time.Second, + // IdleTimeout is the maximum amount of time to wait for the next request when keep-alive are enabled. + // If IdleTimeout is zero, the value of ReadTimeout is used. 
Not relevant to request timeouts + IdleTimeout: 1200 * time.Second, + BaseContext: func(_ net.Listener) context.Context { + return ctx + }, + } +} diff --git a/actors/actor_system_test.go b/actors/actor_system_test.go index 791af251..eb9b5050 100644 --- a/actors/actor_system_test.go +++ b/actors/actor_system_test.go @@ -51,1410 +51,1600 @@ import ( // nolint func TestActorSystem(t *testing.T) { - t.Run("New instance with Defaults", func(t *testing.T) { - actorSystem, err := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - require.NoError(t, err) - require.NotNil(t, actorSystem) - var iface any = actorSystem - _, ok := iface.(ActorSystem) - assert.True(t, ok) - assert.Equal(t, "testSys", actorSystem.Name()) - assert.Empty(t, actorSystem.Actors()) - assert.NotNil(t, actorSystem.Logger()) - }) - t.Run("New instance with Missing Name", func(t *testing.T) { - sys, err := NewActorSystem("") - assert.Error(t, err) - assert.Nil(t, sys) - assert.EqualError(t, err, ErrNameRequired.Error()) - }) - t.Run("With invalid actor system Name", func(t *testing.T) { - sys, err := NewActorSystem("$omeN@me") - assert.Error(t, err) - assert.Nil(t, sys) - assert.EqualError(t, err, ErrInvalidActorSystemName.Error()) - }) - t.Run("With Spawn an actor when not System started", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, "Test", actor) - assert.Error(t, err) - assert.EqualError(t, err, ErrActorSystemNotStarted.Error()) - assert.Nil(t, actorRef) - }) - t.Run("With Spawn an actor when started", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, "Test", actor) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - }) - t.Run("With Spawn an actor with invalid actor name", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - require.NoError(t, err) - - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, "$omeN@me", actor) - require.Error(t, err) - assert.EqualError(t, err, "must contain only word characters (i.e. 
[a-zA-Z0-9] plus non-leading '-' or '_')") - assert.Nil(t, actorRef) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - err = sys.Stop(ctx) + t.Run( + "New instance with Defaults", func(t *testing.T) { + actorSystem, err := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + require.NoError(t, err) + require.NotNil(t, actorSystem) + var iface any = actorSystem + _, ok := iface.(ActorSystem) + assert.True(t, ok) + assert.Equal(t, "testSys", actorSystem.Name()) + assert.Empty(t, actorSystem.Actors()) + assert.NotNil(t, actorSystem.Logger()) + }, + ) + t.Run( + "New instance with Missing Name", func(t *testing.T) { + sys, err := NewActorSystem("") + assert.Error(t, err) + assert.Nil(t, sys) + assert.EqualError(t, err, ErrNameRequired.Error()) + }, + ) + t.Run( + "With invalid actor system Name", func(t *testing.T) { + sys, err := NewActorSystem("$omeN@me") + assert.Error(t, err) + assert.Nil(t, sys) + assert.EqualError(t, err, ErrInvalidActorSystemName.Error()) + }, + ) + t.Run( + "With Spawn an actor when not System started", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, "Test", actor) + assert.Error(t, err) + assert.EqualError(t, err, ErrActorSystemNotStarted.Error()) + assert.Nil(t, actorRef) + }, + ) + t.Run( + "With Spawn an actor when started", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With Spawn an actor already exist", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("test", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - actor := newTestActor() - ref1, err := sys.Spawn(ctx, "Test", actor) - assert.NoError(t, err) - assert.NotNil(t, ref1) + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, "Test", actor) + assert.NoError(t, err) + assert.NotNil(t, actorRef) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With Spawn an actor with invalid actor name", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) + require.NoError(t, err) + + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, "$omeN@me", actor) + require.Error(t, err) + assert.EqualError(t, err, "must contain only word characters (i.e. 
[a-zA-Z0-9] plus non-leading '-' or '_')") + assert.Nil(t, actorRef) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With Spawn an actor already exist", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("test", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) + assert.NoError(t, err) - ref2, err := sys.Spawn(ctx, "Test", actor) - assert.NotNil(t, ref2) - assert.NoError(t, err) + actor := newTestActor() + ref1, err := sys.Spawn(ctx, "Test", actor) + assert.NoError(t, err) + assert.NotNil(t, ref1) - // point to the same memory address - assert.True(t, ref1 == ref2) + ref2, err := sys.Spawn(ctx, "Test", actor) + assert.NotNil(t, ref2) + assert.NoError(t, err) - // stop the actor after some time - lib.Pause(time.Second) + // point to the same memory address + assert.True(t, ref1 == ref2) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With RemoteActor/ActorOf with clustering enabled", func(t *testing.T) { + ctx := context.TODO() + nodePorts := dynaport.Get(3) + gossipPort := nodePorts[0] + clusterPort := nodePorts[1] + remotingPort := nodePorts[2] + + logger := log.DiscardLogger + host := "127.0.0.1" + + // define discovered addresses + addrs := []string{ + net.JoinHostPort(host, strconv.Itoa(gossipPort)), + } - t.Cleanup(func() { - err = sys.Stop(ctx) + // mock the discovery provider + provider := new(testkit.Provider) + newActorSystem, err := NewActorSystem( + "test", + WithPassivationDisabled(), + WithLogger(logger), + WithReplyTimeout(time.Minute), + WithRemoting(host, int32(remotingPort)), + WithClustering(provider, 9, 1, gossipPort, clusterPort, new(testActor)), + ) + require.NoError(t, err) + + provider.EXPECT().ID().Return("testDisco") + provider.EXPECT().Initialize().Return(nil) + provider.EXPECT().Register().Return(nil) + provider.EXPECT().Deregister().Return(nil) + provider.EXPECT().DiscoverPeers().Return(addrs, nil) + provider.EXPECT().Close().Return(nil) + + // start the actor system + err = newActorSystem.Start(ctx) + require.NoError(t, err) + + // wait for the cluster to start + lib.Pause(time.Second) + + // create an actor + actorName := uuid.NewString() + actor := newTestActor() + actorRef, err := newActorSystem.Spawn(ctx, actorName, actor) assert.NoError(t, err) - }) - }) - t.Run("With RemoteActor/ActorOf with clustering enabled", func(t *testing.T) { - ctx := context.TODO() - nodePorts := dynaport.Get(3) - gossipPort := nodePorts[0] - clusterPort := nodePorts[1] - remotingPort := nodePorts[2] - - logger := log.DiscardLogger - host := "127.0.0.1" - - // define discovered addresses - addrs := []string{ - net.JoinHostPort(host, strconv.Itoa(gossipPort)), - } - - // mock the discovery provider - provider := new(testkit.Provider) - newActorSystem, err := NewActorSystem( - "test", - WithPassivationDisabled(), - WithLogger(logger), - WithReplyTimeout(time.Minute), - WithRemoting(host, int32(remotingPort)), - WithClustering(provider, 9, 1, gossipPort, clusterPort, new(testActor))) - require.NoError(t, err) - - provider.EXPECT().ID().Return("testDisco") - provider.EXPECT().Initialize().Return(nil) - provider.EXPECT().Register().Return(nil) - provider.EXPECT().Deregister().Return(nil) - provider.EXPECT().DiscoverPeers().Return(addrs, nil) - provider.EXPECT().Close().Return(nil) - - // start the 
actor system - err = newActorSystem.Start(ctx) - require.NoError(t, err) - - // wait for the cluster to start - lib.Pause(time.Second) - - // create an actor - actorName := uuid.NewString() - actor := newTestActor() - actorRef, err := newActorSystem.Spawn(ctx, actorName, actor) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - // wait for a while for replication to take effect - // otherwise the subsequent test will return actor not found - lib.Pause(time.Second) - - // get the actor - addr, _, err := newActorSystem.ActorOf(ctx, actorName) - require.NoError(t, err) - require.NotNil(t, addr) - - // use RemoteActor method and compare the results - remoteAddr, err := newActorSystem.RemoteActor(ctx, actorName) - require.NoError(t, err) - require.NotNil(t, remoteAddr) - require.True(t, proto.Equal(remoteAddr, addr)) - - reply, err := RemoteAsk(ctx, addr, new(testpb.TestReply), DefaultAskTimeout) - require.NoError(t, err) - require.NotNil(t, reply) - - // get the actor partition - partition := newActorSystem.GetPartition(actorName) - assert.GreaterOrEqual(t, partition, uint64(0)) - - // assert actor not found - actorName = "some-actor" - addr, pid, err := newActorSystem.ActorOf(ctx, actorName) - require.Error(t, err) - require.EqualError(t, err, ErrActorNotFound(actorName).Error()) - require.Nil(t, addr) - require.Nil(t, pid) - - remoteAddr, err = newActorSystem.RemoteActor(ctx, actorName) - require.Error(t, err) - require.EqualError(t, err, ErrActorNotFound(actorName).Error()) - require.Nil(t, remoteAddr) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - err = newActorSystem.Stop(ctx) + assert.NotNil(t, actorRef) + + // wait for a while for replication to take effect + // otherwise the subsequent test will return actor not found + lib.Pause(time.Second) + + // get the actor + addr, _, err := newActorSystem.ActorOf(ctx, actorName) + require.NoError(t, err) + require.NotNil(t, addr) + + // use RemoteActor method and compare the results + remoteAddr, err := newActorSystem.RemoteActor(ctx, actorName) + require.NoError(t, err) + require.NotNil(t, remoteAddr) + require.True(t, proto.Equal(remoteAddr, addr)) + + remoting := NewRemoting() + reply, err := remoting.RemoteAsk(ctx, addr, new(testpb.TestReply), DefaultAskTimeout) + require.NoError(t, err) + require.NotNil(t, reply) + + // get the actor partition + partition := newActorSystem.GetPartition(actorName) + assert.GreaterOrEqual(t, partition, uint64(0)) + + // assert actor not found + actorName = "some-actor" + addr, pid, err := newActorSystem.ActorOf(ctx, actorName) + require.Error(t, err) + require.EqualError(t, err, ErrActorNotFound(actorName).Error()) + require.Nil(t, addr) + require.Nil(t, pid) + + remoteAddr, err = newActorSystem.RemoteActor(ctx, actorName) + require.Error(t, err) + require.EqualError(t, err, ErrActorNotFound(actorName).Error()) + require.Nil(t, remoteAddr) + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + t.Cleanup( + func() { + err = newActorSystem.Stop(ctx) + assert.NoError(t, err) + provider.AssertExpectations(t) + }, + ) + }, + ) + t.Run( + "With remoting enabled", func(t *testing.T) { + ctx := context.TODO() + remotingPort := dynaport.Get(1)[0] + + logger := log.DiscardLogger + host := "127.0.0.1" + + newActorSystem, err := NewActorSystem( + "test", + WithPassivationDisabled(), + WithLogger(logger), + WithReplyTimeout(time.Minute), + WithRemoting(host, int32(remotingPort)), + ) + require.NoError(t, err) + + // start the actor system + err 
= newActorSystem.Start(ctx) + require.NoError(t, err) + + // wait for the cluster to fully start + lib.Pause(time.Second) + + // create an actor + actorName := uuid.NewString() + + addr, pid, err := newActorSystem.ActorOf(ctx, actorName) + require.Error(t, err) + require.EqualError(t, err, ErrMethodCallNotAllowed.Error()) + require.Nil(t, addr) + require.Nil(t, pid) + + t.Cleanup( + func() { + err = newActorSystem.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With ActorOf:remoting not enabled", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) assert.NoError(t, err) - provider.AssertExpectations(t) - }) - }) - t.Run("With remoting enabled", func(t *testing.T) { - ctx := context.TODO() - remotingPort := dynaport.Get(1)[0] - - logger := log.DiscardLogger - host := "127.0.0.1" - - newActorSystem, err := NewActorSystem( - "test", - WithPassivationDisabled(), - WithLogger(logger), - WithReplyTimeout(time.Minute), - WithRemoting(host, int32(remotingPort))) - require.NoError(t, err) - - // start the actor system - err = newActorSystem.Start(ctx) - require.NoError(t, err) - - // wait for the cluster to fully start - lib.Pause(time.Second) - - // create an actor - actorName := uuid.NewString() - - addr, pid, err := newActorSystem.ActorOf(ctx, actorName) - require.Error(t, err) - require.EqualError(t, err, ErrMethodCallNotAllowed.Error()) - require.Nil(t, addr) - require.Nil(t, pid) - - t.Cleanup(func() { - err = newActorSystem.Stop(ctx) + + actorName := "testActor" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + assert.NoError(t, err) + assert.NotNil(t, actorRef) + + addr, pid, err := sys.ActorOf(ctx, actorName) + require.NoError(t, err) + require.NotNil(t, pid) + require.NotNil(t, addr) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With ActorOf: not found", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With ActorOf:remoting not enabled", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - actorName := "testActor" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - addr, pid, err := sys.ActorOf(ctx, actorName) - require.NoError(t, err) - require.NotNil(t, pid) - require.NotNil(t, addr) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - err = sys.Stop(ctx) + + actorName := "notFound" + addr, pid, err := sys.ActorOf(ctx, actorName) + require.Error(t, err) + require.EqualError(t, err, ErrActorNotFound(actorName).Error()) + require.Nil(t, pid) + require.Nil(t, addr) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With ActorOf actor system started", func(t *testing.T) { + ctx := context.TODO() + remotingPort := dynaport.Get(1)[0] + + logger := log.DiscardLogger + host := "127.0.0.1" + + newActorSystem, err := NewActorSystem( + "test", + WithPassivationDisabled(), + 
WithLogger(logger), + WithReplyTimeout(time.Minute), + WithRemoting(host, int32(remotingPort)), + ) + require.NoError(t, err) + + // create an actor + actorName := uuid.NewString() + + addr, pid, err := newActorSystem.ActorOf(ctx, actorName) + require.Error(t, err) + require.EqualError(t, err, ErrActorSystemNotStarted.Error()) + require.Nil(t, addr) + require.Nil(t, pid) + }, + ) + t.Run( + "With ReSpawn", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With ActorOf: not found", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - actorName := "notFound" - addr, pid, err := sys.ActorOf(ctx, actorName) - require.Error(t, err) - require.EqualError(t, err, ErrActorNotFound(actorName).Error()) - require.Nil(t, pid) - require.Nil(t, addr) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - err = sys.Stop(ctx) + + // create a deadletter subscriber + consumer, err := sys.Subscribe() + require.NoError(t, err) + require.NotNil(t, consumer) + + actorName := "exchanger" + actorRef, err := sys.Spawn(ctx, actorName, &exchanger{}) assert.NoError(t, err) - }) - }) - t.Run("With ActorOf actor system started", func(t *testing.T) { - ctx := context.TODO() - remotingPort := dynaport.Get(1)[0] - - logger := log.DiscardLogger - host := "127.0.0.1" - - newActorSystem, err := NewActorSystem( - "test", - WithPassivationDisabled(), - WithLogger(logger), - WithReplyTimeout(time.Minute), - WithRemoting(host, int32(remotingPort))) - require.NoError(t, err) - - // create an actor - actorName := uuid.NewString() - - addr, pid, err := newActorSystem.ActorOf(ctx, actorName) - require.Error(t, err) - require.EqualError(t, err, ErrActorSystemNotStarted.Error()) - require.Nil(t, addr) - require.Nil(t, pid) - }) - t.Run("With ReSpawn", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - // create a deadletter subscriber - consumer, err := sys.Subscribe() - require.NoError(t, err) - require.NotNil(t, consumer) - - actorName := "exchanger" - actorRef, err := sys.Spawn(ctx, actorName, &exchanger{}) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - // send a message to the actor - reply, err := Ask(ctx, actorRef, new(testpb.TestReply), replyTimeout) - require.NoError(t, err) - require.NotNil(t, reply) - expected := new(testpb.Reply) - require.True(t, proto.Equal(expected, reply)) - require.True(t, actorRef.IsRunning()) - - // wait for a while for the system to stop - lib.Pause(time.Second) - // restart the actor - _, err = sys.ReSpawn(ctx, actorName) - require.NoError(t, err) - - // wait for the actor to complete start - // TODO we can add a callback for complete start - lib.Pause(time.Second) - require.True(t, actorRef.IsRunning()) - - var items []*goaktpb.ActorRestarted - for message := range consumer.Iterator() { - payload := message.Payload() - // only listening to deadletters - restarted, ok := payload.(*goaktpb.ActorRestarted) - if ok { - items = append(items, restarted) + assert.NotNil(t, actorRef) + + // send a message to the actor + reply, err := Ask(ctx, actorRef, new(testpb.TestReply), replyTimeout) + require.NoError(t, err) + 
require.NotNil(t, reply) + expected := new(testpb.Reply) + require.True(t, proto.Equal(expected, reply)) + require.True(t, actorRef.IsRunning()) + + // wait for a while for the system to stop + lib.Pause(time.Second) + // restart the actor + _, err = sys.ReSpawn(ctx, actorName) + require.NoError(t, err) + + // wait for the actor to complete start + // TODO we can add a callback for complete start + lib.Pause(time.Second) + require.True(t, actorRef.IsRunning()) + + var items []*goaktpb.ActorRestarted + for message := range consumer.Iterator() { + payload := message.Payload() + // only listening to deadletters + restarted, ok := payload.(*goaktpb.ActorRestarted) + if ok { + items = append(items, restarted) + } } - } - require.Len(t, items, 1) + require.Len(t, items, 1) + + // send a message to the actor + reply, err = Ask(ctx, actorRef, new(testpb.TestReply), replyTimeout) + require.NoError(t, err) + require.NotNil(t, reply) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With ReSpawn with PreStart failure", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem( + "testSys", + WithLogger(log.DiscardLogger), + WithExpireActorAfter(time.Minute), + ) + + // start the actor system + err := sys.Start(ctx) + assert.NoError(t, err) - // send a message to the actor - reply, err = Ask(ctx, actorRef, new(testpb.TestReply), replyTimeout) - require.NoError(t, err) - require.NotNil(t, reply) + actorName := "actor" + actorRef, err := sys.Spawn(ctx, actorName, newTestRestart()) + assert.NoError(t, err) + assert.NotNil(t, actorRef) + + require.True(t, actorRef.IsRunning()) + + // wait for a while for the system to stop + lib.Pause(time.Second) + // restart the actor + pid, err := sys.ReSpawn(ctx, actorName) + require.Error(t, err) + require.Nil(t, pid) + + require.False(t, actorRef.IsRunning()) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With ReSpawn: actor not found", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) + assert.NoError(t, err) - t.Cleanup(func() { - err = sys.Stop(ctx) + actorName := "exchanger" + actorRef, err := sys.Spawn(ctx, actorName, &exchanger{}) assert.NoError(t, err) - }) - }) - t.Run("With ReSpawn with PreStart failure", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", - WithLogger(log.DiscardLogger), - WithExpireActorAfter(time.Minute)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - actorName := "actor" - actorRef, err := sys.Spawn(ctx, actorName, newTestRestart()) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - require.True(t, actorRef.IsRunning()) - - // wait for a while for the system to stop - lib.Pause(time.Second) - // restart the actor - pid, err := sys.ReSpawn(ctx, actorName) - require.Error(t, err) - require.Nil(t, pid) - - require.False(t, actorRef.IsRunning()) - - t.Cleanup(func() { - err = sys.Stop(ctx) + assert.NotNil(t, actorRef) + + // send a message to the actor + reply, err := Ask(ctx, actorRef, new(testpb.TestReply), replyTimeout) + require.NoError(t, err) + require.NotNil(t, reply) + expected := new(testpb.Reply) + require.True(t, proto.Equal(expected, reply)) + require.True(t, actorRef.IsRunning()) + // stop the actor after some time + lib.Pause(time.Second) + + err = sys.Kill(ctx, actorName) + require.NoError(t, err) + + // wait for a 
while for the system to stop + lib.Pause(time.Second) + // restart the actor + _, err = sys.ReSpawn(ctx, actorName) + require.Error(t, err) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With ReSpawn an actor when not System started", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + _, err := sys.ReSpawn(ctx, "some-actor") + assert.Error(t, err) + assert.EqualError(t, err, "actor system has not started yet") + }, + ) + t.Run( + "ReSpawn with remoting enabled", func(t *testing.T) { + ctx := context.TODO() + remotingPort := dynaport.Get(1)[0] + + logger := log.DiscardLogger + host := "127.0.0.1" + + newActorSystem, err := NewActorSystem( + "test", + WithPassivationDisabled(), + WithLogger(logger), + WithReplyTimeout(time.Minute), + WithRemoting(host, int32(remotingPort)), + ) + require.NoError(t, err) + + // start the actor system + err = newActorSystem.Start(ctx) + require.NoError(t, err) + + actorName := "exchanger" + actorRef, err := newActorSystem.Spawn(ctx, actorName, &exchanger{}) assert.NoError(t, err) - }) - }) - t.Run("With ReSpawn: actor not found", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - actorName := "exchanger" - actorRef, err := sys.Spawn(ctx, actorName, &exchanger{}) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - // send a message to the actor - reply, err := Ask(ctx, actorRef, new(testpb.TestReply), replyTimeout) - require.NoError(t, err) - require.NotNil(t, reply) - expected := new(testpb.Reply) - require.True(t, proto.Equal(expected, reply)) - require.True(t, actorRef.IsRunning()) - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Kill(ctx, actorName) - require.NoError(t, err) - - // wait for a while for the system to stop - lib.Pause(time.Second) - // restart the actor - _, err = sys.ReSpawn(ctx, actorName) - require.Error(t, err) - - t.Cleanup(func() { - err = sys.Stop(ctx) + assert.NotNil(t, actorRef) + + // send a message to the actor + reply, err := Ask(ctx, actorRef, new(testpb.TestReply), replyTimeout) + require.NoError(t, err) + require.NotNil(t, reply) + expected := new(testpb.Reply) + require.True(t, proto.Equal(expected, reply)) + require.True(t, actorRef.IsRunning()) + // stop the actor after some time + lib.Pause(time.Second) + + // restart the actor + _, err = newActorSystem.ReSpawn(ctx, actorName) + require.NoError(t, err) + + // wait for the actor to complete start + // TODO we can add a callback for complete start + lib.Pause(time.Second) + require.True(t, actorRef.IsRunning()) + + t.Cleanup( + func() { + err = newActorSystem.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With NumActors", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With ReSpawn an actor when not System started", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - _, err := sys.ReSpawn(ctx, "some-actor") - assert.Error(t, err) - assert.EqualError(t, err, "actor system has not started yet") - }) - t.Run("ReSpawn with remoting enabled", func(t *testing.T) { - ctx := context.TODO() - remotingPort := dynaport.Get(1)[0] - - logger := 
log.DiscardLogger - host := "127.0.0.1" - - newActorSystem, err := NewActorSystem( - "test", - WithPassivationDisabled(), - WithLogger(logger), - WithReplyTimeout(time.Minute), - WithRemoting(host, int32(remotingPort))) - require.NoError(t, err) - - // start the actor system - err = newActorSystem.Start(ctx) - require.NoError(t, err) - - actorName := "exchanger" - actorRef, err := newActorSystem.Spawn(ctx, actorName, &exchanger{}) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - // send a message to the actor - reply, err := Ask(ctx, actorRef, new(testpb.TestReply), replyTimeout) - require.NoError(t, err) - require.NotNil(t, reply) - expected := new(testpb.Reply) - require.True(t, proto.Equal(expected, reply)) - require.True(t, actorRef.IsRunning()) - // stop the actor after some time - lib.Pause(time.Second) - - // restart the actor - _, err = newActorSystem.ReSpawn(ctx, actorName) - require.NoError(t, err) - - // wait for the actor to complete start - // TODO we can add a callback for complete start - lib.Pause(time.Second) - require.True(t, actorRef.IsRunning()) - - t.Cleanup(func() { - err = newActorSystem.Stop(ctx) + + actorName := "exchanger" + actorRef, err := sys.Spawn(ctx, actorName, &exchanger{}) assert.NoError(t, err) - }) - }) - t.Run("With NumActors", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + assert.NotNil(t, actorRef) + + // wait for the start of the actor to be complete + lib.Pause(time.Second) + + assert.EqualValues(t, 1, sys.NumActors()) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With remoting enabled: Actor not found", func(t *testing.T) { + ctx := context.TODO() + remotingPort := dynaport.Get(1)[0] + + logger := log.DiscardLogger + host := "127.0.0.1" + + newActorSystem, err := NewActorSystem( + "test", + WithPassivationDisabled(), + WithLogger(logger), + WithReplyTimeout(time.Minute), + WithRemoting(host, int32(remotingPort)), + ) + require.NoError(t, err) + + // start the actor system + err = newActorSystem.Start(ctx) + require.NoError(t, err) + + lib.Pause(time.Second) + + remoting := NewRemoting() + actorName := "some-actor" + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + require.Nil(t, addr) + + // attempt to send a message will fail + addr = address.From( + &goaktpb.Address{ + Host: host, + Port: int32(remotingPort), + Name: actorName, + Id: "", + }, + ) + reply, err := remoting.RemoteAsk(ctx, addr, new(testpb.TestReply), DefaultAskTimeout) + require.Error(t, err) + require.Nil(t, reply) + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + t.Cleanup( + func() { + err = newActorSystem.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With RemoteActor failure when system not started", func(t *testing.T) { + ctx := context.TODO() + remotingPort := dynaport.Get(1)[0] + + logger := log.DiscardLogger + host := "127.0.0.1" + + newActorSystem, err := NewActorSystem( + "test", + WithPassivationDisabled(), + WithLogger(logger), + WithReplyTimeout(time.Minute), + WithRemoting(host, int32(remotingPort)), + ) + require.NoError(t, err) + + actorName := "some-actor" + remoteAddr, err := newActorSystem.RemoteActor(ctx, actorName) + require.Error(t, err) + require.EqualError(t, err, ErrActorSystemNotStarted.Error()) + require.Nil(t, remoteAddr) + }, + ) + t.Run( + "With RemoteActor failure when system not started", func(t *testing.T) { + ctx 
:= context.TODO() + remotingPort := dynaport.Get(1)[0] + + logger := log.DiscardLogger + host := "127.0.0.1" + + newActorSystem, err := NewActorSystem( + "test", + WithPassivationDisabled(), + WithLogger(logger), + WithReplyTimeout(time.Minute), + WithRemoting(host, int32(remotingPort)), + ) + require.NoError(t, err) - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) + err = newActorSystem.Stop(ctx) + require.Error(t, err) + require.EqualError(t, err, ErrActorSystemNotStarted.Error()) + }, + ) + t.Run( + "With RemoteActor failure when cluster is not enabled", func(t *testing.T) { + ctx := context.TODO() + remotingPort := dynaport.Get(1)[0] + + logger := log.DiscardLogger + host := "127.0.0.1" + + newActorSystem, err := NewActorSystem( + "test", + WithPassivationDisabled(), + WithLogger(logger), + WithReplyTimeout(time.Minute), + WithRemoting(host, int32(remotingPort)), + ) + require.NoError(t, err) + + // start the actor system + err = newActorSystem.Start(ctx) + require.NoError(t, err) + + // wait for the system to properly start + actorName := "some-actor" + remoteAddr, err := newActorSystem.RemoteActor(ctx, actorName) + require.Error(t, err) + require.EqualError(t, err, ErrClusterDisabled.Error()) + require.Nil(t, remoteAddr) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup( + func() { + err = newActorSystem.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With LocalActor", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) + assert.NoError(t, err) - actorName := "exchanger" - actorRef, err := sys.Spawn(ctx, actorName, &exchanger{}) - assert.NoError(t, err) - assert.NotNil(t, actorRef) + // create an actor + actorName := "exchanger" + ref, err := sys.Spawn(ctx, actorName, &exchanger{}) + assert.NoError(t, err) + require.NotNil(t, ref) + + // locate the actor + local, err := sys.LocalActor(actorName) + require.NoError(t, err) + require.NotNil(t, local) + + require.Equal(t, ref.Address().String(), local.Address().String()) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With LocalActor: Actor not found", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) + assert.NoError(t, err) - // wait for the start of the actor to be complete - lib.Pause(time.Second) + // locate the actor + ref, err := sys.LocalActor("some-name") + require.Error(t, err) + require.Nil(t, ref) + require.EqualError(t, err, ErrActorNotFound("some-name").Error()) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With LocalActor when system not started", func(t *testing.T) { + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // create an actor + actorName := "exchanger" + + // locate the actor + local, err := sys.LocalActor(actorName) + require.Error(t, err) + require.EqualError(t, err, ErrActorSystemNotStarted.Error()) + require.Nil(t, local) + }, + ) + t.Run( + "With Kill an actor when not System started", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + err := sys.Kill(ctx, "Test") + assert.Error(t, err) + 
assert.EqualError(t, err, "actor system has not started yet") + }, + ) + t.Run( + "With Kill an actor when actor not found", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) + assert.NoError(t, err) + err = sys.Kill(ctx, "Test") + assert.Error(t, err) + assert.EqualError(t, err, "actor=goakt://testSys@127.0.0.1:0/Test not found") + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With housekeeping", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem( + "housekeeperSys", + WithLogger(log.DiscardLogger), + WithExpireActorAfter(passivateAfter), + ) + + // start the actor system + err := sys.Start(ctx) + assert.NoError(t, err) - assert.EqualValues(t, 1, sys.NumActors()) + // wait for the system to properly start + lib.Pause(time.Second) - t.Cleanup(func() { - err = sys.Stop(ctx) + actorName := "HousekeeperActor" + actorHandler := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actorHandler) assert.NoError(t, err) - }) - }) - t.Run("With remoting enabled: Actor not found", func(t *testing.T) { - ctx := context.TODO() - remotingPort := dynaport.Get(1)[0] - - logger := log.DiscardLogger - host := "127.0.0.1" - - newActorSystem, err := NewActorSystem( - "test", - WithPassivationDisabled(), - WithLogger(logger), - WithReplyTimeout(time.Minute), - WithRemoting(host, int32(remotingPort))) - require.NoError(t, err) - - // start the actor system - err = newActorSystem.Start(ctx) - require.NoError(t, err) - - lib.Pause(time.Second) - - actorName := "some-actor" - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - require.Nil(t, addr) - - // attempt to send a message will fail - addr = address.From(&goaktpb.Address{ - Host: host, - Port: int32(remotingPort), - Name: actorName, - Id: "", - }) - reply, err := RemoteAsk(ctx, addr, new(testpb.TestReply), DefaultAskTimeout) - require.Error(t, err) - require.Nil(t, reply) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - err = newActorSystem.Stop(ctx) + require.NotNil(t, actorRef) + + // wait for the actor to properly start + lib.Pause(time.Second) + + // locate the actor + ref, err := sys.LocalActor(actorName) + require.Error(t, err) + require.Nil(t, ref) + require.EqualError(t, err, ErrActorNotFound(actorName).Error()) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With GetPartition returning zero in non cluster env", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem( + "housekeeperSys", + WithLogger(log.DiscardLogger), + WithExpireActorAfter(passivateAfter), + ) + + // start the actor system + err := sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With RemoteActor failure when system not started", func(t *testing.T) { - ctx := context.TODO() - remotingPort := dynaport.Get(1)[0] - - logger := log.DiscardLogger - host := "127.0.0.1" - - newActorSystem, err := NewActorSystem( - "test", - WithPassivationDisabled(), - WithLogger(logger), - WithReplyTimeout(time.Minute), - WithRemoting(host, int32(remotingPort))) - require.NoError(t, err) - - actorName := "some-actor" - remoteAddr, err := newActorSystem.RemoteActor(ctx, actorName) - require.Error(t, err) - require.EqualError(t, err, ErrActorSystemNotStarted.Error()) - require.Nil(t, 
remoteAddr) - }) - t.Run("With RemoteActor failure when system not started", func(t *testing.T) { - ctx := context.TODO() - remotingPort := dynaport.Get(1)[0] - - logger := log.DiscardLogger - host := "127.0.0.1" - - newActorSystem, err := NewActorSystem( - "test", - WithPassivationDisabled(), - WithLogger(logger), - WithReplyTimeout(time.Minute), - WithRemoting(host, int32(remotingPort))) - require.NoError(t, err) - - err = newActorSystem.Stop(ctx) - require.Error(t, err) - require.EqualError(t, err, ErrActorSystemNotStarted.Error()) - }) - t.Run("With RemoteActor failure when cluster is not enabled", func(t *testing.T) { - ctx := context.TODO() - remotingPort := dynaport.Get(1)[0] - - logger := log.DiscardLogger - host := "127.0.0.1" - - newActorSystem, err := NewActorSystem( - "test", - WithPassivationDisabled(), - WithLogger(logger), - WithReplyTimeout(time.Minute), - WithRemoting(host, int32(remotingPort))) - require.NoError(t, err) - - // start the actor system - err = newActorSystem.Start(ctx) - require.NoError(t, err) - - // wait for the system to properly start - actorName := "some-actor" - remoteAddr, err := newActorSystem.RemoteActor(ctx, actorName) - require.Error(t, err) - require.EqualError(t, err, ErrClusterDisabled.Error()) - require.Nil(t, remoteAddr) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - err = newActorSystem.Stop(ctx) + + // wait for the system to properly start + lib.Pause(time.Second) + + partition := sys.GetPartition("some-actor") + assert.Zero(t, partition) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With actor PostStop error", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With LocalActor", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - // create an actor - actorName := "exchanger" - ref, err := sys.Spawn(ctx, actorName, &exchanger{}) - assert.NoError(t, err) - require.NotNil(t, ref) - - // locate the actor - local, err := sys.LocalActor(actorName) - require.NoError(t, err) - require.NotNil(t, local) - - require.Equal(t, ref.Address().String(), local.Address().String()) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - err = sys.Stop(ctx) + + actor := &testPostStop{} + actorRef, err := sys.Spawn(ctx, "Test", actor) assert.NoError(t, err) - }) - }) - t.Run("With LocalActor: Actor not found", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - // locate the actor - ref, err := sys.LocalActor("some-name") - require.Error(t, err) - require.Nil(t, ref) - require.EqualError(t, err, ErrActorNotFound("some-name").Error()) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - err = sys.Stop(ctx) + assert.NotNil(t, actorRef) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup( + func() { + assert.Error(t, sys.Stop(ctx)) + }, + ) + }, + ) + t.Run( + "With deadletters subscription ", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + 
err := sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With LocalActor when system not started", func(t *testing.T) { - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // create an actor - actorName := "exchanger" - - // locate the actor - local, err := sys.LocalActor(actorName) - require.Error(t, err) - require.EqualError(t, err, ErrActorSystemNotStarted.Error()) - require.Nil(t, local) - }) - t.Run("With Kill an actor when not System started", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - err := sys.Kill(ctx, "Test") - assert.Error(t, err) - assert.EqualError(t, err, "actor system has not started yet") - }) - t.Run("With Kill an actor when actor not found", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - err = sys.Kill(ctx, "Test") - assert.Error(t, err) - assert.EqualError(t, err, "actor=goakt://testSys@127.0.0.1:0/Test not found") - t.Cleanup(func() { - err = sys.Stop(ctx) + + // wait for complete start + lib.Pause(time.Second) + + // create a deadletter subscriber + consumer, err := sys.Subscribe() + require.NoError(t, err) + require.NotNil(t, consumer) + + // create the black hole actor + actor := &discarder{} + actorRef, err := sys.Spawn(ctx, "discarder", actor) assert.NoError(t, err) - }) - }) - t.Run("With housekeeping", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("housekeeperSys", - WithLogger(log.DiscardLogger), - WithExpireActorAfter(passivateAfter)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - // wait for the system to properly start - lib.Pause(time.Second) - - actorName := "HousekeeperActor" - actorHandler := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actorHandler) - assert.NoError(t, err) - require.NotNil(t, actorRef) - - // wait for the actor to properly start - lib.Pause(time.Second) - - // locate the actor - ref, err := sys.LocalActor(actorName) - require.Error(t, err) - require.Nil(t, ref) - require.EqualError(t, err, ErrActorNotFound(actorName).Error()) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - err = sys.Stop(ctx) + assert.NotNil(t, actorRef) + + // wait a while + lib.Pause(time.Second) + + // every message sent to the actor will result in deadletters + for i := 0; i < 5; i++ { + require.NoError(t, Tell(ctx, actorRef, new(testpb.TestSend))) + } + + lib.Pause(time.Second) + + var items []*goaktpb.Deadletter + for message := range consumer.Iterator() { + payload := message.Payload() + // only listening to deadletters + deadletter, ok := payload.(*goaktpb.Deadletter) + if ok { + items = append(items, deadletter) + } + } + + require.Len(t, items, 5) + + // unsubscribe the consumer + err = sys.Unsubscribe(consumer) + require.NoError(t, err) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With deadletters subscription when not started", func(t *testing.T) { + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // create a deadletter subscriber + consumer, err := sys.Subscribe() + require.Error(t, err) + require.Nil(t, consumer) + }, + ) + t.Run( + "With deadletters unsubscription when not started", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the 
actor system + err := sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With GetPartition returning zero in non cluster env", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("housekeeperSys", - WithLogger(log.DiscardLogger), - WithExpireActorAfter(passivateAfter)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - // wait for the system to properly start - lib.Pause(time.Second) - - partition := sys.GetPartition("some-actor") - assert.Zero(t, partition) - - t.Cleanup(func() { - err = sys.Stop(ctx) + + consumer, err := sys.Subscribe() + require.NoError(t, err) + require.NotNil(t, consumer) + + // stop the actor system + assert.NoError(t, sys.Stop(ctx)) + + lib.Pause(time.Second) + + // create a deadletter subscriber + err = sys.Unsubscribe(consumer) + require.Error(t, err) + }, + ) + t.Run( + "With Passivation with clustering enabled", func(t *testing.T) { + ctx := context.TODO() + nodePorts := dynaport.Get(3) + gossipPort := nodePorts[0] + clusterPort := nodePorts[1] + remotingPort := nodePorts[2] + + logger := log.DiscardLogger + host := "127.0.0.1" + + // define discovered addresses + addrs := []string{ + net.JoinHostPort(host, strconv.Itoa(gossipPort)), + } + + // mock the discovery provider + provider := new(testkit.Provider) + newActorSystem, err := NewActorSystem( + "test", + WithExpireActorAfter(passivateAfter), + WithLogger(logger), + WithReplyTimeout(time.Minute), + WithRemoting(host, int32(remotingPort)), + WithClustering(provider, 9, 1, gossipPort, clusterPort, new(testActor)), + ) + require.NoError(t, err) + + provider.EXPECT().ID().Return("testDisco") + provider.EXPECT().Initialize().Return(nil) + provider.EXPECT().Register().Return(nil) + provider.EXPECT().Deregister().Return(nil) + provider.EXPECT().DiscoverPeers().Return(addrs, nil) + provider.EXPECT().Close().Return(nil) + + // start the actor system + err = newActorSystem.Start(ctx) + require.NoError(t, err) + + // wait for the cluster to start + lib.Pause(time.Second) + + // create an actor + actorName := uuid.NewString() + actor := newTestActor() + actorRef, err := newActorSystem.Spawn(ctx, actorName, actor) assert.NoError(t, err) - }) - }) - t.Run("With actor PostStop error", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - actor := &testPostStop{} - actorRef, err := sys.Spawn(ctx, "Test", actor) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - assert.Error(t, sys.Stop(ctx)) - }) - }) - t.Run("With deadletters subscription ", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - // wait for complete start - lib.Pause(time.Second) - - // create a deadletter subscriber - consumer, err := sys.Subscribe() - require.NoError(t, err) - require.NotNil(t, consumer) - - // create the black hole actor - actor := &discarder{} - actorRef, err := sys.Spawn(ctx, "discarder", actor) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - // wait a while - lib.Pause(time.Second) - - // every message sent to the actor will result in deadletters - for i := 0; i < 5; i++ { - require.NoError(t, Tell(ctx, actorRef, new(testpb.TestSend))) - } - - lib.Pause(time.Second) - - var items 
[]*goaktpb.Deadletter - for message := range consumer.Iterator() { - payload := message.Payload() - // only listening to deadletters - deadletter, ok := payload.(*goaktpb.Deadletter) - if ok { - items = append(items, deadletter) + assert.NotNil(t, actorRef) + + // wait for a while for replication to take effect + // otherwise the subsequent test will return actor not found + lib.Pause(time.Second) + + // get the actor + addr, pid, err := newActorSystem.ActorOf(ctx, actorName) + require.Error(t, err) + require.EqualError(t, err, ErrActorNotFound(actorName).Error()) + require.Nil(t, addr) + require.Nil(t, pid) + + // use RemoteActor method and compare the results + remoteAddr, err := newActorSystem.RemoteActor(ctx, actorName) + require.Error(t, err) + require.EqualError(t, err, ErrActorNotFound(actorName).Error()) + require.Nil(t, remoteAddr) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup( + func() { + err = newActorSystem.Stop(ctx) + assert.NoError(t, err) + provider.AssertExpectations(t) + }, + ) + }, + ) + t.Run( + "With cluster events subscription", func(t *testing.T) { + // create a context + ctx := context.TODO() + // start the NATS server + srv := startNatsServer(t) + + // create and start system cluster + cl1, sd1 := startClusterSystem(t, "Node1", srv.Addr().String()) + peerAddress1 := cl1.PeerAddress() + require.NotEmpty(t, peerAddress1) + + // create a subscriber to node 1 + subscriber1, err := cl1.Subscribe() + require.NoError(t, err) + require.NotNil(t, subscriber1) + + // create and start system cluster + cl2, sd2 := startClusterSystem(t, "Node2", srv.Addr().String()) + peerAddress2 := cl2.PeerAddress() + require.NotEmpty(t, peerAddress2) + + // create a subscriber to node 2 + subscriber2, err := cl2.Subscribe() + require.NoError(t, err) + require.NotNil(t, subscriber2) + + // wait for some time + lib.Pause(time.Second) + + // capture the joins + var joins []*goaktpb.NodeJoined + for event := range subscriber1.Iterator() { + // get the event payload + payload := event.Payload() + // only listening to cluster event + nodeJoined, ok := payload.(*goaktpb.NodeJoined) + require.True(t, ok) + joins = append(joins, nodeJoined) } - } - require.Len(t, items, 5) + // assert the joins list + require.NotEmpty(t, joins) + require.Len(t, joins, 1) + require.Equal(t, peerAddress2, joins[0].GetAddress()) + + // wait for some time + lib.Pause(time.Second) + + // stop the node + require.NoError(t, cl1.Unsubscribe(subscriber1)) + assert.NoError(t, cl1.Stop(ctx)) + assert.NoError(t, sd1.Close()) - // unsubscribe the consumer - err = sys.Unsubscribe(consumer) - require.NoError(t, err) + // wait for some time + lib.Pause(time.Second) - t.Cleanup(func() { - err = sys.Stop(ctx) + var lefts []*goaktpb.NodeLeft + for event := range subscriber2.Iterator() { + payload := event.Payload() + + // only listening to cluster event + nodeLeft, ok := payload.(*goaktpb.NodeLeft) + require.True(t, ok) + lefts = append(lefts, nodeLeft) + } + + require.NotEmpty(t, lefts) + require.Len(t, lefts, 1) + require.Equal(t, peerAddress1, lefts[0].GetAddress()) + + require.NoError(t, cl2.Unsubscribe(subscriber2)) + + t.Cleanup( + func() { + assert.NoError(t, cl2.Stop(ctx)) + // stop the discovery engines + assert.NoError(t, sd2.Close()) + // shutdown the nats server gracefully + srv.Shutdown() + }, + ) + }, + ) + t.Run( + "With PeerAddress empty when cluster not enabled", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start 
the actor system + err := sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With deadletters subscription when not started", func(t *testing.T) { - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // create a deadletter subscriber - consumer, err := sys.Subscribe() - require.Error(t, err) - require.Nil(t, consumer) - }) - t.Run("With deadletters unsubscription when not started", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - consumer, err := sys.Subscribe() - require.NoError(t, err) - require.NotNil(t, consumer) - - // stop the actor system - assert.NoError(t, sys.Stop(ctx)) - - lib.Pause(time.Second) - - // create a deadletter subscriber - err = sys.Unsubscribe(consumer) - require.Error(t, err) - }) - t.Run("With Passivation with clustering enabled", func(t *testing.T) { - ctx := context.TODO() - nodePorts := dynaport.Get(3) - gossipPort := nodePorts[0] - clusterPort := nodePorts[1] - remotingPort := nodePorts[2] - - logger := log.DiscardLogger - host := "127.0.0.1" - - // define discovered addresses - addrs := []string{ - net.JoinHostPort(host, strconv.Itoa(gossipPort)), - } - - // mock the discovery provider - provider := new(testkit.Provider) - newActorSystem, err := NewActorSystem( - "test", - WithExpireActorAfter(passivateAfter), - WithLogger(logger), - WithReplyTimeout(time.Minute), - WithRemoting(host, int32(remotingPort)), - WithClustering(provider, 9, 1, gossipPort, clusterPort, new(testActor))) - require.NoError(t, err) - - provider.EXPECT().ID().Return("testDisco") - provider.EXPECT().Initialize().Return(nil) - provider.EXPECT().Register().Return(nil) - provider.EXPECT().Deregister().Return(nil) - provider.EXPECT().DiscoverPeers().Return(addrs, nil) - provider.EXPECT().Close().Return(nil) - - // start the actor system - err = newActorSystem.Start(ctx) - require.NoError(t, err) - - // wait for the cluster to start - lib.Pause(time.Second) - - // create an actor - actorName := uuid.NewString() - actor := newTestActor() - actorRef, err := newActorSystem.Spawn(ctx, actorName, actor) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - // wait for a while for replication to take effect - // otherwise the subsequent test will return actor not found - lib.Pause(time.Second) - - // get the actor - addr, pid, err := newActorSystem.ActorOf(ctx, actorName) - require.Error(t, err) - require.EqualError(t, err, ErrActorNotFound(actorName).Error()) - require.Nil(t, addr) - require.Nil(t, pid) - - // use RemoteActor method and compare the results - remoteAddr, err := newActorSystem.RemoteActor(ctx, actorName) - require.Error(t, err) - require.EqualError(t, err, ErrActorNotFound(actorName).Error()) - require.Nil(t, remoteAddr) - - // stop the actor after some time - lib.Pause(time.Second) - - t.Cleanup(func() { - err = newActorSystem.Stop(ctx) + require.Empty(t, sys.PeerAddress()) + + require.NoError(t, sys.Stop(ctx)) + }, + ) + t.Run( + "With SpawnFromFunc (cluster/remote enabled)", func(t *testing.T) { + ctx := context.TODO() + nodePorts := dynaport.Get(3) + gossipPort := nodePorts[0] + clusterPort := nodePorts[1] + remotingPort := nodePorts[2] + + logger := log.DiscardLogger + host := "127.0.0.1" + + // define discovered addresses + addrs := []string{ + net.JoinHostPort(host, strconv.Itoa(gossipPort)), + } + + // mock the discovery provider + provider := new(testkit.Provider) + newActorSystem, err := 
NewActorSystem( + "test", + WithPassivationDisabled(), + WithLogger(logger), + WithReplyTimeout(time.Minute), + WithRemoting(host, int32(remotingPort)), + WithClustering(provider, 9, 1, gossipPort, clusterPort, new(testActor)), + ) + require.NoError(t, err) + + provider.EXPECT().ID().Return("testDisco") + provider.EXPECT().Initialize().Return(nil) + provider.EXPECT().Register().Return(nil) + provider.EXPECT().Deregister().Return(nil) + provider.EXPECT().DiscoverPeers().Return(addrs, nil) + provider.EXPECT().Close().Return(nil) + + // start the actor system + err = newActorSystem.Start(ctx) + require.NoError(t, err) + + receiveFn := func(_ context.Context, message proto.Message) error { + expected := &testpb.Reply{Content: "test spawn from func"} + assert.True(t, proto.Equal(expected, message)) + return nil + } + + actorRef, err := newActorSystem.SpawnFromFunc(ctx, receiveFn) assert.NoError(t, err) - provider.AssertExpectations(t) - }) - }) - t.Run("With cluster events subscription", func(t *testing.T) { - // create a context - ctx := context.TODO() - // start the NATS server - srv := startNatsServer(t) - - // create and start system cluster - cl1, sd1 := startClusterSystem(t, "Node1", srv.Addr().String()) - peerAddress1 := cl1.PeerAddress() - require.NotEmpty(t, peerAddress1) - - // create a subscriber to node 1 - subscriber1, err := cl1.Subscribe() - require.NoError(t, err) - require.NotNil(t, subscriber1) - - // create and start system cluster - cl2, sd2 := startClusterSystem(t, "Node2", srv.Addr().String()) - peerAddress2 := cl2.PeerAddress() - require.NotEmpty(t, peerAddress2) - - // create a subscriber to node 2 - subscriber2, err := cl2.Subscribe() - require.NoError(t, err) - require.NotNil(t, subscriber2) - - // wait for some time - lib.Pause(time.Second) - - // capture the joins - var joins []*goaktpb.NodeJoined - for event := range subscriber1.Iterator() { - // get the event payload - payload := event.Payload() - // only listening to cluster event - nodeJoined, ok := payload.(*goaktpb.NodeJoined) - require.True(t, ok) - joins = append(joins, nodeJoined) - } - - // assert the joins list - require.NotEmpty(t, joins) - require.Len(t, joins, 1) - require.Equal(t, peerAddress2, joins[0].GetAddress()) - - // wait for some time - lib.Pause(time.Second) - - // stop the node - require.NoError(t, cl1.Unsubscribe(subscriber1)) - assert.NoError(t, cl1.Stop(ctx)) - assert.NoError(t, sd1.Close()) - - // wait for some time - lib.Pause(time.Second) - - var lefts []*goaktpb.NodeLeft - for event := range subscriber2.Iterator() { - payload := event.Payload() - - // only listening to cluster event - nodeLeft, ok := payload.(*goaktpb.NodeLeft) - require.True(t, ok) - lefts = append(lefts, nodeLeft) - } - - require.NotEmpty(t, lefts) - require.Len(t, lefts, 1) - require.Equal(t, peerAddress1, lefts[0].GetAddress()) - - require.NoError(t, cl2.Unsubscribe(subscriber2)) - - t.Cleanup(func() { - assert.NoError(t, cl2.Stop(ctx)) - // stop the discovery engines - assert.NoError(t, sd2.Close()) - // shutdown the nats server gracefully - srv.Shutdown() - }) - }) - t.Run("With PeerAddress empty when cluster not enabled", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - require.Empty(t, sys.PeerAddress()) - - require.NoError(t, sys.Stop(ctx)) - }) - t.Run("With SpawnFromFunc (cluster/remote enabled)", func(t *testing.T) { - ctx := context.TODO() - nodePorts := 
dynaport.Get(3) - gossipPort := nodePorts[0] - clusterPort := nodePorts[1] - remotingPort := nodePorts[2] - - logger := log.DiscardLogger - host := "127.0.0.1" - - // define discovered addresses - addrs := []string{ - net.JoinHostPort(host, strconv.Itoa(gossipPort)), - } - - // mock the discovery provider - provider := new(testkit.Provider) - newActorSystem, err := NewActorSystem( - "test", - WithPassivationDisabled(), - WithLogger(logger), - WithReplyTimeout(time.Minute), - WithRemoting(host, int32(remotingPort)), - WithClustering(provider, 9, 1, gossipPort, clusterPort, new(testActor))) - require.NoError(t, err) - - provider.EXPECT().ID().Return("testDisco") - provider.EXPECT().Initialize().Return(nil) - provider.EXPECT().Register().Return(nil) - provider.EXPECT().Deregister().Return(nil) - provider.EXPECT().DiscoverPeers().Return(addrs, nil) - provider.EXPECT().Close().Return(nil) - - // start the actor system - err = newActorSystem.Start(ctx) - require.NoError(t, err) - - receiveFn := func(_ context.Context, message proto.Message) error { - expected := &testpb.Reply{Content: "test spawn from func"} - assert.True(t, proto.Equal(expected, message)) - return nil - } - - actorRef, err := newActorSystem.SpawnFromFunc(ctx, receiveFn) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - // stop the actor after some time - lib.Pause(time.Second) - - // send a message to the actor - require.NoError(t, Tell(ctx, actorRef, &testpb.Reply{Content: "test spawn from func"})) - - t.Cleanup(func() { - err = newActorSystem.Stop(ctx) + assert.NotNil(t, actorRef) + + // stop the actor after some time + lib.Pause(time.Second) + + // send a message to the actor + require.NoError(t, Tell(ctx, actorRef, &testpb.Reply{Content: "test spawn from func"})) + + t.Cleanup( + func() { + err = newActorSystem.Stop(ctx) + assert.NoError(t, err) + provider.AssertExpectations(t) + }, + ) + }, + ) + t.Run( + "With SpawnFromFunc with PreStart error", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) assert.NoError(t, err) - provider.AssertExpectations(t) - }) - }) - t.Run("With SpawnFromFunc with PreStart error", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - receiveFn := func(_ context.Context, message proto.Message) error { - expected := &testpb.Reply{Content: "test spawn from func"} - assert.True(t, proto.Equal(expected, message)) - return nil - } - - preStart := func(ctx context.Context) error { - return errors.New("failed") - } - - actorRef, err := sys.SpawnFromFunc(ctx, receiveFn, WithPreStart(preStart)) - assert.Error(t, err) - assert.Nil(t, actorRef) - - t.Cleanup(func() { - err = sys.Stop(ctx) + + receiveFn := func(_ context.Context, message proto.Message) error { + expected := &testpb.Reply{Content: "test spawn from func"} + assert.True(t, proto.Equal(expected, message)) + return nil + } + + preStart := func(ctx context.Context) error { + return errors.New("failed") + } + + actorRef, err := sys.SpawnFromFunc(ctx, receiveFn, WithPreStart(preStart)) + assert.Error(t, err) + assert.Nil(t, actorRef) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With SpawnFromFunc with PreStop error", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", 
WithLogger(log.DiscardLogger)) + + // start the actor system + err := sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With SpawnFromFunc with PreStop error", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := sys.Start(ctx) - assert.NoError(t, err) - - receiveFn := func(ctx context.Context, message proto.Message) error { - expected := &testpb.Reply{Content: "test spawn from func"} - assert.True(t, proto.Equal(expected, message)) - return nil - } - - postStop := func(ctx context.Context) error { - return errors.New("failed") - } - - actorRef, err := sys.SpawnFromFunc(ctx, receiveFn, WithPostStop(postStop)) - assert.NoError(t, err) - assert.NotNil(t, actorRef) - - // stop the actor after some time - lib.Pause(time.Second) - - // send a message to the actor - require.NoError(t, Tell(ctx, actorRef, &testpb.Reply{Content: "test spawn from func"})) - - t.Cleanup(func() { - assert.Error(t, sys.Stop(ctx)) - }) - }) - t.Run("With SpawnFromFunc with actorSystem not started", func(t *testing.T) { - ctx := context.TODO() - sys, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - receiveFn := func(ctx context.Context, message proto.Message) error { - expected := &testpb.Reply{Content: "test spawn from func"} - assert.True(t, proto.Equal(expected, message)) - return nil - } - - preStart := func(ctx context.Context) error { - return errors.New("failed") - } - - actorRef, err := sys.SpawnFromFunc(ctx, receiveFn, WithPreStart(preStart)) - assert.Error(t, err) - assert.EqualError(t, err, ErrActorSystemNotStarted.Error()) - assert.Nil(t, actorRef) - }) - t.Run("With happy path Register", func(t *testing.T) { - ctx := context.TODO() - logger := log.DiscardLogger - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - // register the actor - err = sys.Register(ctx, &exchanger{}) - require.NoError(t, err) - - t.Cleanup(func() { - err = sys.Stop(ctx) + + receiveFn := func(ctx context.Context, message proto.Message) error { + expected := &testpb.Reply{Content: "test spawn from func"} + assert.True(t, proto.Equal(expected, message)) + return nil + } + + postStop := func(ctx context.Context) error { + return errors.New("failed") + } + + actorRef, err := sys.SpawnFromFunc(ctx, receiveFn, WithPostStop(postStop)) assert.NoError(t, err) - }) - }) - t.Run("With Register when actor system not started", func(t *testing.T) { - ctx := context.TODO() - logger := log.DiscardLogger - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // register the actor - err = sys.Register(ctx, &exchanger{}) - require.Error(t, err) - assert.EqualError(t, err, ErrActorSystemNotStarted.Error()) - - t.Cleanup(func() { - err = sys.Stop(ctx) + assert.NotNil(t, actorRef) + + // stop the actor after some time + lib.Pause(time.Second) + + // send a message to the actor + require.NoError(t, Tell(ctx, actorRef, &testpb.Reply{Content: "test spawn from func"})) + + t.Cleanup( + func() { + assert.Error(t, sys.Stop(ctx)) + }, + ) + }, + ) + t.Run( + "With SpawnFromFunc with actorSystem not started", func(t *testing.T) { + ctx := context.TODO() + sys, _ := NewActorSystem("testSys", 
WithLogger(log.DiscardLogger)) + + receiveFn := func(ctx context.Context, message proto.Message) error { + expected := &testpb.Reply{Content: "test spawn from func"} + assert.True(t, proto.Equal(expected, message)) + return nil + } + + preStart := func(ctx context.Context) error { + return errors.New("failed") + } + + actorRef, err := sys.SpawnFromFunc(ctx, receiveFn, WithPreStart(preStart)) assert.Error(t, err) - }) - }) - t.Run("With happy path Deregister", func(t *testing.T) { - ctx := context.TODO() - logger := log.DiscardLogger - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - // register the actor - err = sys.Register(ctx, &exchanger{}) - require.NoError(t, err) - - err = sys.Deregister(ctx, &exchanger{}) - require.NoError(t, err) - - t.Cleanup(func() { - err = sys.Stop(ctx) + assert.EqualError(t, err, ErrActorSystemNotStarted.Error()) + assert.Nil(t, actorRef) + }, + ) + t.Run( + "With happy path Register", func(t *testing.T) { + ctx := context.TODO() + logger := log.DiscardLogger + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With Deregister when actor system not started", func(t *testing.T) { - ctx := context.TODO() - logger := log.DiscardLogger - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - err = sys.Deregister(ctx, &exchanger{}) - require.Error(t, err) - assert.EqualError(t, err, ErrActorSystemNotStarted.Error()) - - t.Cleanup(func() { - err = sys.Stop(ctx) - assert.Error(t, err) - }) - }) - t.Run("With cluster start failure with remoting not enabled", func(t *testing.T) { - ctx := context.TODO() - logger := log.DiscardLogger - mockedCluster := new(clustermocks.Interface) - mockedErr := errors.New("failed to start") - mockedCluster.EXPECT().Start(ctx).Return(mockedErr) - - // mock the discovery provider - provider := new(testkit.Provider) - provider.EXPECT().ID().Return("id") - - system := &actorSystem{ - name: "testSystem", - logger: logger, - cluster: mockedCluster, - locker: sync.Mutex{}, - scheduler: newScheduler(logger, time.Second, withSchedulerCluster(mockedCluster)), - clusterConfig: NewClusterConfig(), - registry: types.NewRegistry(), - } - system.clusterEnabled.Store(true) - - err := system.Start(ctx) - require.Error(t, err) - assert.EqualError(t, err, "clustering needs remoting to be enabled") - }) - t.Run("With RemoteSpawn with clustering enabled", func(t *testing.T) { - ctx := context.TODO() - nodePorts := dynaport.Get(3) - gossipPort := nodePorts[0] - clusterPort := nodePorts[1] - remotingPort := nodePorts[2] - - logger := log.DiscardLogger - host := "127.0.0.1" - - // define discovered addresses - addrs := []string{ - net.JoinHostPort(host, strconv.Itoa(gossipPort)), - } - - // mock the discovery provider - provider := new(testkit.Provider) - newActorSystem, err := NewActorSystem( - "test", - WithPassivationDisabled(), - WithLogger(logger), - WithReplyTimeout(time.Minute), - WithRemoting(host, int32(remotingPort)), - WithClustering(provider, 9, 1, gossipPort, clusterPort, new(exchanger))) - 
require.NoError(t, err) - - provider.EXPECT().ID().Return("testDisco") - provider.EXPECT().Initialize().Return(nil) - provider.EXPECT().Register().Return(nil) - provider.EXPECT().Deregister().Return(nil) - provider.EXPECT().DiscoverPeers().Return(addrs, nil) - provider.EXPECT().Close().Return(nil) - - // start the actor system - err = newActorSystem.Start(ctx) - require.NoError(t, err) - - // wait for the cluster to start - lib.Pause(time.Second) - - // create an actor - actorName := "actorID" - // fetching the address of the that actor should return nil address - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - require.Nil(t, addr) - - // spawn the remote actor - err = RemoteSpawn(ctx, host, remotingPort, actorName, "actors.exchanger") - require.NoError(t, err) - - // re-fetching the address of the actor should return not nil address after start - addr, err = RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - require.NotNil(t, addr) - - // send the message to exchanger actor one using remote messaging - reply, err := RemoteAsk(ctx, addr, new(testpb.TestReply), DefaultAskTimeout) - - require.NoError(t, err) - require.NotNil(t, reply) - require.True(t, reply.MessageIs(new(testpb.Reply))) - - actual := new(testpb.Reply) - err = reply.UnmarshalTo(actual) - require.NoError(t, err) - - expected := new(testpb.Reply) - assert.True(t, proto.Equal(expected, actual)) - - t.Cleanup(func() { - err = newActorSystem.Stop(ctx) + + // register the actor + err = sys.Register(ctx, &exchanger{}) + require.NoError(t, err) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With Register when actor system not started", func(t *testing.T) { + ctx := context.TODO() + logger := log.DiscardLogger + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // register the actor + err = sys.Register(ctx, &exchanger{}) + require.Error(t, err) + assert.EqualError(t, err, ErrActorSystemNotStarted.Error()) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.Error(t, err) + }, + ) + }, + ) + t.Run( + "With happy path Deregister", func(t *testing.T) { + ctx := context.TODO() + logger := log.DiscardLogger + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) assert.NoError(t, err) - }) - }) - t.Run("With Spawn with custom mailbox", func(t *testing.T) { - ctx := context.TODO() - actorSystem, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) - - // start the actor system - err := actorSystem.Start(ctx) - assert.NoError(t, err) - - // wait for complete start - lib.Pause(time.Second) - - // create the black hole actor - actor := newTestActor() - pid, err := actorSystem.Spawn(ctx, "test", actor, WithMailbox(NewBoundedMailbox(10))) - assert.NoError(t, err) - assert.NotNil(t, pid) - - // wait a while - lib.Pause(time.Second) - assert.EqualValues(t, 1, pid.ProcessedCount()) - require.True(t, pid.IsRunning()) - - // every message sent to the actor will result in deadletters - counter := 0 - for i := 1; i <= 5; i++ { - require.NoError(t, Tell(ctx, pid, new(testpb.TestSend))) - counter = counter + 1 - } - - lib.Pause(time.Second) - - assert.EqualValues(t, counter, pid.ProcessedCount()-1) - 
require.NoError(t, err) - - t.Cleanup(func() { - err = actorSystem.Stop(ctx) + + // register the actor + err = sys.Register(ctx, &exchanger{}) + require.NoError(t, err) + + err = sys.Deregister(ctx, &exchanger{}) + require.NoError(t, err) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With Deregister when actor system not started", func(t *testing.T) { + ctx := context.TODO() + logger := log.DiscardLogger + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + err = sys.Deregister(ctx, &exchanger{}) + require.Error(t, err) + assert.EqualError(t, err, ErrActorSystemNotStarted.Error()) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.Error(t, err) + }, + ) + }, + ) + t.Run( + "With cluster start failure with remoting not enabled", func(t *testing.T) { + ctx := context.TODO() + logger := log.DiscardLogger + mockedCluster := new(clustermocks.Interface) + mockedErr := errors.New("failed to start") + mockedCluster.EXPECT().Start(ctx).Return(mockedErr) + + // mock the discovery provider + provider := new(testkit.Provider) + provider.EXPECT().ID().Return("id") + + system := &actorSystem{ + name: "testSystem", + logger: logger, + cluster: mockedCluster, + locker: sync.Mutex{}, + scheduler: newScheduler(logger, time.Second, withSchedulerCluster(mockedCluster)), + clusterConfig: NewClusterConfig(), + registry: types.NewRegistry(), + } + system.clusterEnabled.Store(true) + + err := system.Start(ctx) + require.Error(t, err) + assert.EqualError(t, err, "clustering needs remoting to be enabled") + }, + ) + t.Run( + "With RemoteSpawn with clustering enabled", func(t *testing.T) { + ctx := context.TODO() + nodePorts := dynaport.Get(3) + gossipPort := nodePorts[0] + clusterPort := nodePorts[1] + remotingPort := nodePorts[2] + + logger := log.DiscardLogger + host := "127.0.0.1" + + // define discovered addresses + addrs := []string{ + net.JoinHostPort(host, strconv.Itoa(gossipPort)), + } + + // mock the discovery provider + provider := new(testkit.Provider) + newActorSystem, err := NewActorSystem( + "test", + WithPassivationDisabled(), + WithLogger(logger), + WithReplyTimeout(time.Minute), + WithRemoting(host, int32(remotingPort)), + WithClustering(provider, 9, 1, gossipPort, clusterPort, new(exchanger)), + ) + require.NoError(t, err) + + provider.EXPECT().ID().Return("testDisco") + provider.EXPECT().Initialize().Return(nil) + provider.EXPECT().Register().Return(nil) + provider.EXPECT().Deregister().Return(nil) + provider.EXPECT().DiscoverPeers().Return(addrs, nil) + provider.EXPECT().Close().Return(nil) + + // start the actor system + err = newActorSystem.Start(ctx) + require.NoError(t, err) + + // wait for the cluster to start + lib.Pause(time.Second) + + remoting := NewRemoting() + // create an actor + actorName := "actorID" + // fetching the address of the that actor should return nil address + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + require.Nil(t, addr) + + // spawn the remote actor + err = remoting.RemoteSpawn(ctx, host, remotingPort, actorName, "actors.exchanger") + require.NoError(t, err) + + // re-fetching the address of the actor should return not nil address after start + addr, err = remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + require.NotNil(t, addr) + + // send the message to exchanger actor one using 
remote messaging + reply, err := remoting.RemoteAsk(ctx, addr, new(testpb.TestReply), DefaultAskTimeout) + + require.NoError(t, err) + require.NotNil(t, reply) + require.True(t, reply.MessageIs(new(testpb.Reply))) + + actual := new(testpb.Reply) + err = reply.UnmarshalTo(actual) + require.NoError(t, err) + + expected := new(testpb.Reply) + assert.True(t, proto.Equal(expected, actual)) + + t.Cleanup( + func() { + err = newActorSystem.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + t.Run( + "With Spawn with custom mailbox", func(t *testing.T) { + ctx := context.TODO() + actorSystem, _ := NewActorSystem("testSys", WithLogger(log.DiscardLogger)) + + // start the actor system + err := actorSystem.Start(ctx) assert.NoError(t, err) - }) - }) + + // wait for complete start + lib.Pause(time.Second) + + // create the black hole actor + actor := newTestActor() + pid, err := actorSystem.Spawn(ctx, "test", actor, WithMailbox(NewBoundedMailbox(10))) + assert.NoError(t, err) + assert.NotNil(t, pid) + + // wait a while + lib.Pause(time.Second) + assert.EqualValues(t, 1, pid.ProcessedCount()) + require.True(t, pid.IsRunning()) + + // every message sent to the actor will result in deadletters + counter := 0 + for i := 1; i <= 5; i++ { + require.NoError(t, Tell(ctx, pid, new(testpb.TestSend))) + counter = counter + 1 + } + + lib.Pause(time.Second) + + assert.EqualValues(t, counter, pid.ProcessedCount()-1) + require.NoError(t, err) + + t.Cleanup( + func() { + err = actorSystem.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) } diff --git a/actors/api.go b/actors/api.go index cf3dcce2..8fc84f5f 100644 --- a/actors/api.go +++ b/actors/api.go @@ -26,18 +26,12 @@ package actors import ( "context" - "strings" "time" - "connectrpc.com/connect" "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/protobuf/types/known/durationpb" "github.com/tochemey/goakt/v2/address" - "github.com/tochemey/goakt/v2/internal/http" "github.com/tochemey/goakt/v2/internal/internalpb" - "github.com/tochemey/goakt/v2/internal/internalpb/internalpbconnect" ) // Ask sends a synchronous message to another actor and expect a response. @@ -109,312 +103,6 @@ func BatchAsk(ctx context.Context, to *PID, timeout time.Duration, messages ...p return } -// RemoteTell sends a message to an actor remotely without expecting any reply -func RemoteTell(ctx context.Context, to *address.Address, message proto.Message) error { - marshaled, err := anypb.New(message) - if err != nil { - return ErrInvalidRemoteMessage(err) - } - - remoteClient := internalpbconnect.NewRemotingServiceClient( - http.NewClient(), - http.URL(to.GetHost(), int(to.GetPort())), - ) - - request := &internalpb.RemoteTellRequest{ - RemoteMessage: &internalpb.RemoteMessage{ - Sender: address.NoSender, - Receiver: to.Address, - Message: marshaled, - }, - } - - stream := remoteClient.RemoteTell(ctx) - if err := stream.Send(request); err != nil { - if eof(err) { - if _, err := stream.CloseAndReceive(); err != nil { - return err - } - return nil - } - return err - } - - // close the connection - if _, err := stream.CloseAndReceive(); err != nil { - return err - } - - return nil -} - -// RemoteAsk sends a synchronous message to another actor remotely and expect a response. 
-func RemoteAsk(ctx context.Context, to *address.Address, message proto.Message, timeout time.Duration) (response *anypb.Any, err error) { - marshaled, err := anypb.New(message) - if err != nil { - return nil, ErrInvalidRemoteMessage(err) - } - - remotingService := internalpbconnect.NewRemotingServiceClient( - http.NewClient(), - http.URL(to.GetHost(), int(to.GetPort())), - ) - - request := &internalpb.RemoteAskRequest{ - RemoteMessage: &internalpb.RemoteMessage{ - Sender: address.NoSender, - Receiver: to.Address, - Message: marshaled, - }, - Timeout: durationpb.New(timeout), - } - stream := remotingService.RemoteAsk(ctx) - errc := make(chan error, 1) - - go func() { - defer close(errc) - for { - resp, err := stream.Receive() - if err != nil { - errc <- err - return - } - - response = resp.GetMessage() - } - }() - - err = stream.Send(request) - if err != nil { - return nil, err - } - - if err := stream.CloseRequest(); err != nil { - return nil, err - } - - err = <-errc - if eof(err) { - return response, nil - } - - if err != nil { - return nil, err - } - - return -} - -// RemoteLookup look for an actor address on a remote node. -func RemoteLookup(ctx context.Context, host string, port int, name string) (addr *address.Address, err error) { - remoteClient := internalpbconnect.NewRemotingServiceClient( - http.NewClient(), - http.URL(host, port), - ) - - request := connect.NewRequest(&internalpb.RemoteLookupRequest{ - Host: host, - Port: int32(port), - Name: name, - }) - - response, err := remoteClient.RemoteLookup(ctx, request) - if err != nil { - code := connect.CodeOf(err) - if code == connect.CodeNotFound { - return nil, nil - } - return nil, err - } - - return address.From(response.Msg.GetAddress()), nil -} - -// RemoteBatchTell sends bulk asynchronous messages to an actor -func RemoteBatchTell(ctx context.Context, to *address.Address, messages ...proto.Message) error { - var requests []*internalpb.RemoteTellRequest - for _, message := range messages { - packed, err := anypb.New(message) - if err != nil { - return ErrInvalidRemoteMessage(err) - } - - requests = append(requests, &internalpb.RemoteTellRequest{ - RemoteMessage: &internalpb.RemoteMessage{ - Sender: address.NoSender, - Receiver: to.Address, - Message: packed, - }, - }) - } - - remoteClient := internalpbconnect.NewRemotingServiceClient( - http.NewClient(), - http.URL(to.GetHost(), int(to.GetPort())), - ) - - stream := remoteClient.RemoteTell(ctx) - for _, request := range requests { - err := stream.Send(request) - if eof(err) { - if _, err := stream.CloseAndReceive(); err != nil { - return err - } - return nil - } - - if err != nil { - return err - } - } - - // close the connection - if _, err := stream.CloseAndReceive(); err != nil { - return err - } - - return nil -} - -// RemoteBatchAsk sends bulk messages to an actor with responses expected -func RemoteBatchAsk(ctx context.Context, to *address.Address, messages ...proto.Message) (responses []*anypb.Any, err error) { - var requests []*internalpb.RemoteAskRequest - for _, message := range messages { - packed, err := anypb.New(message) - if err != nil { - return nil, ErrInvalidRemoteMessage(err) - } - - requests = append(requests, &internalpb.RemoteAskRequest{ - RemoteMessage: &internalpb.RemoteMessage{ - Sender: address.NoSender, - Receiver: to.Address, - Message: packed, - }, - }) - } - - remoteClient := internalpbconnect.NewRemotingServiceClient( - http.NewClient(), - http.URL(to.GetHost(), int(to.GetPort())), - ) - - stream := remoteClient.RemoteAsk(ctx) - errc := 
make(chan error, 1) - - go func() { - defer close(errc) - for { - resp, err := stream.Receive() - if err != nil { - errc <- err - return - } - - responses = append(responses, resp.GetMessage()) - } - }() - - for _, request := range requests { - err := stream.Send(request) - if err != nil { - return nil, err - } - } - - if err := stream.CloseRequest(); err != nil { - return nil, err - } - - err = <-errc - if eof(err) { - return responses, nil - } - - if err != nil { - return nil, err - } - - return -} - -// RemoteReSpawn restarts actor on a remote node. -func RemoteReSpawn(ctx context.Context, host string, port int, name string) error { - remoteClient := internalpbconnect.NewRemotingServiceClient( - http.NewClient(), - http.URL(host, port), - ) - - request := connect.NewRequest(&internalpb.RemoteReSpawnRequest{ - Host: host, - Port: int32(port), - Name: name, - }) - - if _, err := remoteClient.RemoteReSpawn(ctx, request); err != nil { - code := connect.CodeOf(err) - if code == connect.CodeNotFound { - return nil - } - return err - } - - return nil -} - -// RemoteStop stops an actor on a remote node. -func RemoteStop(ctx context.Context, host string, port int, name string) error { - remoteClient := internalpbconnect.NewRemotingServiceClient( - http.NewClient(), - http.URL(host, port), - ) - - request := connect.NewRequest(&internalpb.RemoteStopRequest{ - Host: host, - Port: int32(port), - Name: name, - }) - - if _, err := remoteClient.RemoteStop(ctx, request); err != nil { - code := connect.CodeOf(err) - if code == connect.CodeNotFound { - return nil - } - return err - } - - return nil -} - -// RemoteSpawn creates an actor on a remote node. The given actor needs to be registered on the remote node using the Register method of ActorSystem -func RemoteSpawn(ctx context.Context, host string, port int, name, actorType string) error { - remoteClient := internalpbconnect.NewRemotingServiceClient( - http.NewClient(), - http.URL(host, port), - ) - - request := connect.NewRequest(&internalpb.RemoteSpawnRequest{ - Host: host, - Port: int32(port), - ActorName: name, - ActorType: actorType, - }) - - if _, err := remoteClient.RemoteSpawn(ctx, request); err != nil { - code := connect.CodeOf(err) - if code == connect.CodeFailedPrecondition { - connectErr := err.(*connect.Error) - e := connectErr.Unwrap() - // TODO: find a better way to use errors.Is with connect.Error - if strings.Contains(e.Error(), ErrTypeNotRegistered.Error()) { - return ErrTypeNotRegistered - } - } - return err - } - return nil -} - // toReceiveContext creates a ReceiveContext provided a message and a receiver func toReceiveContext(ctx context.Context, to *PID, message proto.Message, async bool) (*ReceiveContext, error) { switch msg := message.(type) { diff --git a/actors/api_test.go b/actors/api_test.go index 38ce52dd..106fedd2 100644 --- a/actors/api_test.go +++ b/actors/api_test.go @@ -29,15 +29,11 @@ import ( "testing" "time" - "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/travisjeffery/go-dynaport" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" - "github.com/tochemey/goakt/v2/address" - "github.com/tochemey/goakt/v2/goaktpb" "github.com/tochemey/goakt/v2/internal/internalpb" "github.com/tochemey/goakt/v2/internal/lib" "github.com/tochemey/goakt/v2/log" @@ -45,1808 +41,518 @@ import ( ) func TestAsk(t *testing.T) { - t.Run("With running actor", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the 
logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // create a message to send to the test actor - message := new(testpb.TestReply) - // send the message to the actor - reply, err := Ask(ctx, actorRef, message, replyTimeout) - // perform some assertions - require.NoError(t, err) - assert.NotNil(t, reply) - expected := &testpb.Reply{Content: "received message"} - assert.True(t, proto.Equal(expected, reply)) - - err = sys.Stop(ctx) - }) - t.Run("With stopped actor", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // Shutdown the actor after some time - lib.Pause(time.Second) - require.NoError(t, actorRef.Shutdown(ctx)) - - // create a message to send to the test actor - message := new(testpb.TestReply) - // send the message to the actor - reply, err := Ask(ctx, actorRef, message, replyTimeout) - // perform some assertions - require.Error(t, err) - assert.EqualError(t, err, ErrDead.Error()) - assert.Nil(t, reply) - - err = sys.Stop(ctx) - }) - t.Run("With request timeout", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithReplyTimeout(replyTimeout), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // create a message to send to the test actor - message := new(testpb.TestTimeout) - // send the message to the actor - reply, err := Ask(ctx, actorRef, message, replyTimeout) - // perform some assertions - require.Error(t, err) - assert.EqualError(t, err, ErrRequestTimeout.Error()) - assert.Nil(t, reply) - - err = sys.Stop(ctx) - }) - t.Run("With invalid remote message", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - 
assert.NotNil(t, actorRef) - - lib.Pause(time.Second) - - // create a message to send to the test actor - message := &internalpb.RemoteMessage{ - Message: &anypb.Any{}, - } - // send the message to the actor - reply, err := Ask(ctx, actorRef, message, replyTimeout) - // perform some assertions - require.Error(t, err) - assert.Nil(t, reply) - - err = sys.Stop(ctx) - }) - t.Run("With Batch request happy path", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // create a message to send to the test actor - // send the message to the actor - replies, err := BatchAsk(ctx, actorRef, replyTimeout, new(testpb.TestReply), new(testpb.TestReply)) - // perform some assertions - require.NoError(t, err) - assert.NotNil(t, replies) - assert.NotEmpty(t, replies) - assert.Len(t, replies, 2) - - for reply := range replies { + t.Run( + "With running actor", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // create a message to send to the test actor + message := new(testpb.TestReply) + // send the message to the actor + reply, err := Ask(ctx, actorRef, message, replyTimeout) + // perform some assertions + require.NoError(t, err) + assert.NotNil(t, reply) expected := &testpb.Reply{Content: "received message"} assert.True(t, proto.Equal(expected, reply)) - } - - err = sys.Stop(ctx) - }) - t.Run("With Batch request with timeout", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithReplyTimeout(replyTimeout), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // create a message to send to the test actor - // send the message to the actor - replies, err := BatchAsk(ctx, actorRef, replyTimeout, new(testpb.TestTimeout), new(testpb.TestReply)) - // perform some assertions - require.Error(t, err) - require.EqualError(t, err, ErrRequestTimeout.Error()) - assert.Empty(t, replies) - - // stop the actor after some time - // this is due to the actor Waitgroup to gracefully close - lib.Pause(time.Second) - - err = sys.Stop(ctx) - }) - t.Run("With Batch 
request with dead actor", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithReplyTimeout(replyTimeout), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // stop the actor - require.NoError(t, actorRef.Shutdown(ctx)) - - // create a message to send to the test actor - // send the message to the actor - replies, err := BatchAsk(ctx, actorRef, replyTimeout, new(testpb.TestTimeout), new(testpb.TestReply)) - // perform some assertions - require.Error(t, err) - assert.Empty(t, replies) - - err = sys.Stop(ctx) - }) + + err = sys.Stop(ctx) + }, + ) + t.Run( + "With stopped actor", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // Shutdown the actor after some time + lib.Pause(time.Second) + require.NoError(t, actorRef.Shutdown(ctx)) + + // create a message to send to the test actor + message := new(testpb.TestReply) + // send the message to the actor + reply, err := Ask(ctx, actorRef, message, replyTimeout) + // perform some assertions + require.Error(t, err) + assert.EqualError(t, err, ErrDead.Error()) + assert.Nil(t, reply) + + err = sys.Stop(ctx) + }, + ) + t.Run( + "With request timeout", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithReplyTimeout(replyTimeout), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // create a message to send to the test actor + message := new(testpb.TestTimeout) + // send the message to the actor + reply, err := Ask(ctx, actorRef, message, replyTimeout) + // perform some assertions + require.Error(t, err) + assert.EqualError(t, err, ErrRequestTimeout.Error()) + assert.Nil(t, reply) + + err = sys.Stop(ctx) + }, + ) + t.Run( + "With invalid remote message", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + 
assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + lib.Pause(time.Second) + + // create a message to send to the test actor + message := &internalpb.RemoteMessage{ + Message: &anypb.Any{}, + } + // send the message to the actor + reply, err := Ask(ctx, actorRef, message, replyTimeout) + // perform some assertions + require.Error(t, err) + assert.Nil(t, reply) + + err = sys.Stop(ctx) + }, + ) + t.Run( + "With Batch request happy path", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // create a message to send to the test actor + // send the message to the actor + replies, err := BatchAsk(ctx, actorRef, replyTimeout, new(testpb.TestReply), new(testpb.TestReply)) + // perform some assertions + require.NoError(t, err) + assert.NotNil(t, replies) + assert.NotEmpty(t, replies) + assert.Len(t, replies, 2) + + for reply := range replies { + expected := &testpb.Reply{Content: "received message"} + assert.True(t, proto.Equal(expected, reply)) + } + + err = sys.Stop(ctx) + }, + ) + t.Run( + "With Batch request with timeout", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithReplyTimeout(replyTimeout), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // create a message to send to the test actor + // send the message to the actor + replies, err := BatchAsk(ctx, actorRef, replyTimeout, new(testpb.TestTimeout), new(testpb.TestReply)) + // perform some assertions + require.Error(t, err) + require.EqualError(t, err, ErrRequestTimeout.Error()) + assert.Empty(t, replies) + + // stop the actor after some time + // this is due to the actor Waitgroup to gracefully close + lib.Pause(time.Second) + + err = sys.Stop(ctx) + }, + ) + t.Run( + "With Batch request with dead actor", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithReplyTimeout(replyTimeout), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, 
actorRef) + + // stop the actor + require.NoError(t, actorRef.Shutdown(ctx)) + + // create a message to send to the test actor + // send the message to the actor + replies, err := BatchAsk(ctx, actorRef, replyTimeout, new(testpb.TestTimeout), new(testpb.TestReply)) + // perform some assertions + require.Error(t, err) + assert.Empty(t, replies) + + err = sys.Stop(ctx) + }, + ) } func TestTell(t *testing.T) { - t.Run("With running actor", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - lib.Pause(time.Second) - - // create a message to send to the test actor - message := new(testpb.TestSend) - // send the message to the actor - err = Tell(ctx, actorRef, message) - // perform some assertions - require.NoError(t, err) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With stopped actor", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // Shutdown the actor after some time - lib.Pause(time.Second) - require.NoError(t, actorRef.Shutdown(ctx)) - - // create a message to send to the test actor - message := new(testpb.TestSend) - // send the message to the actor - err = Tell(ctx, actorRef, message) - // perform some assertions - require.Error(t, err) - assert.EqualError(t, err, ErrDead.Error()) - - err = sys.Stop(ctx) - }) - t.Run("With invalid remote message", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // create a message to send to the test actor - message := &internalpb.RemoteMessage{ - Message: &anypb.Any{}, - } - // send the message to the actor - err = Tell(ctx, actorRef, message) - require.Error(t, err) - - err = sys.Stop(ctx) - }) - t.Run("With Batch request", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // 
assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - lib.Pause(time.Second) - - // create a message to send to the test actor - // send the message to the actor - err = BatchTell(ctx, actorRef, new(testpb.TestSend), new(testpb.TestSend)) - // perform some assertions - require.NoError(t, err) - // wait for processing to be done - lib.Pause(500 * time.Millisecond) - require.EqualValues(t, 2, actorRef.ProcessedCount()-1) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With Batch request with a dead actor", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled()) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - lib.Pause(time.Second) - require.NoError(t, actorRef.Shutdown(ctx)) - - // create a message to send to the test actor - // send the message to the actor - err = BatchTell(ctx, actorRef, new(testpb.TestSend), new(testpb.TestSend)) - // perform some assertions - require.Error(t, err) - require.EqualError(t, err, ErrDead.Error()) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) -} + t.Run( + "With running actor", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) -func TestRemoteTell(t *testing.T) { - t.Run("With happy path", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - // create a message to send to the test actor - message := new(testpb.TestSend) - // send the message to the actor - for i := 0; i < 10; i++ { - err = RemoteTell(ctx, addr, message) + 
lib.Pause(time.Second) + + // create a message to send to the test actor + message := new(testpb.TestSend) + // send the message to the actor + err = Tell(ctx, actorRef, message) // perform some assertions require.NoError(t, err) - } - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With invalid message", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - err = RemoteTell(ctx, addr, nil) - // perform some assertions - require.Error(t, err) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With remote service failure", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // create a wrong address - addr := &goaktpb.Address{ - Host: host, - Port: 2222, - Name: "", - Id: "", - } - // create a message to send to the test actor - message := new(testpb.TestSend) - // send the message to the actor - err = RemoteTell(ctx, address.From(addr), message) - // perform some assertions - require.Error(t, err) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With remoting disabled", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - addr, err := RemoteLookup(ctx, host, remotingPort, 
actorName) - require.NoError(t, err) - - // let us disable remoting - actorsSystem := sys.(*actorSystem) - actorsSystem.remotingEnabled.Store(false) - - // create a message to send to the test actor - message := new(testpb.TestSend) - // send the message to the actor - err = RemoteTell(ctx, addr, message) - // perform some assertions - require.Error(t, err) - require.EqualError(t, err, "failed_precondition: remoting is not enabled") - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With Batch request", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - // create a message to send to the test actor - messages := make([]proto.Message, 10) - // send the message to the actor - for i := 0; i < 10; i++ { - messages[i] = new(testpb.TestSend) - } - - err = RemoteBatchTell(ctx, addr, messages...) - require.NoError(t, err) - - // wait for processing to complete on the actor side - lib.Pause(500 * time.Millisecond) - require.EqualValues(t, 10, actorRef.ProcessedCount()-1) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With Batch service failure", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // create a wrong address - addr := &goaktpb.Address{ - Host: host, - Port: 2222, - Name: "", - Id: "", - } - // create a message to send to the test actor - message := new(testpb.TestSend) - // send the message to the actor - err = RemoteBatchTell(ctx, address.From(addr), message) - // perform some assertions - require.Error(t, err) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With Batch when remoting is disabled", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create 
the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - // let us disable remoting - actorsSystem := sys.(*actorSystem) - actorsSystem.remotingEnabled.Store(false) - - // create a message to send to the test actor - message := new(testpb.TestSend) - // send the message to the actor - err = RemoteBatchTell(ctx, addr, message) - // perform some assertions - require.Error(t, err) - require.EqualError(t, err, "failed_precondition: remoting is not enabled") - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With actor not found", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - // stop the actor when wait for cleanup to take place - require.NoError(t, actorRef.Shutdown(ctx)) - lib.Pause(time.Second) - - // create a message to send to the test actor - message := new(testpb.TestSend) - // send the message to the actor - err = RemoteTell(ctx, addr, message) - // perform some assertions - require.Error(t, err) - require.Contains(t, err.Error(), "not found") - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With Batch actor not found", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - // stop the actor when wait for cleanup to take place - require.NoError(t, actorRef.Shutdown(ctx)) - 
lib.Pause(time.Second) - - // create a message to send to the test actor - message := new(testpb.TestSend) - // send the message to the actor - err = RemoteBatchTell(ctx, addr, message) - // perform some assertions - require.Error(t, err) - require.Contains(t, err.Error(), "not found") - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) -} -func TestRemoteAsk(t *testing.T) { - t.Run("With happy path", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - // create a message to send to the test actor - message := new(testpb.TestReply) - // send the message to the actor - reply, err := RemoteAsk(ctx, addr, message, time.Minute) - // perform some assertions - require.NoError(t, err) - require.NotNil(t, reply) - require.True(t, reply.MessageIs(new(testpb.Reply))) - - actual := new(testpb.Reply) - err = reply.UnmarshalTo(actual) - require.NoError(t, err) - - expected := &testpb.Reply{Content: "received message"} - assert.True(t, proto.Equal(expected, actual)) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With invalid message", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - // send the message to the actor - reply, err := RemoteAsk(ctx, addr, nil, time.Minute) - // perform some assertions - require.Error(t, err) - require.Nil(t, reply) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With remote service failure", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - 
WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr := &goaktpb.Address{ - Host: host, - Port: 2222, - Name: "", - Id: "", - } - - // create a message to send to the test actor - message := new(testpb.TestReply) - // send the message to the actor - reply, err := RemoteAsk(ctx, address.From(addr), message, time.Minute) - // perform some assertions - require.Error(t, err) - require.Nil(t, reply) - - // stop the actor after some time - lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, sys.Stop(ctx)) - }) - }) - t.Run("With remoting disabled", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - // let us disable remoting - actorsSystem := sys.(*actorSystem) - actorsSystem.remotingEnabled.Store(false) - - // create a message to send to the test actor - message := new(testpb.TestReply) - // send the message to the actor - reply, err := RemoteAsk(ctx, addr, message, time.Minute) - // perform some assertions - require.Error(t, err) - require.EqualError(t, err, "failed_precondition: remoting is not enabled") - require.Nil(t, reply) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With Batch request", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - // create a message to send to the test actor - message := new(testpb.TestReply) - // send the message to the actor - replies, err := RemoteBatchAsk(ctx, addr, message) - // perform some assertions - require.NoError(t, err) - 
require.Len(t, replies, 1) - require.NotNil(t, replies[0]) - require.True(t, replies[0].MessageIs(new(testpb.Reply))) - - actual := new(testpb.Reply) - err = replies[0].UnmarshalTo(actual) - require.NoError(t, err) - - expected := &testpb.Reply{Content: "received message"} - assert.True(t, proto.Equal(expected, actual)) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With Batch service failure", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr := &goaktpb.Address{ - Host: host, - Port: 2222, - Name: "", - Id: "", - } - - // create a message to send to the test actor - message := new(testpb.TestReply) - // send the message to the actor - reply, err := RemoteBatchAsk(ctx, address.From(addr), message) - // perform some assertions - require.Error(t, err) - require.Nil(t, reply) - - // stop the actor after some time - lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, sys.Stop(ctx)) - }) - }) - t.Run("With Batch when remoting is disabled", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - // let us disable remoting - actorsSystem := sys.(*actorSystem) - actorsSystem.remotingEnabled.Store(false) - - // create a message to send to the test actor - message := new(testpb.TestReply) - // send the message to the actor - reply, err := RemoteBatchAsk(ctx, addr, message) - // perform some assertions - require.Error(t, err) - require.EqualError(t, err, "failed_precondition: remoting is not enabled") - require.Nil(t, reply) - - // stop the actor after some time - lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, sys.Stop(ctx)) - }) - }) - t.Run("With actor not found", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - 
WithLogger(logger), - WithPassivationDisabled(), - WithJanitorInterval(30*time.Millisecond), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - // stop the actor when wait for cleanup to take place - require.NoError(t, actorRef.Shutdown(ctx)) - lib.Pause(time.Second) - - // create a message to send to the test actor - message := new(testpb.TestReply) - // send the message to the actor - reply, err := RemoteAsk(ctx, addr, message, time.Minute) - // perform some assertions - require.Error(t, err) - require.Contains(t, err.Error(), "not found") - require.Nil(t, reply) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) - t.Run("With Batch actor not found", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithJanitorInterval(30*time.Millisecond), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - // stop the actor when wait for cleanup to take place - require.NoError(t, actorRef.Shutdown(ctx)) - lib.Pause(time.Second) - - // create a message to send to the test actor - message := new(testpb.TestReply) - // send the message to the actor - reply, err := RemoteBatchAsk(ctx, addr, message) - // perform some assertions - require.Error(t, err) - require.Contains(t, err.Error(), "not found") - require.Nil(t, reply) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) -} + // stop the actor after some time + lib.Pause(time.Second) -func TestAPIRemoteLookup(t *testing.T) { - t.Run("When remoting is not enabled", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // let us disable remoting - actorsSystem := sys.(*actorSystem) - actorsSystem.remotingEnabled.Store(false) - - // create a test actor - actorName := "test" - // get the 
address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.Error(t, err) - require.EqualError(t, err, "failed_precondition: remoting is not enabled") - require.Nil(t, addr) - - t.Cleanup(func() { - assert.NoError(t, sys.Stop(ctx)) - }) - }) -} + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With stopped actor", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) -func TestAPIRemoteReSpawn(t *testing.T) { - t.Run("When remoting is not enabled", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // let us disable remoting - actorsSystem := sys.(*actorSystem) - actorsSystem.remotingEnabled.Store(false) - - // create a test actor - actorName := "test" - // get the address of the actor - err = RemoteReSpawn(ctx, host, remotingPort, actorName) - require.Error(t, err) - require.EqualError(t, err, "failed_precondition: remoting is not enabled") - - t.Cleanup(func() { - assert.NoError(t, sys.Stop(ctx)) - }) - }) - t.Run("When remoting is enabled", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // assert the actor restart count - pid := actorRef - assert.Zero(t, pid.restartCount.Load()) - - // get the address of the actor - err = RemoteReSpawn(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - assert.EqualValues(t, 1, pid.restartCount.Load()) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) -} + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) -func TestAPIRemoteStop(t *testing.T) { - t.Run("When remoting is not enabled", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, 
err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // let us disable remoting - actorsSystem := sys.(*actorSystem) - actorsSystem.remotingEnabled.Store(false) - - // create a test actor - actorName := "test" - // get the address of the actor - err = RemoteStop(ctx, host, remotingPort, actorName) - require.Error(t, err) - require.EqualError(t, err, "failed_precondition: remoting is not enabled") - - t.Cleanup(func() { - assert.NoError(t, sys.Stop(ctx)) - }) - }) - t.Run("When remoting is enabled", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "0.0.0.0" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - lib.Pause(time.Second) - - // create a test actor - actorName := "test" - actor := newTestActor() - actorRef, err := sys.Spawn(ctx, actorName, actor) - require.NoError(t, err) - assert.NotNil(t, actorRef) - - // assert the actor restart count - pid := actorRef - assert.Zero(t, pid.restartCount.Load()) - - // get the address of the actor - err = RemoteStop(ctx, host, remotingPort, actorName) - require.NoError(t, err) - - assert.Empty(t, sys.Actors()) - - // stop the actor after some time - lib.Pause(time.Second) - - err = sys.Stop(ctx) - assert.NoError(t, err) - }) -} + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // Shutdown the actor after some time + lib.Pause(time.Second) + require.NoError(t, actorRef.Shutdown(ctx)) + + // create a message to send to the test actor + message := new(testpb.TestSend) + // send the message to the actor + err = Tell(ctx, actorRef, message) + // perform some assertions + require.Error(t, err) + assert.EqualError(t, err, ErrDead.Error()) + + err = sys.Stop(ctx) + }, + ) + t.Run( + "With invalid remote message", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // create a message to send to the test actor + message := &internalpb.RemoteMessage{ + Message: &anypb.Any{}, + } + // send the message to the actor + err = Tell(ctx, actorRef, message) + require.Error(t, err) -func TestAPIRemoteSpawn(t *testing.T) { - t.Run("When remoting is enabled", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - ports := dynaport.Get(1) - remotingPort := ports[0] - host := "127.0.0.1" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - 
WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - // create an actor implementation and register it - actor := &exchanger{} - actorName := uuid.NewString() - - // fetching the address of the that actor should return nil address - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - require.Nil(t, addr) - - // register the actor - err = sys.Register(ctx, actor) - require.NoError(t, err) - - // spawn the remote actor - err = RemoteSpawn(ctx, host, remotingPort, actorName, "actors.exchanger") - require.NoError(t, err) - - // re-fetching the address of the actor should return not nil address after start - addr, err = RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - require.NotNil(t, addr) - - // send the message to exchanger actor one using remote messaging - reply, err := RemoteAsk(ctx, addr, new(testpb.TestReply), time.Minute) - - require.NoError(t, err) - require.NotNil(t, reply) - require.True(t, reply.MessageIs(new(testpb.Reply))) - - actual := new(testpb.Reply) - err = reply.UnmarshalTo(actual) - require.NoError(t, err) - - expected := new(testpb.Reply) - assert.True(t, proto.Equal(expected, actual)) - - t.Cleanup(func() { err = sys.Stop(ctx) + }, + ) + t.Run( + "With Batch request", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) assert.NoError(t, err) - }) - }) - - t.Run("When actor not registered", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - ports := dynaport.Get(1) - remotingPort := ports[0] - host := "127.0.0.1" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - // create an actor implementation and register it - actorName := uuid.NewString() - - // fetching the address of the that actor should return nil address - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) - require.NoError(t, err) - require.Nil(t, addr) - - // spawn the remote actor - err = RemoteSpawn(ctx, host, remotingPort, actorName, "actors.exchanger") - require.Error(t, err) - assert.EqualError(t, err, ErrTypeNotRegistered.Error()) - - t.Cleanup(func() { + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + lib.Pause(time.Second) + + // create a message to send to the test actor + // send the message to the actor + err = BatchTell(ctx, actorRef, new(testpb.TestSend), new(testpb.TestSend)) + // perform some assertions + require.NoError(t, err) + // wait for processing to be done + lib.Pause(500 * time.Millisecond) + require.EqualValues(t, 2, actorRef.ProcessedCount()-1) + err = sys.Stop(ctx) assert.NoError(t, err) - }) - }) - - t.Run("When remoting is 
not enabled", func(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - ports := dynaport.Get(1) - remotingPort := ports[0] - host := "127.0.0.1" - - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - ) - // assert there are no error - require.NoError(t, err) - - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) - - // create an actor implementation and register it - actorName := uuid.NewString() - - // spawn the remote actor - err = RemoteSpawn(ctx, host, remotingPort, actorName, "actors.exchanger") - require.Error(t, err) - - t.Cleanup(func() { + }, + ) + t.Run( + "With Batch request with a dead actor", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + lib.Pause(time.Second) + require.NoError(t, actorRef.Shutdown(ctx)) + + // create a message to send to the test actor + // send the message to the actor + err = BatchTell(ctx, actorRef, new(testpb.TestSend), new(testpb.TestSend)) + // perform some assertions + require.Error(t, err) + require.EqualError(t, err, ErrDead.Error()) + err = sys.Stop(ctx) assert.NoError(t, err) - }) - }) + }, + ) } diff --git a/actors/errors.go b/actors/errors.go index 2bcd0969..2f893160 100644 --- a/actors/errors.go +++ b/actors/errors.go @@ -85,6 +85,8 @@ var ( ErrFullMailbox = errors.New("mailbox is full") // ErrSchedulerNotStarted is returned when the scheduler has not started ErrSchedulerNotStarted = errors.New("scheduler has not started") + // ErrInvalidMessage is returned when an invalid remote message is sent + ErrInvalidMessage = func(err error) error { return fmt.Errorf("invalid remote message: %w", err) } ) // eof returns true if the given error is an EOF error diff --git a/actors/option.go b/actors/option.go index c8d0215c..fba69dfb 100644 --- a/actors/option.go +++ b/actors/option.go @@ -51,123 +51,153 @@ func (f OptionFunc) Apply(c *actorSystem) { // WithExpireActorAfter sets the actor expiry duration. 
// After such duration an idle actor will be expired and removed from the actor system func WithExpireActorAfter(duration time.Duration) Option { - return OptionFunc(func(a *actorSystem) { - a.expireActorAfter = duration - }) + return OptionFunc( + func(a *actorSystem) { + a.expireActorAfter = duration + }, + ) } // WithLogger sets the actor system custom log func WithLogger(logger log.Logger) Option { - return OptionFunc(func(a *actorSystem) { - a.logger = logger - }) + return OptionFunc( + func(a *actorSystem) { + a.logger = logger + }, + ) } // WithReplyTimeout sets how long in seconds an actor should reply a command // in a receive-reply pattern func WithReplyTimeout(timeout time.Duration) Option { - return OptionFunc(func(a *actorSystem) { - a.askTimeout = timeout - }) + return OptionFunc( + func(a *actorSystem) { + a.askTimeout = timeout + }, + ) } // WithActorInitMaxRetries sets the number of times to retry an actor init process func WithActorInitMaxRetries(max int) Option { - return OptionFunc(func(a *actorSystem) { - a.actorInitMaxRetries = max - }) + return OptionFunc( + func(a *actorSystem) { + a.actorInitMaxRetries = max + }, + ) } // WithPassivationDisabled disable the passivation mode func WithPassivationDisabled() Option { - return OptionFunc(func(a *actorSystem) { - a.expireActorAfter = -1 - }) + return OptionFunc( + func(a *actorSystem) { + a.expireActorAfter = -1 + }, + ) } // WithSupervisorDirective sets the supervisor strategy directive func WithSupervisorDirective(directive SupervisorDirective) Option { - return OptionFunc(func(a *actorSystem) { - a.supervisorDirective = directive - }) + return OptionFunc( + func(a *actorSystem) { + a.supervisorDirective = directive + }, + ) } // WithRemoting enables remoting on the actor system func WithRemoting(host string, port int32) Option { - return OptionFunc(func(a *actorSystem) { - a.remotingEnabled.Store(true) - a.port = port - a.host = host - }) + return OptionFunc( + func(a *actorSystem) { + a.remotingEnabled.Store(true) + a.port = port + a.host = host + }, + ) } // WithClustering enables the cluster mode. // Deprecated: use rather WithCluster which offers a fluent api to set cluster configuration func WithClustering(provider discovery.Provider, partitionCount uint64, minimumPeersQuorum uint16, discoveryPort, peersPort int, kinds ...Actor) Option { - return OptionFunc(func(a *actorSystem) { - a.clusterEnabled.Store(true) - replicaCount := 2 - if minimumPeersQuorum < 2 { - replicaCount = 1 - } - - a.clusterConfig = NewClusterConfig(). - WithDiscovery(provider). - WithPartitionCount(partitionCount). - WithDiscoveryPort(discoveryPort). - WithPeersPort(peersPort). - WithMinimumPeersQuorum(uint32(minimumPeersQuorum)). - WithReplicaCount(uint32(replicaCount)). - WithKinds(kinds...) - }) + return OptionFunc( + func(a *actorSystem) { + a.clusterEnabled.Store(true) + replicaCount := 2 + if minimumPeersQuorum < 2 { + replicaCount = 1 + } + + a.clusterConfig = NewClusterConfig(). + WithDiscovery(provider). + WithPartitionCount(partitionCount). + WithDiscoveryPort(discoveryPort). + WithPeersPort(peersPort). + WithMinimumPeersQuorum(uint32(minimumPeersQuorum)). + WithReplicaCount(uint32(replicaCount)). + WithKinds(kinds...) 
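The option.go changes in this hunk only re-wrap the closures handed to OptionFunc; the functional-options call shape itself is unchanged. A minimal usage sketch, built only from constructors that appear in this patch (the name, host and port literals are placeholders):

    // illustrative sketch only; not part of the patch
    sys, err := NewActorSystem(
        "example",
        WithLogger(log.DiscardLogger),
        WithPassivationDisabled(),
        WithRemoting("127.0.0.1", int32(4000)),
        WithShutdownTimeout(time.Second),
    )
    if err != nil {
        // the configuration was rejected
    }
    _ = sys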
+ }, + ) } // WithCluster enables the cluster mode func WithCluster(config *ClusterConfig) Option { - return OptionFunc(func(a *actorSystem) { - a.clusterEnabled.Store(true) - a.clusterConfig = config - }) + return OptionFunc( + func(a *actorSystem) { + a.clusterEnabled.Store(true) + a.clusterConfig = config + }, + ) } // WithShutdownTimeout sets the shutdown timeout func WithShutdownTimeout(timeout time.Duration) Option { - return OptionFunc(func(a *actorSystem) { - a.shutdownTimeout = timeout - }) + return OptionFunc( + func(a *actorSystem) { + a.shutdownTimeout = timeout + }, + ) } // WithStash sets the stash buffer size func WithStash() Option { - return OptionFunc(func(a *actorSystem) { - a.stashEnabled = true - }) + return OptionFunc( + func(a *actorSystem) { + a.stashEnabled = true + }, + ) } // WithPartitionHasher sets the partition hasher. func WithPartitionHasher(hasher hash.Hasher) Option { - return OptionFunc(func(a *actorSystem) { - a.partitionHasher = hasher - }) + return OptionFunc( + func(a *actorSystem) { + a.partitionHasher = hasher + }, + ) } // WithActorInitTimeout sets how long in seconds an actor start timeout func WithActorInitTimeout(timeout time.Duration) Option { - return OptionFunc(func(a *actorSystem) { - a.actorInitTimeout = timeout - }) + return OptionFunc( + func(a *actorSystem) { + a.actorInitTimeout = timeout + }, + ) } // WithPeerStateLoopInterval sets the peer state loop interval func WithPeerStateLoopInterval(interval time.Duration) Option { - return OptionFunc(func(system *actorSystem) { - system.peersStateLoopInterval = interval - }) + return OptionFunc( + func(system *actorSystem) { + system.peersStateLoopInterval = interval + }, + ) } // WithJanitorInterval sets the janitor interval func WithJanitorInterval(interval time.Duration) Option { - return OptionFunc(func(system *actorSystem) { - system.janitorInterval = interval - }) + return OptionFunc( + func(system *actorSystem) { + system.janitorInterval = interval + }, + ) } diff --git a/actors/option_test.go b/actors/option_test.go index aa4b1244..44cf0e80 100644 --- a/actors/option_test.go +++ b/actors/option_test.go @@ -41,6 +41,7 @@ func TestOption(t *testing.T) { atomicTrue.Store(true) clusterConfig := NewClusterConfig() hasher := hash.DefaultHasher() + testCases := []struct { name string option Option diff --git a/actors/pid.go b/actors/pid.go index 2a04568d..f764b12b 100644 --- a/actors/pid.go +++ b/actors/pid.go @@ -28,7 +28,6 @@ import ( "context" "errors" "fmt" - stdhttp "net/http" "os" "strings" "sync" @@ -46,9 +45,7 @@ import ( "github.com/tochemey/goakt/v2/goaktpb" "github.com/tochemey/goakt/v2/internal/errorschain" "github.com/tochemey/goakt/v2/internal/eventstream" - "github.com/tochemey/goakt/v2/internal/http" "github.com/tochemey/goakt/v2/internal/internalpb" - "github.com/tochemey/goakt/v2/internal/internalpb/internalpbconnect" "github.com/tochemey/goakt/v2/internal/slice" "github.com/tochemey/goakt/v2/internal/types" "github.com/tochemey/goakt/v2/log" @@ -146,9 +143,6 @@ type PID struct { // supervisor strategy supervisorDirective SupervisorDirective - // http client - httpClient *stdhttp.Client - // specifies the actor behavior stack behaviorStack *behaviorStack @@ -172,6 +166,8 @@ type PID struct { // atomic flag indicating whether the actor is processing messages processingMessages atomic.Int32 + + remoting *Remoting } // newPID creates a new pid @@ -198,7 +194,6 @@ func newPID(ctx context.Context, address *address.Address, actor Actor, opts ... 
address: address, fieldsLocker: new(sync.RWMutex), stopLocker: new(sync.Mutex), - httpClient: http.NewClient(), mailbox: NewUnboundedMailbox(), stashBuffer: nil, stashLocker: &sync.Mutex{}, @@ -211,6 +206,7 @@ func newPID(ctx context.Context, address *address.Address, actor Actor, opts ... watchersNotificationStopSignal: make(chan types.Unit, 1), receiveSignal: make(chan types.Unit, 1), receiveStopSignal: make(chan types.Unit, 1), + remoting: NewRemoting(), } pid.initMaxRetries.Store(DefaultInitMaxRetries) @@ -408,10 +404,12 @@ func (pid *PID) Restart(ctx context.Context) error { pid.restartCount.Inc() if pid.eventsStream != nil { - pid.eventsStream.Publish(eventsTopic, &goaktpb.ActorRestarted{ - Address: pid.Address().Address, - RestartedAt: timestamppb.Now(), - }) + pid.eventsStream.Publish( + eventsTopic, &goaktpb.ActorRestarted{ + Address: pid.Address().Address, + RestartedAt: timestamppb.Now(), + }, + ) } return nil @@ -470,6 +468,7 @@ func (pid *PID) SpawnChild(ctx context.Context, name string, actor Actor, opts . withEventsStream(pid.eventsStream), withInitTimeout(pid.initTimeout.Load()), withShutdownTimeout(pid.shutdownTimeout.Load()), + withRemoting(pid.remoting), } spawnConfig := newSpawnConfig(opts...) @@ -477,7 +476,8 @@ func (pid *PID) SpawnChild(ctx context.Context, name string, actor Actor, opts . pidOptions = append(pidOptions, withMailbox(spawnConfig.mailbox)) } - cid, err := newPID(ctx, + cid, err := newPID( + ctx, childAddress, actor, pidOptions..., @@ -496,11 +496,13 @@ func (pid *PID) SpawnChild(ctx context.Context, name string, actor Actor, opts . pid.Watch(cid) if eventsStream != nil { - eventsStream.Publish(eventsTopic, &goaktpb.ActorChildCreated{ - Address: cid.Address().Address, - CreatedAt: timestamppb.Now(), - Parent: pid.Address().Address, - }) + eventsStream.Publish( + eventsTopic, &goaktpb.ActorChildCreated{ + Address: cid.Address().Address, + CreatedAt: timestamppb.Now(), + Parent: pid.Address().Address, + }, + ) } // set the actor in the given actor system registry @@ -532,10 +534,12 @@ func (pid *PID) PipeTo(ctx context.Context, to *PID, task future.Task) error { return ErrDead } - go pid.handleCompletion(ctx, &taskCompletion{ - Receiver: to, - Task: task, - }) + go pid.handleCompletion( + ctx, &taskCompletion{ + Receiver: to, + Task: task, + }, + ) return nil } @@ -651,12 +655,18 @@ func (pid *PID) BatchAsk(ctx context.Context, to *PID, messages ...proto.Message // RemoteLookup look for an actor address on a remote node. 
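The PID hunks above drop the per-actor HTTP client in favor of a *Remoting handle: newPID now creates one with NewRemoting() and SpawnChild forwards it to children via withRemoting, and the remote-messaging methods that follow all go through it. A rough sketch of the handle on its own, assuming only the Client and Close methods visible in this patch (host and port are placeholders):

    // illustrative sketch only; not part of the patch
    remoting := NewRemoting()
    defer remoting.Close()
    // Client returns the connect client used by RemoteLookup, RemoteTell, RemoteAsk, etc.
    client := remoting.Client("127.0.0.1", 4000)
    _ = client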
func (pid *PID) RemoteLookup(ctx context.Context, host string, port int, name string) (addr *goaktpb.Address, err error) { - remoteClient := pid.remotingClient(host, port) - request := connect.NewRequest(&internalpb.RemoteLookupRequest{ - Host: host, - Port: int32(port), - Name: name, - }) + if pid.remoting == nil { + return nil, ErrRemotingDisabled + } + + remoteClient := pid.remoting.Client(host, port) + request := connect.NewRequest( + &internalpb.RemoteLookupRequest{ + Host: host, + Port: int32(port), + Name: name, + }, + ) response, err := remoteClient.RemoteLookup(ctx, request) if err != nil { @@ -672,12 +682,16 @@ func (pid *PID) RemoteLookup(ctx context.Context, host string, port int, name st // RemoteTell sends a message to an actor remotely without expecting any reply func (pid *PID) RemoteTell(ctx context.Context, to *address.Address, message proto.Message) error { + if pid.remoting == nil { + return ErrRemotingDisabled + } + marshaled, err := anypb.New(message) if err != nil { return err } - remoteService := pid.remotingClient(to.GetHost(), int(to.GetPort())) + remoteService := pid.remoting.Client(to.GetHost(), int(to.GetPort())) sender := &goaktpb.Address{ Host: pid.Address().Host(), @@ -720,12 +734,16 @@ func (pid *PID) RemoteTell(ctx context.Context, to *address.Address, message pro // RemoteAsk sends a synchronous message to another actor remotely and expect a response. func (pid *PID) RemoteAsk(ctx context.Context, to *address.Address, message proto.Message) (response *anypb.Any, err error) { + if pid.remoting == nil { + return nil, ErrRemotingDisabled + } + marshaled, err := anypb.New(message) if err != nil { return nil, err } - remoteService := pid.remotingClient(to.GetHost(), int(to.GetPort())) + remoteService := pid.remoting.Client(to.GetHost(), int(to.GetPort())) senderAddress := pid.Address() sender := &goaktpb.Address{ @@ -782,7 +800,11 @@ func (pid *PID) RemoteAsk(ctx context.Context, to *address.Address, message prot // RemoteBatchTell sends a batch of messages to a remote actor in a way fire-and-forget manner // Messages are processed one after the other in the order they are sent. -func (pid *PID) RemoteBatchTell(ctx context.Context, to *address.Address, messages ...proto.Message) error { +func (pid *PID) RemoteBatchTell(ctx context.Context, to *address.Address, messages []proto.Message) error { + if pid.remoting == nil { + return ErrRemotingDisabled + } + if len(messages) == 1 { return pid.RemoteTell(ctx, to, messages[0]) } @@ -801,16 +823,18 @@ func (pid *PID) RemoteBatchTell(ctx context.Context, to *address.Address, messag return ErrInvalidRemoteMessage(err) } - requests = append(requests, &internalpb.RemoteTellRequest{ - RemoteMessage: &internalpb.RemoteMessage{ - Sender: sender, - Receiver: to.Address, - Message: packed, + requests = append( + requests, &internalpb.RemoteTellRequest{ + RemoteMessage: &internalpb.RemoteMessage{ + Sender: sender, + Receiver: to.Address, + Message: packed, + }, }, - }) + ) } - remoteService := pid.remotingClient(to.GetHost(), int(to.GetPort())) + remoteService := pid.remoting.Client(to.GetHost(), int(to.GetPort())) stream := remoteService.RemoteTell(ctx) for _, request := range requests { @@ -836,7 +860,11 @@ func (pid *PID) RemoteBatchTell(ctx context.Context, to *address.Address, messag // RemoteBatchAsk sends a synchronous bunch of messages to a remote actor and expect responses in the same order as the messages. // Messages are processed one after the other in the order they are sent. 
// This can hinder performance if it is not properly used. -func (pid *PID) RemoteBatchAsk(ctx context.Context, to *address.Address, messages ...proto.Message) (responses []*anypb.Any, err error) { +func (pid *PID) RemoteBatchAsk(ctx context.Context, to *address.Address, messages []proto.Message) (responses []*anypb.Any, err error) { + if pid.remoting == nil { + return nil, ErrRemotingDisabled + } + sender := &goaktpb.Address{ Host: pid.Address().Host(), Port: int32(pid.Address().Port()), @@ -851,16 +879,18 @@ func (pid *PID) RemoteBatchAsk(ctx context.Context, to *address.Address, message return nil, ErrInvalidRemoteMessage(err) } - requests = append(requests, &internalpb.RemoteAskRequest{ - RemoteMessage: &internalpb.RemoteMessage{ - Sender: sender, - Receiver: to.Address, - Message: packed, + requests = append( + requests, &internalpb.RemoteAskRequest{ + RemoteMessage: &internalpb.RemoteMessage{ + Sender: sender, + Receiver: to.Address, + Message: packed, + }, }, - }) + ) } - remoteService := pid.remotingClient(to.GetHost(), int(to.GetPort())) + remoteService := pid.remoting.Client(to.GetHost(), int(to.GetPort())) stream := remoteService.RemoteAsk(ctx) errc := make(chan error, 1) @@ -902,12 +932,19 @@ func (pid *PID) RemoteBatchAsk(ctx context.Context, to *address.Address, message // RemoteStop stops an actor on a remote node func (pid *PID) RemoteStop(ctx context.Context, host string, port int, name string) error { - remoteService := pid.remotingClient(host, port) - request := connect.NewRequest(&internalpb.RemoteStopRequest{ - Host: host, - Port: int32(port), - Name: name, - }) + if pid.remoting == nil { + return ErrRemotingDisabled + } + + remoteService := pid.remoting.Client(host, port) + request := connect.NewRequest( + &internalpb.RemoteStopRequest{ + Host: host, + Port: int32(port), + Name: name, + }, + ) + if _, err := remoteService.RemoteStop(ctx, request); err != nil { code := connect.CodeOf(err) if code == connect.CodeNotFound { @@ -920,13 +957,20 @@ func (pid *PID) RemoteStop(ctx context.Context, host string, port int, name stri // RemoteSpawn creates an actor on a remote node. The given actor needs to be registered on the remote node using the Register method of ActorSystem func (pid *PID) RemoteSpawn(ctx context.Context, host string, port int, name, actorType string) error { - remoteService := pid.remotingClient(host, port) - request := connect.NewRequest(&internalpb.RemoteSpawnRequest{ - Host: host, - Port: int32(port), - ActorName: name, - ActorType: actorType, - }) + if pid.remoting == nil { + return ErrRemotingDisabled + } + + remoteService := pid.remoting.Client(host, port) + request := connect.NewRequest( + &internalpb.RemoteSpawnRequest{ + Host: host, + Port: int32(port), + ActorName: name, + ActorType: actorType, + }, + ) + if _, err := remoteService.RemoteSpawn(ctx, request); err != nil { code := connect.CodeOf(err) if code == connect.CodeFailedPrecondition { @@ -944,12 +988,19 @@ func (pid *PID) RemoteSpawn(ctx context.Context, host string, port int, name, ac // RemoteReSpawn restarts an actor on a remote node. 
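Every remote method in this file now begins with the same guard: when pid.remoting is nil the call returns ErrRemotingDisabled immediately instead of dialing a client (RemoteStop and RemoteSpawn above, RemoteReSpawn below). A small sketch of what a caller can expect; matching with errors.Is assumes ErrRemotingDisabled is a plain sentinel like the other errors in errors.go, whereas the tests in this patch compare error strings:

    // illustrative sketch only; not part of the patch
    if err := pid.RemoteStop(ctx, host, port, actorName); errors.Is(err, ErrRemotingDisabled) {
        // this PID was built without a remoting handle
    }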
func (pid *PID) RemoteReSpawn(ctx context.Context, host string, port int, name string) error { - remoteService := pid.remotingClient(host, port) - request := connect.NewRequest(&internalpb.RemoteReSpawnRequest{ - Host: host, - Port: int32(port), - Name: name, - }) + if pid.remoting == nil { + return ErrRemotingDisabled + } + + remoteService := pid.remoting.Client(host, port) + request := connect.NewRequest( + &internalpb.RemoteReSpawnRequest{ + Host: host, + Port: int32(port), + Name: name, + }, + ) + if _, err := remoteService.RemoteReSpawn(ctx, request); err != nil { code := connect.CodeOf(err) if code == connect.CodeNotFound { @@ -985,10 +1036,12 @@ func (pid *PID) Shutdown(ctx context.Context) error { } if pid.eventsStream != nil { - pid.eventsStream.Publish(eventsTopic, &goaktpb.ActorStopped{ - Address: pid.Address().Address, - StoppedAt: timestamppb.Now(), - }) + pid.eventsStream.Publish( + eventsTopic, &goaktpb.ActorStopped{ + Address: pid.Address().Address, + StoppedAt: timestamppb.Now(), + }, + ) } pid.stopLocker.Unlock() @@ -1141,10 +1194,12 @@ func (pid *PID) init(ctx context.Context) error { pid.logger.Info("Initialization process successfully completed.") if pid.eventsStream != nil { - pid.eventsStream.Publish(eventsTopic, &goaktpb.ActorStarted{ - Address: pid.Address().Address, - StartedAt: timestamppb.Now(), - }) + pid.eventsStream.Publish( + eventsTopic, &goaktpb.ActorStarted{ + Address: pid.Address().Address, + StartedAt: timestamppb.Now(), + }, + ) } cancel() @@ -1195,8 +1250,10 @@ func (pid *PID) freeWatchees(ctx context.Context) error { pid.logger.Debugf("watcher=(%s) unwatching actor=(%s)", pid.ID(), watched.ID()) pid.UnWatch(watched) if err := watched.Shutdown(ctx); err != nil { - errwrap := fmt.Errorf("watcher=(%s) failed to unwatch actor=(%s): %w", - pid.ID(), watched.ID(), err) + errwrap := fmt.Errorf( + "watcher=(%s) failed to unwatch actor=(%s): %w", + pid.ID(), watched.ID(), err, + ) return errwrap } pid.logger.Debugf("watcher=(%s) successfully unwatch actor=(%s)", pid.ID(), watched.ID()) @@ -1213,7 +1270,8 @@ func (pid *PID) freeChildren(ctx context.Context) error { if err := child.Shutdown(ctx); err != nil { errwrap := fmt.Errorf( "parent=(%s) failed to disown child=(%s): %w", pid.ID(), child.ID(), - err) + err, + ) return errwrap } pid.logger.Debugf("parent=(%s) successfully disown child=(%s)", pid.ID(), child.ID()) @@ -1339,7 +1397,10 @@ func (pid *PID) doStop(ctx context.Context) error { }() <-tickerStopSig - pid.httpClient.CloseIdleConnections() + if pid.remoting != nil { + pid.remoting.Close() + } + pid.watchersNotificationStopSignal <- types.Unit{} pid.receiveStopSignal <- types.Unit{} @@ -1416,20 +1477,14 @@ func (pid *PID) toDeadletterQueue(receiveCtx *ReceiveContext, err error) { senderAddr = receiveCtx.Sender().Address().Address } - pid.eventsStream.Publish(eventsTopic, &goaktpb.Deadletter{ - Sender: senderAddr, - Receiver: pid.Address().Address, - Message: msg, - SendTime: timestamppb.Now(), - Reason: err.Error(), - }) -} - -// remotingClient returns an instance of the Remote Service client -func (pid *PID) remotingClient(host string, port int) internalpbconnect.RemotingServiceClient { - return internalpbconnect.NewRemotingServiceClient( - pid.httpClient, - http.URL(host, port), + pid.eventsStream.Publish( + eventsTopic, &goaktpb.Deadletter{ + Sender: senderAddr, + Receiver: pid.Address().Address, + Message: msg, + SendTime: timestamppb.Now(), + Reason: err.Error(), + }, ) } @@ -1487,6 +1542,7 @@ func (pid *PID) supervise(cid *PID, watcher *watcher) 
{ if errors.Is(err, ErrDead) { return } + pid.logger.Errorf("child actor=(%s) is failing: Err=%v", cid.ID(), err) switch directive := pid.supervisorDirective.(type) { case *StopDirective: diff --git a/actors/pid_option.go b/actors/pid_option.go index 75113ace..566ed0eb 100644 --- a/actors/pid_option.go +++ b/actors/pid_option.go @@ -119,3 +119,10 @@ func withInitTimeout(duration time.Duration) pidOption { pid.initTimeout.Store(duration) } } + +// withRemoting set the remoting feature +func withRemoting(remoting *Remoting) pidOption { + return func(pid *PID) { + pid.remoting = remoting + } +} diff --git a/actors/pid_test.go b/actors/pid_test.go index 7f9ea077..bbc71119 100644 --- a/actors/pid_test.go +++ b/actors/pid_test.go @@ -919,68 +919,130 @@ func TestMessaging(t *testing.T) { }) } func TestRemoting(t *testing.T) { - // create the context - ctx := context.TODO() - // define the logger to use - logger := log.DiscardLogger - // generate the remoting port - nodePorts := dynaport.Get(1) - remotingPort := nodePorts[0] - host := "127.0.0.1" + t.Run("When remoting is enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "127.0.0.1" - // create the actor system - sys, err := NewActorSystem("test", - WithLogger(logger), - WithPassivationDisabled(), - WithRemoting(host, int32(remotingPort)), - ) - // assert there are no error - require.NoError(t, err) + // create the actor system + sys, err := NewActorSystem("test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) - // start the actor system - err = sys.Start(ctx) - assert.NoError(t, err) + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) - // create an exchanger one - actorName1 := "Exchange1" - actorRef1, err := sys.Spawn(ctx, actorName1, &exchanger{}) - require.NoError(t, err) - assert.NotNil(t, actorRef1) + // create an exchanger one + actorName1 := "Exchange1" + actorRef1, err := sys.Spawn(ctx, actorName1, &exchanger{}) + require.NoError(t, err) + assert.NotNil(t, actorRef1) - // create an exchanger two - actorName2 := "Exchange2" - actorRef2, err := sys.Spawn(ctx, actorName2, &exchanger{}) - require.NoError(t, err) - assert.NotNil(t, actorRef2) + // create an exchanger two + actorName2 := "Exchange2" + actorRef2, err := sys.Spawn(ctx, actorName2, &exchanger{}) + require.NoError(t, err) + assert.NotNil(t, actorRef2) - // get the address of the exchanger actor one - addr1, err := actorRef2.RemoteLookup(ctx, host, remotingPort, actorName1) - require.NoError(t, err) + // get the address of the exchanger actor one + addr1, err := actorRef2.RemoteLookup(ctx, host, remotingPort, actorName1) + require.NoError(t, err) - // send the message to exchanger actor one using remote messaging - reply, err := actorRef2.RemoteAsk(ctx, address.From(addr1), new(testpb.TestReply)) - // perform some assertions - require.NoError(t, err) - require.NotNil(t, reply) - require.True(t, reply.MessageIs(new(testpb.Reply))) + // send the message to exchanger actor one using remote messaging + reply, err := actorRef2.RemoteAsk(ctx, address.From(addr1), new(testpb.TestReply)) + // perform some assertions + require.NoError(t, err) + require.NotNil(t, reply) + require.True(t, reply.MessageIs(new(testpb.Reply))) - actual := new(testpb.Reply) - err = 
reply.UnmarshalTo(actual) - require.NoError(t, err) + actual := new(testpb.Reply) + err = reply.UnmarshalTo(actual) + require.NoError(t, err) - expected := new(testpb.Reply) - assert.True(t, proto.Equal(expected, actual)) + expected := new(testpb.Reply) + assert.True(t, proto.Equal(expected, actual)) - // send a message to stop the first exchange actor - err = actorRef2.RemoteTell(ctx, address.From(addr1), new(testpb.TestRemoteSend)) - require.NoError(t, err) + // send a message to stop the first exchange actor + err = actorRef2.RemoteTell(ctx, address.From(addr1), new(testpb.TestRemoteSend)) + require.NoError(t, err) - // stop the actor after some time - lib.Pause(time.Second) + // stop the actor after some time + lib.Pause(time.Second) - t.Cleanup(func() { - err = sys.Stop(ctx) + t.Cleanup(func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }) + }) + t.Run("When remoting is disabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "127.0.0.1" + + // create the actor system + sys, err := NewActorSystem("test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) assert.NoError(t, err) + + // create an exchanger one + actorName1 := "Exchange1" + actorRef1, err := sys.Spawn(ctx, actorName1, &exchanger{}) + require.NoError(t, err) + assert.NotNil(t, actorRef1) + + // create an exchanger two + actorName2 := "Exchange2" + actorRef2, err := sys.Spawn(ctx, actorName2, &exchanger{}) + require.NoError(t, err) + assert.NotNil(t, actorRef2) + + // get the address of the exchanger actor one + addr1, err := actorRef2.RemoteLookup(ctx, host, remotingPort, actorName1) + require.NoError(t, err) + + actorRef2.remoting = nil + // send the message to exchanger actor one using remote messaging + reply, err := actorRef2.RemoteAsk(ctx, address.From(addr1), new(testpb.TestReply)) + // perform some assertions + require.Error(t, err) + require.Nil(t, reply) + assert.EqualError(t, err, ErrRemotingDisabled.Error()) + + // send a message to stop the first exchange actor + err = actorRef2.RemoteTell(ctx, address.From(addr1), new(testpb.TestRemoteSend)) + require.Error(t, err) + assert.EqualError(t, err, ErrRemotingDisabled.Error()) + + // stop the actor after some time + lib.Pause(time.Second) + + t.Cleanup(func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }) }) } func TestActorHandle(t *testing.T) { @@ -1317,7 +1379,7 @@ func TestRemoteLookup(t *testing.T) { assert.NoError(t, sys.Stop(ctx)) }) }) - t.Run("With remoting not enabled", func(t *testing.T) { + t.Run("With remoting server is unreachable", func(t *testing.T) { // create the context ctx := context.TODO() // define the logger to use @@ -1351,6 +1413,49 @@ func TestRemoteLookup(t *testing.T) { require.Error(t, err) require.Nil(t, addr) + t.Cleanup(func() { + assert.NoError(t, sys.Stop(ctx)) + }) + }) + t.Run("With remoting is not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "127.0.0.1" + + // create the actor system + sys, err := NewActorSystem("test", + WithLogger(logger), + WithPassivationDisabled(), + 
WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + // create an exchanger 1 + actorName1 := "Exchange1" + actorRef1, err := sys.Spawn(ctx, actorName1, &exchanger{}) + + require.NoError(t, err) + assert.NotNil(t, actorRef1) + + actorRef1.remoting = nil + + // let us lookup actor two + actorName2 := "Exchange2" + addr, err := actorRef1.RemoteLookup(ctx, host, remotingPort, actorName2) + require.Error(t, err) + require.Nil(t, addr) + assert.EqualError(t, err, ErrRemotingDisabled.Error()) + t.Cleanup(func() { assert.NoError(t, sys.Stop(ctx)) }) @@ -1632,7 +1737,48 @@ func TestRemoteReSpawn(t *testing.T) { assert.NoError(t, sys.Stop(ctx)) }) }) - t.Run("With remoting not enabled", func(t *testing.T) { + t.Run("With remoting is not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "127.0.0.1" + + // create the actor system + sys, err := NewActorSystem("test", + WithLogger(logger), + WithRemoting(host, int32(remotingPort)), + WithPassivationDisabled()) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + // create an exchanger 1 + actorName1 := "Exchange1" + actorRef1, err := sys.Spawn(ctx, actorName1, &exchanger{}) + + require.NoError(t, err) + assert.NotNil(t, actorRef1) + + // for the sake of the test we set the remoting field of actorRef1 + actorRef1.remoting = nil + + actorName2 := "Exchange2" + err = actorRef1.RemoteReSpawn(ctx, host, remotingPort, actorName2) + require.Error(t, err) + assert.EqualError(t, err, ErrRemotingDisabled.Error()) + + t.Cleanup(func() { + assert.NoError(t, sys.Stop(ctx)) + }) + }) + t.Run("With remoting server is unreachable", func(t *testing.T) { // create the context ctx := context.TODO() // define the logger to use @@ -1784,6 +1930,46 @@ func TestRemoteStop(t *testing.T) { err = actorRef1.RemoteStop(ctx, host, remotingPort, actorName2) require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, sys.Stop(ctx)) + }) + }) + t.Run("With remoting is not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "127.0.0.1" + + // create the actor system + sys, err := NewActorSystem("test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + // create an exchanger 1 + actorName1 := "Exchange1" + actorRef1, err := sys.Spawn(ctx, actorName1, &exchanger{}) + require.NoError(t, err) + assert.NotNil(t, actorRef1) + + actorRef1.remoting = nil + + actorName2 := "Exchange2" + err = actorRef1.RemoteStop(ctx, host, remotingPort, actorName2) + require.Error(t, err) + assert.EqualError(t, err, ErrRemotingDisabled.Error()) + t.Cleanup(func() { assert.NoError(t, sys.Stop(ctx)) }) @@ -1911,7 +2097,6 @@ func TestRemoteSpawn(t *testing.T) { assert.NoError(t, err) }) }) - t.Run("When actor not registered", func(t *testing.T) { // create the context ctx := context.TODO() @@ -1959,7 +2144,45 @@ func TestRemoteSpawn(t 
*testing.T) { assert.NoError(t, err) }) }) + t.Run("When remote server unreachable", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + ports := dynaport.Get(1) + remotingPort := ports[0] + host := "127.0.0.1" + // create the actor system + sys, err := NewActorSystem("test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + // create an actor + pid, err := sys.Spawn(ctx, "Exchange1", &exchanger{}) + require.NoError(t, err) + assert.NotNil(t, pid) + + // create an actor implementation and register it + actorName := uuid.NewString() + + // spawn the remote actor + err = pid.RemoteSpawn(ctx, host, remotingPort, actorName, "exchanger") + require.Error(t, err) + + t.Cleanup(func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }) + }) t.Run("When remoting is not enabled", func(t *testing.T) { // create the context ctx := context.TODO() @@ -1974,6 +2197,7 @@ func TestRemoteSpawn(t *testing.T) { sys, err := NewActorSystem("test", WithLogger(logger), WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), ) // assert there are no error require.NoError(t, err) @@ -1988,11 +2212,25 @@ func TestRemoteSpawn(t *testing.T) { assert.NotNil(t, pid) // create an actor implementation and register it + actor := &exchanger{} actorName := uuid.NewString() + // fetching the address of the that actor should return nil address + addr, err := pid.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + require.Nil(t, addr) + + // register the actor + err = sys.Register(ctx, actor) + require.NoError(t, err) + + // disable remoting on pid + pid.remoting = nil + // spawn the remote actor - err = pid.RemoteSpawn(ctx, host, remotingPort, actorName, "exchanger") + err = pid.RemoteSpawn(ctx, host, remotingPort, actorName, "actors.exchanger") require.Error(t, err) + assert.EqualError(t, err, ErrRemotingDisabled.Error()) t.Cleanup(func() { err = sys.Stop(ctx) diff --git a/actors/receive_context.go b/actors/receive_context.go index 0423121b..3019d958 100644 --- a/actors/receive_context.go +++ b/actors/receive_context.go @@ -285,10 +285,10 @@ func (c *ReceiveContext) RemoteAsk(to *address.Address, message proto.Message) ( // RemoteBatchTell sends a batch of messages to a remote actor in a way fire-and-forget manner // Messages are processed one after the other in the order they are sent. -func (c *ReceiveContext) RemoteBatchTell(to *address.Address, messages ...proto.Message) { +func (c *ReceiveContext) RemoteBatchTell(to *address.Address, messages []proto.Message) { recipient := c.self ctx := context.WithoutCancel(c.ctx) - if err := recipient.RemoteBatchTell(ctx, to, messages...); err != nil { + if err := recipient.RemoteBatchTell(ctx, to, messages); err != nil { c.Err(err) } } @@ -296,10 +296,10 @@ func (c *ReceiveContext) RemoteBatchTell(to *address.Address, messages ...proto. // RemoteBatchAsk sends a synchronous bunch of messages to a remote actor and expect responses in the same order as the messages. // Messages are processed one after the other in the order they are sent. // This can hinder performance if it is not properly used. 
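Both batch entry points on ReceiveContext now take an explicit []proto.Message slice instead of a variadic list, matching the new Remoting API. For illustration only, a minimal sketch of calling the slice-based form from inside a message handler; the testpb messages are borrowed from the surrounding tests, the target address and function name are placeholders, and imports are omitted:

// Illustrative only: batching messages through the updated ReceiveContext API.
func forwardBatch(ctx *ReceiveContext, to *address.Address) {
	// fire-and-forget: the messages are delivered in the order given
	ctx.RemoteBatchTell(to, []proto.Message{new(testpb.TestSend), new(testpb.TestSend), new(testpb.TestSend)})

	// request/response: replies come back in the same order as the requests;
	// any failure is recorded on the receive context (c.Err) rather than returned
	replies := ctx.RemoteBatchAsk(to, []proto.Message{new(testpb.TestReply)})
	_ = replies
}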
-func (c *ReceiveContext) RemoteBatchAsk(to *address.Address, messages ...proto.Message) (responses []*anypb.Any) { +func (c *ReceiveContext) RemoteBatchAsk(to *address.Address, messages []proto.Message) (responses []*anypb.Any) { recipient := c.self ctx := context.WithoutCancel(c.ctx) - replies, err := recipient.RemoteBatchAsk(ctx, to, messages...) + replies, err := recipient.RemoteBatchAsk(ctx, to, messages) if err != nil { c.Err(err) } diff --git a/actors/receive_context_test.go b/actors/receive_context_test.go index 2f2c2d33..3613feec 100644 --- a/actors/receive_context_test.go +++ b/actors/receive_context_test.go @@ -274,7 +274,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -296,11 +297,13 @@ func TestReceiveContext(t *testing.T) { actor1 := &exchanger{} ports := dynaport.Get(1) actorPath1 := address.New("Exchange1", "sys", "host", ports[0]) - pid1, err := newPID(ctx, + pid1, err := newPID( + ctx, actorPath1, actor1, withInitMaxRetries(1), - withCustomLogger(log.DiscardLogger)) + withCustomLogger(log.DiscardLogger), + ) require.NoError(t, err) require.NotNil(t, pid1) @@ -322,10 +325,12 @@ func TestReceiveContext(t *testing.T) { lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, pid1.Shutdown(ctx)) - assert.NoError(t, sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, pid1.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With failed RemoteAsk", func(t *testing.T) { // create the context @@ -338,7 +343,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -357,11 +363,13 @@ func TestReceiveContext(t *testing.T) { actor1 := &exchanger{} ports := dynaport.Get(1) actorPath1 := address.New("Exchange1", "sys", "host", ports[0]) - pid1, err := newPID(ctx, + pid1, err := newPID( + ctx, actorPath1, actor1, withInitMaxRetries(1), - withCustomLogger(log.DiscardLogger)) + withCustomLogger(log.DiscardLogger), + ) require.NoError(t, err) require.NotNil(t, pid1) @@ -374,19 +382,23 @@ func TestReceiveContext(t *testing.T) { self: pid1, } - context.RemoteAsk(address.From(&goaktpb.Address{ - Host: "127.0.0.1", - Port: int32(remotingPort), - Name: actorName2, - Id: "", - }), new(testpb.TestReply)) + context.RemoteAsk(address.From( + &goaktpb.Address{ + Host: "127.0.0.1", + Port: int32(remotingPort), + Name: actorName2, + Id: "", + }, + ), new(testpb.TestReply)) require.Error(t, context.getError()) lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, pid1.Shutdown(ctx)) - assert.NoError(t, sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, pid1.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With successful RemoteTell", func(t *testing.T) { // create the context @@ -399,7 +411,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -422,11 +435,13 @@ func TestReceiveContext(t *testing.T) { actor1 := &exchanger{} ports := dynaport.Get(1) actorPath1 := address.New("Exchange1", "sys", "host", ports[0]) - 
pid1, err := newPID(ctx, + pid1, err := newPID( + ctx, actorPath1, actor1, withInitMaxRetries(1), - withCustomLogger(log.DiscardLogger)) + withCustomLogger(log.DiscardLogger), + ) require.NoError(t, err) require.NotNil(t, pid1) @@ -446,10 +461,12 @@ func TestReceiveContext(t *testing.T) { require.NoError(t, context.getError()) lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, pid1.Shutdown(ctx)) - assert.NoError(t, sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, pid1.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With failed RemoteTell", func(t *testing.T) { // create the context @@ -462,7 +479,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -481,11 +499,13 @@ func TestReceiveContext(t *testing.T) { actor1 := &exchanger{} ports := dynaport.Get(1) actorPath1 := address.New("Exchange1", "sys", "host", ports[0]) - pid1, err := newPID(ctx, + pid1, err := newPID( + ctx, actorPath1, actor1, withInitMaxRetries(1), - withCustomLogger(log.DiscardLogger)) + withCustomLogger(log.DiscardLogger), + ) require.NoError(t, err) require.NotNil(t, pid1) @@ -499,19 +519,23 @@ func TestReceiveContext(t *testing.T) { } // send the message to the exchanger actor one using remote messaging - context.RemoteTell(address.From(&goaktpb.Address{ - Host: "127.0.0.1", - Port: int32(remotingPort), - Name: actorName2, - Id: "", - }), new(testpb.TestRemoteSend)) + context.RemoteTell(address.From( + &goaktpb.Address{ + Host: "127.0.0.1", + Port: int32(remotingPort), + Name: actorName2, + Id: "", + }, + ), new(testpb.TestRemoteSend)) require.Error(t, context.getError()) lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, pid1.Shutdown(ctx)) - assert.NoError(t, sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, pid1.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With address not found RemoteLookup", func(t *testing.T) { // create the context @@ -524,7 +548,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -543,11 +568,13 @@ func TestReceiveContext(t *testing.T) { actor1 := &exchanger{} ports := dynaport.Get(1) actorPath1 := address.New("Exchange1", "sys", "host", ports[0]) - pid1, err := newPID(ctx, + pid1, err := newPID( + ctx, actorPath1, actor1, withInitMaxRetries(1), - withCustomLogger(log.DiscardLogger)) + withCustomLogger(log.DiscardLogger), + ) require.NoError(t, err) require.NotNil(t, pid1) @@ -564,10 +591,12 @@ func TestReceiveContext(t *testing.T) { lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, pid1.Shutdown(ctx)) - assert.NoError(t, sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, pid1.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With failed RemoteLookup", func(t *testing.T) { // create the context @@ -580,7 +609,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), ) @@ -598,11 +628,13 @@ func TestReceiveContext(t *testing.T) { actor1 := &exchanger{} ports := dynaport.Get(1) actorPath1 := 
address.New("Exchange1", "sys", "host", ports[0]) - pid1, err := newPID(ctx, + pid1, err := newPID( + ctx, actorPath1, actor1, withInitMaxRetries(1), - withCustomLogger(log.DiscardLogger)) + withCustomLogger(log.DiscardLogger), + ) require.NoError(t, err) require.NotNil(t, pid1) @@ -618,10 +650,12 @@ func TestReceiveContext(t *testing.T) { require.Error(t, context.getError()) lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, pid1.Shutdown(ctx)) - assert.NoError(t, sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, pid1.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With successful Shutdown", func(t *testing.T) { ctx := context.TODO() @@ -657,11 +691,13 @@ func TestReceiveContext(t *testing.T) { actorPath := address.New("Parent", "sys", "host", ports[0]) // create the parent actor - parent, err := newPID(ctx, actorPath, + parent, err := newPID( + ctx, actorPath, newTestSupervisor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) assert.NotNil(t, parent) @@ -684,9 +720,11 @@ func TestReceiveContext(t *testing.T) { require.NotNil(t, actual) assert.Equal(t, child.Address().String(), actual.Address().String()) - t.Cleanup(func() { - receiveCtx.Shutdown() - }) + t.Cleanup( + func() { + receiveCtx.Shutdown() + }, + ) }) t.Run("With failed SpawnChild", func(t *testing.T) { ctx := context.TODO() @@ -694,11 +732,13 @@ func TestReceiveContext(t *testing.T) { actorPath := address.New("Parent", "sys", "host", ports[0]) // create the parent actor - parent, err := newPID(ctx, actorPath, + parent, err := newPID( + ctx, actorPath, newTestSupervisor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) assert.NotNil(t, parent) @@ -724,11 +764,13 @@ func TestReceiveContext(t *testing.T) { actorPath := address.New("Parent", "sys", "host", ports[0]) // create the parent actor - parent, err := newPID(ctx, actorPath, + parent, err := newPID( + ctx, actorPath, newTestSupervisor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) assert.NotNil(t, parent) @@ -752,9 +794,11 @@ func TestReceiveContext(t *testing.T) { context.Child(name) require.Error(t, context.getError()) - t.Cleanup(func() { - context.Shutdown() - }) + t.Cleanup( + func() { + context.Shutdown() + }, + ) }) t.Run("With dead parent Child", func(t *testing.T) { ctx := context.TODO() @@ -762,11 +806,13 @@ func TestReceiveContext(t *testing.T) { actorPath := address.New("Parent", "sys", "host", ports[0]) // create the parent actor - parent, err := newPID(ctx, actorPath, + parent, err := newPID( + ctx, actorPath, newTestSupervisor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) assert.NotNil(t, parent) @@ -790,9 +836,11 @@ func TestReceiveContext(t *testing.T) { context.Child(name) require.Error(t, context.getError()) - t.Cleanup(func() { - require.NoError(t, child.Shutdown(ctx)) - }) + t.Cleanup( + func() { + require.NoError(t, child.Shutdown(ctx)) + }, + ) }) t.Run("With successful Stop", func(t *testing.T) { ctx := context.TODO() @@ -800,11 +848,13 @@ func TestReceiveContext(t *testing.T) { actorPath := address.New("Parent", "sys", "host", ports[0]) // create the parent actor - parent, err := 
newPID(ctx, actorPath, + parent, err := newPID( + ctx, actorPath, newTestSupervisor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) assert.NotNil(t, parent) @@ -828,9 +878,11 @@ func TestReceiveContext(t *testing.T) { require.NoError(t, context.getError()) lib.Pause(time.Second) assert.Empty(t, context.Children()) - t.Cleanup(func() { - context.Shutdown() - }) + t.Cleanup( + func() { + context.Shutdown() + }, + ) }) t.Run("With child actor Stop freeing up parent link", func(t *testing.T) { ctx := context.TODO() @@ -838,11 +890,13 @@ func TestReceiveContext(t *testing.T) { actorPath := address.New("Parent", "sys", "host", ports[0]) // create the parent actor - parent, err := newPID(ctx, actorPath, + parent, err := newPID( + ctx, actorPath, newTestSupervisor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) assert.NotNil(t, parent) @@ -870,9 +924,11 @@ func TestReceiveContext(t *testing.T) { lib.Pause(time.Second) assert.Empty(t, context.Children()) - t.Cleanup(func() { - context.Shutdown() - }) + t.Cleanup( + func() { + context.Shutdown() + }, + ) }) t.Run("With failed Stop: child not defined", func(t *testing.T) { ctx := context.TODO() @@ -880,11 +936,13 @@ func TestReceiveContext(t *testing.T) { actorPath := address.New("Parent", "sys", "host", ports[0]) // create the parent actor - parent, err := newPID(ctx, actorPath, + parent, err := newPID( + ctx, actorPath, newTestSupervisor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) assert.NotNil(t, parent) @@ -901,9 +959,11 @@ func TestReceiveContext(t *testing.T) { // stop the child actor context.Stop(NoSender) require.Error(t, context.getError()) - t.Cleanup(func() { - context.Shutdown() - }) + t.Cleanup( + func() { + context.Shutdown() + }, + ) }) t.Run("With failed Stop: parent is dead", func(t *testing.T) { ctx := context.TODO() @@ -912,11 +972,13 @@ func TestReceiveContext(t *testing.T) { actorPath := address.New("Test", "sys", "host", ports[0]) // create the parent actor - parent, err := newPID(ctx, actorPath, + parent, err := newPID( + ctx, actorPath, newTestSupervisor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) assert.NotNil(t, parent) @@ -950,11 +1012,13 @@ func TestReceiveContext(t *testing.T) { actorPath := address.New("Parent", "sys", "host", ports[0]) // create the parent actor - parent, err := newPID(ctx, actorPath, + parent, err := newPID( + ctx, actorPath, newTestSupervisor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) assert.NotNil(t, parent) @@ -969,21 +1033,25 @@ func TestReceiveContext(t *testing.T) { // create the child actor childPath := address.New("child", "sys", "host", ports[0]) - child, err := newPID(ctx, childPath, + child, err := newPID( + ctx, childPath, newTestSupervisor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) // stop the child actor context.Stop(child) require.Error(t, context.getError()) - t.Cleanup(func() { - context.Shutdown() - assert.NoError(t, child.Shutdown(ctx)) 
- }) + t.Cleanup( + func() { + context.Shutdown() + assert.NoError(t, child.Shutdown(ctx)) + }, + ) }) t.Run("With Stop when child is already stopped", func(t *testing.T) { ctx := context.TODO() @@ -991,11 +1059,13 @@ func TestReceiveContext(t *testing.T) { actorPath := address.New("Parent", "sys", "host", ports[0]) // create the parent actor - parent, err := newPID(ctx, actorPath, + parent, err := newPID( + ctx, actorPath, newTestSupervisor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) assert.NotNil(t, parent) @@ -1021,9 +1091,11 @@ func TestReceiveContext(t *testing.T) { require.Error(t, context.getError()) lib.Pause(time.Second) assert.Empty(t, context.Children()) - t.Cleanup(func() { - context.Shutdown() - }) + t.Cleanup( + func() { + context.Shutdown() + }, + ) }) t.Run("With failed Shutdown", func(t *testing.T) { ctx := context.TODO() @@ -1164,11 +1236,13 @@ func TestReceiveContext(t *testing.T) { assert.EqualValues(t, 1, len(consumer.Topics())) - t.Cleanup(func() { - // shutdown the consumer - consumer.Shutdown() - context.Shutdown() - }) + t.Cleanup( + func() { + // shutdown the consumer + consumer.Shutdown() + context.Shutdown() + }, + ) }) t.Run("With Unhandled with a sender", func(t *testing.T) { ctx := context.TODO() @@ -1235,12 +1309,14 @@ func TestReceiveContext(t *testing.T) { assert.EqualValues(t, 1, len(consumer.Topics())) - t.Cleanup(func() { - require.NoError(t, pid2.Shutdown(ctx)) - // shutdown the consumer - consumer.Shutdown() - context.Shutdown() - }) + t.Cleanup( + func() { + require.NoError(t, pid2.Shutdown(ctx)) + // shutdown the consumer + consumer.Shutdown() + context.Shutdown() + }, + ) }) t.Run("With Unhandled with system messages", func(t *testing.T) { ctx := context.TODO() @@ -1296,11 +1372,13 @@ func TestReceiveContext(t *testing.T) { assert.EqualValues(t, 1, len(consumer.Topics())) - t.Cleanup(func() { - // shutdown the consumer - consumer.Shutdown() - context.Shutdown() - }) + t.Cleanup( + func() { + // shutdown the consumer + consumer.Shutdown() + context.Shutdown() + }, + ) }) t.Run("With successful BatchTell", func(t *testing.T) { ctx := context.TODO() @@ -1519,7 +1597,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -1548,7 +1627,8 @@ func TestReceiveContext(t *testing.T) { // get the address of the exchanger actor one testerAddr := context.RemoteLookup(host, remotingPort, tester) // send the message to t exchanger actor one using remote messaging - context.RemoteBatchTell(address.From(testerAddr), new(testpb.TestSend), new(testpb.TestSend), new(testpb.TestSend)) + messages := []proto.Message{new(testpb.TestSend), new(testpb.TestSend), new(testpb.TestSend)} + context.RemoteBatchTell(address.From(testerAddr), messages) require.NoError(t, context.getError()) // wait for processing to complete on the actor side lib.Pause(500 * time.Millisecond) @@ -1556,10 +1636,12 @@ func TestReceiveContext(t *testing.T) { lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, testerRef.Shutdown(ctx)) - assert.NoError(t, sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, testerRef.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With successful RemoteBatchAsk", func(t *testing.T) { // create the context @@ -1572,7 
+1654,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -1601,15 +1684,74 @@ func TestReceiveContext(t *testing.T) { // get the address of the exchanger actor one testerAddr := context.RemoteLookup(host, remotingPort, tester) // send the message to t exchanger actor one using remote messaging - replies := context.RemoteBatchAsk(address.From(testerAddr), new(testpb.TestReply), new(testpb.TestReply), new(testpb.TestReply)) + messages := []proto.Message{new(testpb.TestReply), new(testpb.TestReply), new(testpb.TestReply)} + replies := context.RemoteBatchAsk(address.From(testerAddr), messages) require.NoError(t, context.getError()) require.Len(t, replies, 3) lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, testerRef.Shutdown(ctx)) - assert.NoError(t, sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, testerRef.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) + }) + t.Run("With RemoteBatchAsk when remoting is not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "127.0.0.1" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + // create test actor + tester := "test" + testActor := newTestActor() + testerRef, err := sys.Spawn(ctx, tester, testActor) + require.NoError(t, err) + require.NotNil(t, testerRef) + + // create an instance of receive context + context := &ReceiveContext{ + ctx: ctx, + sender: NoSender, + self: testerRef, + } + + testerRef.remoting = nil + // get the address of the exchanger actor one + testerAddr := context.RemoteLookup(host, remotingPort, tester) + // send the message to t exchanger actor one using remote messaging + messages := []proto.Message{new(testpb.TestReply), new(testpb.TestReply), new(testpb.TestReply)} + replies := context.RemoteBatchAsk(address.From(testerAddr), messages) + err = context.getError() + require.Error(t, err) + require.Empty(t, replies) + assert.EqualError(t, err, ErrRemotingDisabled.Error()) + + t.Cleanup( + func() { + assert.NoError(t, testerRef.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With failed RemoteBatchTell", func(t *testing.T) { // create the context @@ -1622,7 +1764,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -1641,11 +1784,13 @@ func TestReceiveContext(t *testing.T) { ports := dynaport.Get(1) actor1 := &exchanger{} actorPath1 := address.New("Exchange1", "sys", "host", ports[0]) - pid1, err := newPID(ctx, + pid1, err := newPID( + ctx, actorPath1, actor1, withInitMaxRetries(1), - withCustomLogger(log.DiscardLogger)) + withCustomLogger(log.DiscardLogger), + ) require.NoError(t, err) require.NotNil(t, pid1) @@ -1659,19 +1804,82 @@ func TestReceiveContext(t *testing.T) { } // send the message to the exchanger actor one 
using remote messaging - context.RemoteBatchTell(address.From(&goaktpb.Address{ - Host: "127.0.0.1", - Port: int32(remotingPort), - Name: actorName2, - Id: "", - }), new(testpb.TestRemoteSend)) + context.RemoteBatchTell(address.From( + &goaktpb.Address{ + Host: "127.0.0.1", + Port: int32(remotingPort), + Name: actorName2, + Id: "", + }, + ), []proto.Message{new(testpb.TestRemoteSend)}) require.Error(t, context.getError()) lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, pid1.Shutdown(ctx)) - assert.NoError(t, sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, pid1.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) + }) + t.Run("With RemoteBatchTell when remoting is not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "127.0.0.1" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + // create test actor + tester := "test" + testActor := newTestActor() + testerRef, err := sys.Spawn(ctx, tester, testActor) + require.NoError(t, err) + require.NotNil(t, testerRef) + + // create an instance of receive context + context := &ReceiveContext{ + ctx: ctx, + sender: NoSender, + self: testerRef, + } + + // get the address of the exchanger actor one + testerAddr := context.RemoteLookup(host, remotingPort, tester) + + testerRef.remoting = nil + + // send the message to t exchanger actor one using remote messaging + messages := []proto.Message{new(testpb.TestSend), new(testpb.TestSend), new(testpb.TestSend)} + context.RemoteBatchTell(address.From(testerAddr), messages) + err = context.getError() + require.Error(t, err) + assert.EqualError(t, err, ErrRemotingDisabled.Error()) + + lib.Pause(time.Second) + + t.Cleanup( + func() { + assert.NoError(t, testerRef.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With successful RemoteBatchAsk", func(t *testing.T) { // create the context @@ -1684,7 +1892,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -1703,11 +1912,13 @@ func TestReceiveContext(t *testing.T) { ports := dynaport.Get(1) actor1 := &exchanger{} actorPath1 := address.New("Exchange1", "sys", "host", ports[0]) - pid1, err := newPID(ctx, + pid1, err := newPID( + ctx, actorPath1, actor1, withInitMaxRetries(1), - withCustomLogger(log.DiscardLogger)) + withCustomLogger(log.DiscardLogger), + ) require.NoError(t, err) require.NotNil(t, pid1) @@ -1720,19 +1931,23 @@ func TestReceiveContext(t *testing.T) { self: pid1, } - context.RemoteBatchAsk(address.From(&goaktpb.Address{ - Host: "127.0.0.1", - Port: int32(remotingPort), - Name: actorName2, - Id: "", - }), new(testpb.TestReply)) + context.RemoteBatchAsk(address.From( + &goaktpb.Address{ + Host: "127.0.0.1", + Port: int32(remotingPort), + Name: actorName2, + Id: "", + }, + ), []proto.Message{new(testpb.TestReply)}) require.Error(t, context.getError()) lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, pid1.Shutdown(ctx)) - assert.NoError(t, 
sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, pid1.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With no panics RemoteReSpawn when actor not found", func(t *testing.T) { // create the context @@ -1745,7 +1960,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -1764,11 +1980,13 @@ func TestReceiveContext(t *testing.T) { ports := dynaport.Get(1) actor1 := &exchanger{} actorPath1 := address.New("Exchange1", "sys", "host", ports[0]) - pid1, err := newPID(ctx, + pid1, err := newPID( + ctx, actorPath1, actor1, withInitMaxRetries(1), - withCustomLogger(log.DiscardLogger)) + withCustomLogger(log.DiscardLogger), + ) require.NoError(t, err) require.NotNil(t, pid1) @@ -1785,10 +2003,12 @@ func TestReceiveContext(t *testing.T) { require.NoError(t, context.getError()) lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, pid1.Shutdown(ctx)) - assert.NoError(t, sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, pid1.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With failed RemoteReSpawn", func(t *testing.T) { // create the context @@ -1801,7 +2021,8 @@ func TestReceiveContext(t *testing.T) { host := "127.0.0.1" // create the actor system - sys, err := NewActorSystem("test", + sys, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), ) @@ -1819,11 +2040,13 @@ func TestReceiveContext(t *testing.T) { actor1 := &exchanger{} ports := dynaport.Get(1) actorPath1 := address.New("Exchange1", "sys", "host", ports[0]) - pid1, err := newPID(ctx, + pid1, err := newPID( + ctx, actorPath1, actor1, withInitMaxRetries(1), - withCustomLogger(log.DiscardLogger)) + withCustomLogger(log.DiscardLogger), + ) require.NoError(t, err) require.NotNil(t, pid1) @@ -1840,10 +2063,12 @@ func TestReceiveContext(t *testing.T) { require.Error(t, context.getError()) lib.Pause(time.Second) - t.Cleanup(func() { - assert.NoError(t, pid1.Shutdown(ctx)) - assert.NoError(t, sys.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, pid1.Shutdown(ctx)) + assert.NoError(t, sys.Stop(ctx)) + }, + ) }) t.Run("With successful PipeTo", func(t *testing.T) { askTimeout := time.Minute @@ -1990,9 +2215,11 @@ func TestReceiveContext(t *testing.T) { context.SendAsync(pid2.Name(), new(testpb.TestSend)) require.NoError(t, context.getError()) - t.Cleanup(func() { - assert.NoError(t, actorSystem.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, actorSystem.Stop(ctx)) + }, + ) }) t.Run("With failed SendAsync", func(t *testing.T) { ctx := context.Background() @@ -2030,9 +2257,11 @@ func TestReceiveContext(t *testing.T) { context.SendAsync(pid2.Name(), new(testpb.TestSend)) require.Error(t, context.getError()) - t.Cleanup(func() { - assert.NoError(t, actorSystem.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, actorSystem.Stop(ctx)) + }, + ) }) t.Run("With successful SendSync command", func(t *testing.T) { ctx := context.Background() @@ -2069,9 +2298,11 @@ func TestReceiveContext(t *testing.T) { expected := new(testpb.Reply) assert.True(t, proto.Equal(expected, reply)) - t.Cleanup(func() { - assert.NoError(t, actorSystem.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, actorSystem.Stop(ctx)) + }, + ) }) t.Run("With failed SendSync", func(t *testing.T) { ctx := context.Background() @@ -2110,9 +2341,11 @@ func 
TestReceiveContext(t *testing.T) { context.SendSync(pid2.Name(), new(testpb.TestReply)) require.Error(t, context.getError()) - t.Cleanup(func() { - assert.NoError(t, actorSystem.Stop(ctx)) - }) + t.Cleanup( + func() { + assert.NoError(t, actorSystem.Stop(ctx)) + }, + ) }) t.Run("With Stash when stash not set", func(t *testing.T) { ctx := context.TODO() diff --git a/actors/redistribution.go b/actors/redistribution.go index f53706e9..9152c3c9 100644 --- a/actors/redistribution.go +++ b/actors/redistribution.go @@ -103,63 +103,67 @@ func (x *actorSystem) redistribute(ctx context.Context, event *cluster.Event) er eg, ctx := errgroup.WithContext(ctx) - eg.Go(func() error { - for _, actor := range leaderActors { - // never redistribute system actors - if isSystemName(actor.GetActorAddress().GetName()) { - continue - } - - x.logger.Debugf("re-creating actor=[(%s) of type (%s)]", actor.GetActorAddress().GetName(), actor.GetActorType()) - iactor, err := x.reflection.ActorFrom(actor.GetActorType()) - if err != nil { - x.logger.Errorf("failed to create actor=[(%s) of type (%s)]: %v", actor.GetActorAddress().GetName(), actor.GetActorType(), err) - return err - } - - if _, err = x.Spawn(ctx, actor.GetActorAddress().GetName(), iactor); err != nil { - x.logger.Errorf("failed to spawn actor=[(%s) of type (%s)]: %v", actor.GetActorAddress().GetName(), actor.GetActorType(), err) - return err - } - - x.logger.Debugf("actor=[(%s) of type (%s)] successfully re-created", actor.GetActorAddress().GetName(), actor.GetActorType()) - } - return nil - }) - - eg.Go(func() error { - // defensive programming - if len(chunks) == 0 { - return nil - } - - for i := 1; i < len(chunks); i++ { - actors := chunks[i] - peer := peers[i-1] - - x.peersCacheMu.RLock() - bytea := x.peersCache[net.JoinHostPort(peer.Host, strconv.Itoa(peer.Port))] - x.peersCacheMu.RUnlock() - - state := new(internalpb.PeerState) - _ = proto.Unmarshal(bytea, state) - - for _, actor := range actors { + eg.Go( + func() error { + for _, actor := range leaderActors { // never redistribute system actors if isSystemName(actor.GetActorAddress().GetName()) { continue } x.logger.Debugf("re-creating actor=[(%s) of type (%s)]", actor.GetActorAddress().GetName(), actor.GetActorType()) - if err := RemoteSpawn(ctx, state.GetHost(), int(state.GetRemotingPort()), actor.GetActorAddress().GetName(), actor.GetActorType()); err != nil { - x.logger.Error(err) + iactor, err := x.reflection.ActorFrom(actor.GetActorType()) + if err != nil { + x.logger.Errorf("failed to create actor=[(%s) of type (%s)]: %v", actor.GetActorAddress().GetName(), actor.GetActorType(), err) return err } + + if _, err = x.Spawn(ctx, actor.GetActorAddress().GetName(), iactor); err != nil { + x.logger.Errorf("failed to spawn actor=[(%s) of type (%s)]: %v", actor.GetActorAddress().GetName(), actor.GetActorType(), err) + return err + } + x.logger.Debugf("actor=[(%s) of type (%s)] successfully re-created", actor.GetActorAddress().GetName(), actor.GetActorType()) } - } - return nil - }) + return nil + }, + ) + + eg.Go( + func() error { + // defensive programming + if len(chunks) == 0 { + return nil + } + + for i := 1; i < len(chunks); i++ { + actors := chunks[i] + peer := peers[i-1] + + x.peersCacheMu.RLock() + bytea := x.peersCache[net.JoinHostPort(peer.Host, strconv.Itoa(peer.Port))] + x.peersCacheMu.RUnlock() + + peerState := new(internalpb.PeerState) + _ = proto.Unmarshal(bytea, peerState) + + for _, actor := range actors { + // never redistribute system actors + if 
isSystemName(actor.GetActorAddress().GetName()) { + continue + } + + x.logger.Debugf("re-creating actor=[(%s) of type (%s)]", actor.GetActorAddress().GetName(), actor.GetActorType()) + if err := x.remoting.RemoteSpawn(ctx, peerState.GetHost(), int(peerState.GetRemotingPort()), actor.GetActorAddress().GetName(), actor.GetActorType()); err != nil { + x.logger.Error(err) + return err + } + x.logger.Debugf("actor=[(%s) of type (%s)] successfully re-created", actor.GetActorAddress().GetName(), actor.GetActorType()) + } + } + return nil + }, + ) return eg.Wait() } diff --git a/actors/remoting.go b/actors/remoting.go new file mode 100644 index 00000000..c7de2989 --- /dev/null +++ b/actors/remoting.go @@ -0,0 +1,359 @@ +/* + * MIT License + * + * Copyright (c) 2022-2024 Tochemey + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package actors + +import ( + "context" + "errors" + nethttp "net/http" + "strings" + "time" + + "connectrpc.com/connect" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/durationpb" + + "github.com/tochemey/goakt/v2/address" + "github.com/tochemey/goakt/v2/internal/http" + "github.com/tochemey/goakt/v2/internal/internalpb" + "github.com/tochemey/goakt/v2/internal/internalpb/internalpbconnect" +) + +// Remoting defines the Remoting APIs +// This requires Remoting is enabled on the connected actor system +type Remoting struct { + client *nethttp.Client +} + +// NewRemoting creates an instance Remoting with an insecure connection. To use a secure connection +// one need to call the WithTLS method of the remoting instance to set the certificates of the secure connection +// This requires Remoting is enabled on the connected actor system +// Make sure to call Close to free up resources otherwise you may be leaking socket connections +// +// One can also override the remoting option when calling any of the method for custom one. 
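For orientation, a minimal usage sketch of the client constructed by NewRemoting below; the host, port, actor name and function name are placeholders, the testpb types come from the repository's test data, imports are omitted, and the target actor system is assumed to have been started with WithRemoting:

// Sketch: driving a remote actor through the Remoting client.
func pingRemote(ctx context.Context, host string, port int) error {
	remoting := NewRemoting()
	defer remoting.Close() // free idle connections when done

	// resolve the remote actor's address first
	addr, err := remoting.RemoteLookup(ctx, host, port, "pinger")
	if err != nil || addr == nil {
		return err
	}

	// fire-and-forget
	if err := remoting.RemoteTell(ctx, addr, new(testpb.TestSend)); err != nil {
		return err
	}

	// request/response with an explicit timeout
	reply, err := remoting.RemoteAsk(ctx, addr, new(testpb.TestReply), time.Second)
	if err != nil {
		return err
	}
	_ = reply // an *anypb.Any carrying the response payload
	return nil
}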
+func NewRemoting() *Remoting { + r := &Remoting{ + client: http.NewClient(), + } + return r +} + +// RemoteTell sends a message to an actor remotely without expecting any reply +func (r *Remoting) RemoteTell(ctx context.Context, to *address.Address, message proto.Message) error { + marshaled, err := anypb.New(message) + if err != nil { + return ErrInvalidMessage(err) + } + + remoteClient := r.Client(to.GetHost(), int(to.GetPort())) + request := &internalpb.RemoteTellRequest{ + RemoteMessage: &internalpb.RemoteMessage{ + Sender: address.NoSender, + Receiver: to.Address, + Message: marshaled, + }, + } + + stream := remoteClient.RemoteTell(ctx) + if err := stream.Send(request); err != nil { + if eof(err) { + if _, err := stream.CloseAndReceive(); err != nil { + return err + } + return nil + } + return err + } + + // close the connection + if _, err := stream.CloseAndReceive(); err != nil { + return err + } + + return nil +} + +// RemoteAsk sends a synchronous message to another actor remotely and expect a response. +func (r *Remoting) RemoteAsk(ctx context.Context, to *address.Address, message proto.Message, timeout time.Duration) (response *anypb.Any, err error) { + marshaled, err := anypb.New(message) + if err != nil { + return nil, ErrInvalidMessage(err) + } + + remoteClient := r.Client(to.GetHost(), int(to.GetPort())) + request := &internalpb.RemoteAskRequest{ + RemoteMessage: &internalpb.RemoteMessage{ + Sender: address.NoSender, + Receiver: to.Address, + Message: marshaled, + }, + Timeout: durationpb.New(timeout), + } + stream := remoteClient.RemoteAsk(ctx) + errc := make(chan error, 1) + + go func() { + defer close(errc) + for { + resp, err := stream.Receive() + if err != nil { + errc <- err + return + } + + response = resp.GetMessage() + } + }() + + err = stream.Send(request) + if err != nil { + return nil, err + } + + if err := stream.CloseRequest(); err != nil { + return nil, err + } + + err = <-errc + if eof(err) { + return response, nil + } + + if err != nil { + return nil, err + } + + return +} + +// RemoteLookup look for an actor address on a remote node. 
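RemoteLookup, defined next, maps a connect.CodeNotFound response to a nil address with a nil error, so "actor not found" and a transport failure are reported differently. A short sketch of the check callers are expected to make; the remoting instance, host, port and actor name are assumed:

// Sketch: separating "not found" from a real failure when resolving an address.
addr, err := remoting.RemoteLookup(ctx, host, port, "orders")
switch {
case err != nil:
	// the node is unreachable, or remoting is disabled on the target system
case addr == nil:
	// the node answered, but no actor with that name is registered there
default:
	// addr is usable with RemoteTell, RemoteAsk and the batch variants
}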
+func (r *Remoting) RemoteLookup(ctx context.Context, host string, port int, name string) (addr *address.Address, err error) { + remoteClient := r.Client(host, port) + request := connect.NewRequest( + &internalpb.RemoteLookupRequest{ + Host: host, + Port: int32(port), + Name: name, + }, + ) + + response, err := remoteClient.RemoteLookup(ctx, request) + if err != nil { + code := connect.CodeOf(err) + if code == connect.CodeNotFound { + return nil, nil + } + return nil, err + } + + return address.From(response.Msg.GetAddress()), nil +} + +// RemoteBatchTell sends bulk asynchronous messages to an actor +func (r *Remoting) RemoteBatchTell(ctx context.Context, to *address.Address, messages []proto.Message) error { + var requests []*internalpb.RemoteTellRequest + for _, message := range messages { + packed, err := anypb.New(message) + if err != nil { + return ErrInvalidMessage(err) + } + + requests = append( + requests, &internalpb.RemoteTellRequest{ + RemoteMessage: &internalpb.RemoteMessage{ + Sender: address.NoSender, + Receiver: to.Address, + Message: packed, + }, + }, + ) + } + + remoteClient := r.Client(to.GetHost(), int(to.GetPort())) + stream := remoteClient.RemoteTell(ctx) + for _, request := range requests { + err := stream.Send(request) + if eof(err) { + if _, err := stream.CloseAndReceive(); err != nil { + return err + } + return nil + } + + if err != nil { + return err + } + } + + // close the connection + if _, err := stream.CloseAndReceive(); err != nil { + return err + } + + return nil +} + +// RemoteBatchAsk sends bulk messages to an actor with responses expected +func (r *Remoting) RemoteBatchAsk(ctx context.Context, to *address.Address, messages []proto.Message) (responses []*anypb.Any, err error) { + var requests []*internalpb.RemoteAskRequest + for _, message := range messages { + packed, err := anypb.New(message) + if err != nil { + return nil, ErrInvalidMessage(err) + } + + requests = append( + requests, &internalpb.RemoteAskRequest{ + RemoteMessage: &internalpb.RemoteMessage{ + Sender: address.NoSender, + Receiver: to.Address, + Message: packed, + }, + }, + ) + } + + remoteClient := r.Client(to.GetHost(), int(to.GetPort())) + stream := remoteClient.RemoteAsk(ctx) + errc := make(chan error, 1) + + go func() { + defer close(errc) + for { + resp, err := stream.Receive() + if err != nil { + errc <- err + return + } + + responses = append(responses, resp.GetMessage()) + } + }() + + for _, request := range requests { + err := stream.Send(request) + if err != nil { + return nil, err + } + } + + if err := stream.CloseRequest(); err != nil { + return nil, err + } + + err = <-errc + if eof(err) { + return responses, nil + } + + if err != nil { + return nil, err + } + + return +} + +// RemoteSpawn creates an actor on a remote node. 
The given actor needs to be registered on the remote node using the Register method of ActorSystem +func (r *Remoting) RemoteSpawn(ctx context.Context, host string, port int, name, actorType string) error { + remoteClient := r.Client(host, port) + request := connect.NewRequest( + &internalpb.RemoteSpawnRequest{ + Host: host, + Port: int32(port), + ActorName: name, + ActorType: actorType, + }, + ) + + if _, err := remoteClient.RemoteSpawn(ctx, request); err != nil { + code := connect.CodeOf(err) + if code == connect.CodeFailedPrecondition { + var connectErr *connect.Error + errors.As(err, &connectErr) + e := connectErr.Unwrap() + if strings.Contains(e.Error(), ErrTypeNotRegistered.Error()) { + return ErrTypeNotRegistered + } + } + return err + } + return nil +} + +// RemoteReSpawn restarts actor on a remote node. +func (r *Remoting) RemoteReSpawn(ctx context.Context, host string, port int, name string) error { + remoteClient := r.Client(host, port) + request := connect.NewRequest( + &internalpb.RemoteReSpawnRequest{ + Host: host, + Port: int32(port), + Name: name, + }, + ) + + if _, err := remoteClient.RemoteReSpawn(ctx, request); err != nil { + code := connect.CodeOf(err) + if code == connect.CodeNotFound { + return nil + } + return err + } + + return nil +} + +// RemoteStop stops an actor on a remote node. +func (r *Remoting) RemoteStop(ctx context.Context, host string, port int, name string) error { + remoteClient := r.Client(host, port) + request := connect.NewRequest( + &internalpb.RemoteStopRequest{ + Host: host, + Port: int32(port), + Name: name, + }, + ) + + if _, err := remoteClient.RemoteStop(ctx, request); err != nil { + code := connect.CodeOf(err) + if code == connect.CodeNotFound { + return nil + } + return err + } + + return nil +} + +// Close closes the Client connection +func (r *Remoting) Close() { + r.client.CloseIdleConnections() +} + +// Client returns a Remoting service client instance +func (r *Remoting) Client(host string, port int) internalpbconnect.RemotingServiceClient { + endpoint := http.URL(host, port) + return internalpbconnect.NewRemotingServiceClient(r.client, endpoint) +} diff --git a/actors/remoting_test.go b/actors/remoting_test.go new file mode 100644 index 00000000..d3cc4b6d --- /dev/null +++ b/actors/remoting_test.go @@ -0,0 +1,1524 @@ +/* + * MIT License + * + * Copyright (c) 2022-2024 Tochemey + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +package actors + +import ( + "context" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/travisjeffery/go-dynaport" + "google.golang.org/protobuf/proto" + + "github.com/tochemey/goakt/v2/address" + "github.com/tochemey/goakt/v2/goaktpb" + "github.com/tochemey/goakt/v2/internal/lib" + "github.com/tochemey/goakt/v2/log" + "github.com/tochemey/goakt/v2/test/data/testpb" +) + +func TestRemoteTell(t *testing.T) { + t.Run( + "With happy path", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + // create a message to send to the test actor + message := new(testpb.TestSend) + // send the message to the actor + for i := 0; i < 10; i++ { + err = remoting.RemoteTell(ctx, addr, message) + // perform some assertions + require.NoError(t, err) + } + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With invalid message", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + err = remoting.RemoteTell(ctx, addr, nil) + // perform some assertions + require.Error(t, err) + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With remote service failure", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, 
err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // create a wrong address + addr := &goaktpb.Address{ + Host: host, + Port: 2222, + Name: "", + Id: "", + } + + remoting := NewRemoting() + // create a message to send to the test actor + message := new(testpb.TestSend) + // send the message to the actor + err = remoting.RemoteTell(ctx, address.From(addr), message) + // perform some assertions + require.Error(t, err) + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With remoting disabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + // let us disable remoting + actorsSystem := sys.(*actorSystem) + actorsSystem.remotingEnabled.Store(false) + + // create a message to send to the test actor + message := new(testpb.TestSend) + // send the message to the actor + err = remoting.RemoteTell(ctx, addr, message) + // perform some assertions + require.Error(t, err) + require.EqualError(t, err, "failed_precondition: remoting is not enabled") + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With Batch request", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + // create a message to send to the test actor + messages := make([]proto.Message, 10) + // send the message to the actor + for i := 0; i < 10; i++ { + messages[i] = new(testpb.TestSend) + } + + err = remoting.RemoteBatchTell(ctx, addr, messages) + require.NoError(t, err) + + // wait for processing to complete on the 
actor side + lib.Pause(500 * time.Millisecond) + require.EqualValues(t, 10, actorRef.ProcessedCount()-1) + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With Batch service failure", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // create a wrong address + addr := &goaktpb.Address{ + Host: host, + Port: 2222, + Name: "", + Id: "", + } + + remoting := NewRemoting() + // create a message to send to the test actor + message := new(testpb.TestSend) + // send the message to the actor + err = remoting.RemoteBatchTell(ctx, address.From(addr), []proto.Message{message}) + // perform some assertions + require.Error(t, err) + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With Batch when remoting is disabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + // let us disable remoting + actorsSystem := sys.(*actorSystem) + actorsSystem.remotingEnabled.Store(false) + + // create a message to send to the test actor + message := new(testpb.TestSend) + // send the message to the actor + err = remoting.RemoteBatchTell(ctx, addr, []proto.Message{message}) + // perform some assertions + require.Error(t, err) + require.EqualError(t, err, "failed_precondition: remoting is not enabled") + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With actor not found", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are 
no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + // stop the actor when wait for cleanup to take place + require.NoError(t, actorRef.Shutdown(ctx)) + lib.Pause(time.Second) + + // create a message to send to the test actor + message := new(testpb.TestSend) + // send the message to the actor + err = remoting.RemoteTell(ctx, addr, message) + // perform some assertions + require.Error(t, err) + require.Contains(t, err.Error(), "not found") + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With Batch actor not found", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + // stop the actor when wait for cleanup to take place + require.NoError(t, actorRef.Shutdown(ctx)) + lib.Pause(time.Second) + + // create a message to send to the test actor + message := new(testpb.TestSend) + // send the message to the actor + err = remoting.RemoteBatchTell(ctx, addr, []proto.Message{message}) + // perform some assertions + require.Error(t, err) + require.Contains(t, err.Error(), "not found") + + // stop the actor after some time + lib.Pause(time.Second) + + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) +} + +func TestRemoteAsk(t *testing.T) { + t.Run( + "With happy path", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + // create a message to send to the test 
actor + message := new(testpb.TestReply) + // send the message to the actor + reply, err := remoting.RemoteAsk(ctx, addr, message, time.Minute) + // perform some assertions + require.NoError(t, err) + require.NotNil(t, reply) + require.True(t, reply.MessageIs(new(testpb.Reply))) + + actual := new(testpb.Reply) + err = reply.UnmarshalTo(actual) + require.NoError(t, err) + + expected := &testpb.Reply{Content: "received message"} + assert.True(t, proto.Equal(expected, actual)) + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With invalid message", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + // send the message to the actor + reply, err := remoting.RemoteAsk(ctx, addr, nil, time.Minute) + // perform some assertions + require.Error(t, err) + require.Nil(t, reply) + + // stop the actor after some time + lib.Pause(time.Second) + + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With remote service failure", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // get the address of the actor + addr := &goaktpb.Address{ + Host: host, + Port: 2222, + Name: "", + Id: "", + } + + remoting := NewRemoting() + // create a message to send to the test actor + message := new(testpb.TestReply) + // send the message to the actor + reply, err := remoting.RemoteAsk(ctx, address.From(addr), message, time.Minute) + // perform some assertions + require.Error(t, err) + require.Nil(t, reply) + + // stop the actor after some time + lib.Pause(time.Second) + remoting.Close() + t.Cleanup( + func() { + assert.NoError(t, sys.Stop(ctx)) + }, + ) + }, + ) + t.Run( + "With remoting disabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the 
actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + // let us disable remoting + actorsSystem := sys.(*actorSystem) + actorsSystem.remotingEnabled.Store(false) + + // create a message to send to the test actor + message := new(testpb.TestReply) + // send the message to the actor + reply, err := remoting.RemoteAsk(ctx, addr, message, time.Minute) + // perform some assertions + require.Error(t, err) + require.EqualError(t, err, "failed_precondition: remoting is not enabled") + require.Nil(t, reply) + + // stop the actor after some time + lib.Pause(time.Second) + + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With Batch request", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + // create a message to send to the test actor + message := new(testpb.TestReply) + // send the message to the actor + replies, err := remoting.RemoteBatchAsk(ctx, addr, []proto.Message{message}) + // perform some assertions + require.NoError(t, err) + require.Len(t, replies, 1) + require.NotNil(t, replies[0]) + require.True(t, replies[0].MessageIs(new(testpb.Reply))) + + actual := new(testpb.Reply) + err = replies[0].UnmarshalTo(actual) + require.NoError(t, err) + + expected := &testpb.Reply{Content: "received message"} + assert.True(t, proto.Equal(expected, actual)) + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With Batch service failure", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + 
actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // get the address of the actor + addr := &goaktpb.Address{ + Host: host, + Port: 2222, + Name: "", + Id: "", + } + + remoting := NewRemoting() + // create a message to send to the test actor + message := new(testpb.TestReply) + // send the message to the actor + reply, err := remoting.RemoteBatchAsk(ctx, address.From(addr), []proto.Message{message}) + // perform some assertions + require.Error(t, err) + require.Nil(t, reply) + + remoting.Close() + // stop the actor after some time + lib.Pause(time.Second) + t.Cleanup( + func() { + assert.NoError(t, sys.Stop(ctx)) + }, + ) + }, + ) + t.Run( + "With Batch when remoting is disabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + // let us disable remoting + actorsSystem := sys.(*actorSystem) + actorsSystem.remotingEnabled.Store(false) + + // create a message to send to the test actor + message := new(testpb.TestReply) + // send the message to the actor + reply, err := remoting.RemoteBatchAsk(ctx, addr, []proto.Message{message}) + // perform some assertions + require.Error(t, err) + require.EqualError(t, err, "failed_precondition: remoting is not enabled") + require.Nil(t, reply) + + // stop the actor after some time + lib.Pause(time.Second) + remoting.Close() + t.Cleanup( + func() { + assert.NoError(t, sys.Stop(ctx)) + }, + ) + }, + ) + t.Run( + "With actor not found", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithJanitorInterval(30*time.Millisecond), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + // stop the actor when wait for cleanup to take place + require.NoError(t, actorRef.Shutdown(ctx)) + lib.Pause(time.Second) + + // create a message to send to the test actor + message := new(testpb.TestReply) + // send the message to the actor 
+ reply, err := remoting.RemoteAsk(ctx, addr, message, time.Minute) + // perform some assertions + require.Error(t, err) + require.Contains(t, err.Error(), "not found") + require.Nil(t, reply) + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + t.Run( + "With Batch actor not found", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithJanitorInterval(30*time.Millisecond), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + remoting := NewRemoting() + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + // stop the actor when wait for cleanup to take place + require.NoError(t, actorRef.Shutdown(ctx)) + lib.Pause(time.Second) + + // create a message to send to the test actor + message := new(testpb.TestReply) + // send the message to the actor + reply, err := remoting.RemoteBatchAsk(ctx, addr, []proto.Message{message}) + // perform some assertions + require.Error(t, err) + require.Contains(t, err.Error(), "not found") + require.Nil(t, reply) + + // stop the actor after some time + lib.Pause(time.Second) + + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) +} + +func TestAPIRemoteLookup(t *testing.T) { + t.Run( + "When remoting is not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // let us disable remoting + actorsSystem := sys.(*actorSystem) + actorsSystem.remotingEnabled.Store(false) + remoting := NewRemoting() + // create a test actor + actorName := "test" + // get the address of the actor + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.Error(t, err) + require.EqualError(t, err, "failed_precondition: remoting is not enabled") + require.Nil(t, addr) + + t.Cleanup( + func() { + assert.NoError(t, sys.Stop(ctx)) + }, + ) + }, + ) +} + +func TestAPIRemoteReSpawn(t *testing.T) { + t.Run( + "When remoting is not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, 
int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // let us disable remoting + actorsSystem := sys.(*actorSystem) + actorsSystem.remotingEnabled.Store(false) + remoting := NewRemoting() + // create a test actor + actorName := "test" + // get the address of the actor + err = remoting.RemoteReSpawn(ctx, host, remotingPort, actorName) + require.Error(t, err) + require.EqualError(t, err, "failed_precondition: remoting is not enabled") + + remoting.Close() + t.Cleanup( + func() { + assert.NoError(t, sys.Stop(ctx)) + }, + ) + }, + ) + t.Run( + "When remoting is enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // assert the actor restart count + pid := actorRef + assert.Zero(t, pid.restartCount.Load()) + remoting := NewRemoting() + // get the address of the actor + err = remoting.RemoteReSpawn(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + assert.EqualValues(t, 1, pid.restartCount.Load()) + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) +} + +func TestAPIRemoteStop(t *testing.T) { + t.Run( + "When remoting is not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // let us disable remoting + actorsSystem := sys.(*actorSystem) + actorsSystem.remotingEnabled.Store(false) + + remoting := NewRemoting() + // create a test actor + actorName := "test" + // get the address of the actor + err = remoting.RemoteStop(ctx, host, remotingPort, actorName) + require.Error(t, err) + require.EqualError(t, err, "failed_precondition: remoting is not enabled") + + remoting.Close() + t.Cleanup( + func() { + assert.NoError(t, sys.Stop(ctx)) + }, + ) + }, + ) + t.Run( + "When remoting is enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + 
require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + lib.Pause(time.Second) + + // create a test actor + actorName := "test" + actor := newTestActor() + actorRef, err := sys.Spawn(ctx, actorName, actor) + require.NoError(t, err) + assert.NotNil(t, actorRef) + + // assert the actor restart count + pid := actorRef + assert.Zero(t, pid.restartCount.Load()) + + remoting := NewRemoting() + + // get the address of the actor + err = remoting.RemoteStop(ctx, host, remotingPort, actorName) + require.NoError(t, err) + + assert.Empty(t, sys.Actors()) + + // stop the actor after some time + lib.Pause(time.Second) + + remoting.Close() + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) +} + +func TestAPIRemoteSpawn(t *testing.T) { + t.Run( + "When remoting is enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + ports := dynaport.Get(1) + remotingPort := ports[0] + host := "127.0.0.1" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + // create an actor implementation and register it + actor := &exchanger{} + actorName := uuid.NewString() + + remoting := NewRemoting() + // fetching the address of the that actor should return nil address + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + require.Nil(t, addr) + + // register the actor + err = sys.Register(ctx, actor) + require.NoError(t, err) + + // spawn the remote actor + err = remoting.RemoteSpawn(ctx, host, remotingPort, actorName, "actors.exchanger") + require.NoError(t, err) + + // re-fetching the address of the actor should return not nil address after start + addr, err = remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + require.NotNil(t, addr) + + // send the message to exchanger actor one using remote messaging + reply, err := remoting.RemoteAsk(ctx, addr, new(testpb.TestReply), time.Minute) + + require.NoError(t, err) + require.NotNil(t, reply) + require.True(t, reply.MessageIs(new(testpb.Reply))) + + actual := new(testpb.Reply) + err = reply.UnmarshalTo(actual) + require.NoError(t, err) + + expected := new(testpb.Reply) + assert.True(t, proto.Equal(expected, actual)) + + remoting.Close() + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + + t.Run( + "When actor not registered", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + ports := dynaport.Get(1) + remotingPort := ports[0] + host := "127.0.0.1" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + WithRemoting(host, int32(remotingPort)), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + // create an actor implementation and register it + actorName := uuid.NewString() + + remoting := NewRemoting() + // fetching the address of the that actor should return nil address + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) + require.NoError(t, err) + 
require.Nil(t, addr) + + // spawn the remote actor + err = remoting.RemoteSpawn(ctx, host, remotingPort, actorName, "actors.exchanger") + require.Error(t, err) + assert.EqualError(t, err, ErrTypeNotRegistered.Error()) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) + + t.Run( + "When remoting is not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + ports := dynaport.Get(1) + remotingPort := ports[0] + host := "127.0.0.1" + + // create the actor system + sys, err := NewActorSystem( + "test", + WithLogger(logger), + WithPassivationDisabled(), + ) + // assert there are no error + require.NoError(t, err) + + // start the actor system + err = sys.Start(ctx) + assert.NoError(t, err) + + // create an actor implementation and register it + actorName := uuid.NewString() + remoting := NewRemoting() + // spawn the remote actor + err = remoting.RemoteSpawn(ctx, host, remotingPort, actorName, "actors.exchanger") + require.Error(t, err) + + t.Cleanup( + func() { + err = sys.Stop(ctx) + assert.NoError(t, err) + }, + ) + }, + ) +} diff --git a/actors/scheduler.go b/actors/scheduler.go index 353a4eb9..a208859b 100644 --- a/actors/scheduler.go +++ b/actors/scheduler.go @@ -57,6 +57,13 @@ func withSchedulerCluster(cluster cluster.Interface) schedulerOption { } } +// withSchedulerTls sets the TLS setting +func withSchedulerRemoting(remoting *Remoting) schedulerOption { + return func(scheduler *scheduler) { + scheduler.remoting = remoting + } +} + // scheduler defines the Go-Akt scheduler. // Its job is to help stack messages that will be delivered in the future to actors. type scheduler struct { @@ -71,6 +78,7 @@ type scheduler struct { // define the shutdown timeout stopTimeout time.Duration cluster cluster.Interface + remoting *Remoting } // newScheduler creates an instance of scheduler @@ -132,12 +140,14 @@ func (x *scheduler) ScheduleOnce(ctx context.Context, message proto.Message, pid return ErrSchedulerNotStarted } - job := job.NewFunctionJob[bool](func(ctx context.Context) (bool, error) { - if err := Tell(ctx, pid, message); err != nil { - return false, err - } - return true, nil - }) + job := job.NewFunctionJob[bool]( + func(ctx context.Context) (bool, error) { + if err := Tell(ctx, pid, message); err != nil { + return false, err + } + return true, nil + }, + ) jobDetails := quartz.NewJobDetail(job, quartz.NewJobKey(pid.Address().String())) if err := x.distributeJobKeyOrNot(ctx, jobDetails); err != nil { @@ -159,12 +169,14 @@ func (x *scheduler) Schedule(ctx context.Context, message proto.Message, pid *PI return ErrSchedulerNotStarted } - job := job.NewFunctionJob[bool](func(ctx context.Context) (bool, error) { - if err := Tell(ctx, pid, message); err != nil { - return false, err - } - return true, nil - }) + job := job.NewFunctionJob[bool]( + func(ctx context.Context) (bool, error) { + if err := Tell(ctx, pid, message); err != nil { + return false, err + } + return true, nil + }, + ) jobDetails := quartz.NewJobDetail(job, quartz.NewJobKey(pid.Address().String())) if err := x.distributeJobKeyOrNot(ctx, jobDetails); err != nil { @@ -177,52 +189,65 @@ func (x *scheduler) Schedule(ctx context.Context, message proto.Message, pid *PI return x.quartzScheduler.ScheduleJob(jobDetails, quartz.NewSimpleTrigger(interval)) } -// RemoteScheduleOnce schedules a message to be sent to a remote actor in the future. 
-// This requires remoting to be enabled on the actor system. -// This will send the given message to the actor after the given interval specified -// The message will be sent once -func (x *scheduler) RemoteScheduleOnce(ctx context.Context, message proto.Message, address *address.Address, interval time.Duration) error { +// ScheduleWithCron schedules a message to be sent to an actor in the future using a cron expression. +func (x *scheduler) ScheduleWithCron(ctx context.Context, message proto.Message, pid *PID, cronExpression string) error { x.mu.Lock() defer x.mu.Unlock() - if !x.started.Load() { return ErrSchedulerNotStarted } - job := job.NewFunctionJob[bool](func(ctx context.Context) (bool, error) { - if err := RemoteTell(ctx, address, message); err != nil { - return false, err - } - return true, nil - }) - key := fmt.Sprintf("%s@%s", address.GetName(), net.JoinHostPort(address.GetHost(), strconv.Itoa(int(address.GetPort())))) - jobDetails := quartz.NewJobDetail(job, quartz.NewJobKey(key)) + job := job.NewFunctionJob[bool]( + func(ctx context.Context) (bool, error) { + if err := Tell(ctx, pid, message); err != nil { + return false, err + } + return true, nil + }, + ) + + jobDetails := quartz.NewJobDetail(job, quartz.NewJobKey(pid.Address().String())) if err := x.distributeJobKeyOrNot(ctx, jobDetails); err != nil { if errors.Is(err, errSkipJobScheduling) { return nil } return err } - // schedule the job - return x.quartzScheduler.ScheduleJob(jobDetails, quartz.NewRunOnceTrigger(interval)) + + location := time.Now().Location() + trigger, err := quartz.NewCronTriggerWithLoc(cronExpression, location) + if err != nil { + x.logger.Error(fmt.Errorf("failed to schedule message: %w", err)) + return err + } + + return x.quartzScheduler.ScheduleJob(jobDetails, trigger) } -// RemoteSchedule schedules a message to be sent to a remote actor in the future. +// RemoteScheduleOnce schedules a message to be sent to a remote actor in the future. // This requires remoting to be enabled on the actor system. 
-// This will send the given message to the actor at the given interval specified -func (x *scheduler) RemoteSchedule(ctx context.Context, message proto.Message, address *address.Address, interval time.Duration) error { +// This will send the given message to the actor after the given interval specified +// The message will be sent once +func (x *scheduler) RemoteScheduleOnce(ctx context.Context, message proto.Message, address *address.Address, interval time.Duration) error { x.mu.Lock() defer x.mu.Unlock() if !x.started.Load() { return ErrSchedulerNotStarted } - job := job.NewFunctionJob[bool](func(ctx context.Context) (bool, error) { - if err := RemoteTell(ctx, address, message); err != nil { - return false, err - } - return true, nil - }) + + if x.remoting == nil { + return ErrRemotingDisabled + } + + job := job.NewFunctionJob[bool]( + func(ctx context.Context) (bool, error) { + if err := x.remoting.RemoteTell(ctx, address, message); err != nil { + return false, err + } + return true, nil + }, + ) key := fmt.Sprintf("%s@%s", address.GetName(), net.JoinHostPort(address.GetHost(), strconv.Itoa(int(address.GetPort())))) jobDetails := quartz.NewJobDetail(job, quartz.NewJobKey(key)) @@ -233,40 +258,43 @@ func (x *scheduler) RemoteSchedule(ctx context.Context, message proto.Message, a return err } // schedule the job - return x.quartzScheduler.ScheduleJob(jobDetails, quartz.NewSimpleTrigger(interval)) + return x.quartzScheduler.ScheduleJob(jobDetails, quartz.NewRunOnceTrigger(interval)) } -// ScheduleWithCron schedules a message to be sent to an actor in the future using a cron expression. -func (x *scheduler) ScheduleWithCron(ctx context.Context, message proto.Message, pid *PID, cronExpression string) error { +// RemoteSchedule schedules a message to be sent to a remote actor in the future. +// This requires remoting to be enabled on the actor system. +// This will send the given message to the actor at the given interval specified +func (x *scheduler) RemoteSchedule(ctx context.Context, message proto.Message, address *address.Address, interval time.Duration) error { x.mu.Lock() defer x.mu.Unlock() + if !x.started.Load() { return ErrSchedulerNotStarted } - job := job.NewFunctionJob[bool](func(ctx context.Context) (bool, error) { - if err := Tell(ctx, pid, message); err != nil { - return false, err - } - return true, nil - }) + if x.remoting == nil { + return ErrRemotingDisabled + } - jobDetails := quartz.NewJobDetail(job, quartz.NewJobKey(pid.Address().String())) + job := job.NewFunctionJob[bool]( + func(ctx context.Context) (bool, error) { + if err := x.remoting.RemoteTell(ctx, address, message); err != nil { + return false, err + } + return true, nil + }, + ) + + key := fmt.Sprintf("%s@%s", address.GetName(), net.JoinHostPort(address.GetHost(), strconv.Itoa(int(address.GetPort())))) + jobDetails := quartz.NewJobDetail(job, quartz.NewJobKey(key)) if err := x.distributeJobKeyOrNot(ctx, jobDetails); err != nil { if errors.Is(err, errSkipJobScheduling) { return nil } return err } - - location := time.Now().Location() - trigger, err := quartz.NewCronTriggerWithLoc(cronExpression, location) - if err != nil { - x.logger.Error(fmt.Errorf("failed to schedule message: %w", err)) - return err - } - - return x.quartzScheduler.ScheduleJob(jobDetails, trigger) + // schedule the job + return x.quartzScheduler.ScheduleJob(jobDetails, quartz.NewSimpleTrigger(interval)) } // RemoteScheduleWithCron schedules a message to be sent to an actor in the future using a cron expression. 
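// Editor's note — illustrative sketch, not part of this patch. After this refactor the
// scheduler's remote scheduling methods route messages through an injected *Remoting
// instance (set via withSchedulerRemoting) instead of the removed package-level
// RemoteTell helper, and they return ErrRemotingDisabled when no Remoting is configured.
// The identifiers below are taken from this diff; the host, port, actor and system names
// are placeholder values.
func remoteScheduleSketch(ctx context.Context) error {
	// Build a scheduler wired with remoting, mirroring what NewActorSystem now does.
	sched := newScheduler(log.DiscardLogger, time.Second, withSchedulerRemoting(NewRemoting()))
	sched.Start(ctx)
	defer sched.Stop(ctx)

	// Address of the remote actor to message (placeholder values).
	addr := address.New("receiver", "test-system", "127.0.0.1", 50051)

	// Delivered once after one minute via remoting; without withSchedulerRemoting this
	// call would fail with ErrRemotingDisabled.
	return sched.RemoteScheduleOnce(ctx, new(testpb.TestSend), addr, time.Minute)
}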
@@ -278,12 +306,18 @@ func (x *scheduler) RemoteScheduleWithCron(ctx context.Context, message proto.Me return ErrSchedulerNotStarted } - job := job.NewFunctionJob[bool](func(ctx context.Context) (bool, error) { - if err := RemoteTell(ctx, address, message); err != nil { - return false, err - } - return true, nil - }) + if x.remoting == nil { + return ErrRemotingDisabled + } + + job := job.NewFunctionJob[bool]( + func(ctx context.Context) (bool, error) { + if err := x.remoting.RemoteTell(ctx, address, message); err != nil { + return false, err + } + return true, nil + }, + ) key := fmt.Sprintf("%s@%s", address.GetName(), net.JoinHostPort(address.GetHost(), strconv.Itoa(int(address.GetPort())))) jobDetails := quartz.NewJobDetail(job, quartz.NewJobKey(key)) diff --git a/actors/scheduler_test.go b/actors/scheduler_test.go index 1085f710..9b28bf2f 100644 --- a/actors/scheduler_test.go +++ b/actors/scheduler_test.go @@ -46,9 +46,11 @@ func TestScheduler(t *testing.T) { // define the logger to use logger := log.DiscardLogger // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), - WithPassivationDisabled()) + WithPassivationDisabled(), + ) // assert there are no error require.NoError(t, err) @@ -89,10 +91,12 @@ func TestScheduler(t *testing.T) { // define the logger to use logger := log.DiscardLogger // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), WithJanitorInterval(time.Minute), - WithPassivationDisabled()) + WithPassivationDisabled(), + ) // assert there are no error require.NoError(t, err) @@ -143,7 +147,8 @@ func TestScheduler(t *testing.T) { newTestActor(), withInitMaxRetries(1), withCustomLogger(log.DiscardLogger), - withAskTimeout(replyTimeout)) + withAskTimeout(replyTimeout), + ) require.NoError(t, err) assert.NotNil(t, pid) @@ -167,7 +172,8 @@ func TestScheduler(t *testing.T) { host := "0.0.0.0" // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -188,8 +194,9 @@ func TestScheduler(t *testing.T) { require.NoError(t, err) assert.NotNil(t, actorRef) + remoting := NewRemoting() // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) require.NoError(t, err) // send a message to the actor after 100 ms @@ -205,6 +212,7 @@ func TestScheduler(t *testing.T) { assert.Empty(t, keys) assert.EqualValues(t, 1, actorRef.ProcessedCount()-1) + remoting.Close() // stop the actor err = newActorSystem.Stop(ctx) assert.NoError(t, err) @@ -220,7 +228,8 @@ func TestScheduler(t *testing.T) { host := "0.0.0.0" // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -245,8 +254,9 @@ func TestScheduler(t *testing.T) { require.NoError(t, err) assert.NotNil(t, actorRef) + remoting := NewRemoting() // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) require.NoError(t, err) // send a message to the actor after 100 ms @@ -265,9 +275,11 @@ func TestScheduler(t *testing.T) { // define the 
logger to use logger := log.DiscardLogger // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), - WithPassivationDisabled()) + WithPassivationDisabled(), + ) // assert there are no error require.NoError(t, err) @@ -307,9 +319,11 @@ func TestScheduler(t *testing.T) { // define the logger to use logger := log.DiscardLogger // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), - WithPassivationDisabled()) + WithPassivationDisabled(), + ) // assert there are no error require.NoError(t, err) @@ -345,9 +359,11 @@ func TestScheduler(t *testing.T) { // define the logger to use logger := log.DiscardLogger // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), - WithPassivationDisabled()) + WithPassivationDisabled(), + ) // assert there are no error require.NoError(t, err) @@ -393,7 +409,8 @@ func TestScheduler(t *testing.T) { host := "0.0.0.0" // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -414,8 +431,9 @@ func TestScheduler(t *testing.T) { require.NoError(t, err) assert.NotNil(t, actorRef) + remoting := NewRemoting() // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) require.NoError(t, err) // send a message to the actor after 100 ms @@ -444,7 +462,8 @@ func TestScheduler(t *testing.T) { host := "0.0.0.0" // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -465,8 +484,9 @@ func TestScheduler(t *testing.T) { require.NoError(t, err) assert.NotNil(t, actorRef) + remoting := NewRemoting() // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) require.NoError(t, err) // send a message to the actor after 100 ms @@ -491,7 +511,8 @@ func TestScheduler(t *testing.T) { host := "0.0.0.0" // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -515,8 +536,9 @@ func TestScheduler(t *testing.T) { require.NoError(t, err) assert.NotNil(t, actorRef) + remoting := NewRemoting() // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) require.NoError(t, err) // send a message to the actor after 100 ms @@ -537,9 +559,11 @@ func TestScheduler(t *testing.T) { // define the logger to use logger := log.DiscardLogger // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), - WithPassivationDisabled()) + WithPassivationDisabled(), + ) // assert there are no error require.NoError(t, err) @@ -573,8 +597,6 @@ func TestScheduler(t *testing.T) { lib.Pause(500 * time.Millisecond) require.EqualValues(t, 1, actorRef.ProcessedCount()-1) - 
lib.Pause(500 * time.Millisecond) - require.EqualValues(t, 2, actorRef.ProcessedCount()-1) // stop the actor err = newActorSystem.Stop(ctx) @@ -586,10 +608,12 @@ func TestScheduler(t *testing.T) { // define the logger to use logger := log.DiscardLogger // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), WithJanitorInterval(time.Minute), - WithPassivationDisabled()) + WithPassivationDisabled(), + ) // assert there are no error require.NoError(t, err) @@ -635,9 +659,11 @@ func TestScheduler(t *testing.T) { // define the logger to use logger := log.DiscardLogger // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), - WithPassivationDisabled()) + WithPassivationDisabled(), + ) // assert there are no error require.NoError(t, err) @@ -680,7 +706,8 @@ func TestScheduler(t *testing.T) { host := "0.0.0.0" // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -701,8 +728,9 @@ func TestScheduler(t *testing.T) { require.NoError(t, err) assert.NotNil(t, actorRef) + remoting := NewRemoting() // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) require.NoError(t, err) // send a message to the actor after 100 ms @@ -738,7 +766,8 @@ func TestScheduler(t *testing.T) { host := "0.0.0.0" // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithRemoting(host, int32(remotingPort)), @@ -763,8 +792,9 @@ func TestScheduler(t *testing.T) { require.NoError(t, err) assert.NotNil(t, actorRef) + remoting := NewRemoting() // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) require.NoError(t, err) // send a message to the actor after 100 ms @@ -788,7 +818,8 @@ func TestScheduler(t *testing.T) { host := "0.0.0.0" // create the actor system - newActorSystem, err := NewActorSystem("test", + newActorSystem, err := NewActorSystem( + "test", WithLogger(logger), WithPassivationDisabled(), WithJanitorInterval(time.Minute), @@ -810,8 +841,9 @@ func TestScheduler(t *testing.T) { require.NoError(t, err) assert.NotNil(t, actorRef) + remoting := NewRemoting() // get the address of the actor - addr, err := RemoteLookup(ctx, host, remotingPort, actorName) + addr, err := remoting.RemoteLookup(ctx, host, remotingPort, actorName) require.NoError(t, err) require.NoError(t, newActorSystem.Kill(ctx, actorName)) @@ -837,4 +869,73 @@ func TestScheduler(t *testing.T) { err = newActorSystem.Stop(ctx) assert.NoError(t, err) }) + t.Run("With RemoteScheduleWithCron with cron expression when remoting not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + scheduler := newScheduler(logger, time.Second) + scheduler.Start(ctx) + + addr := address.New("test", "test", host, remotingPort) + + // send a message to the actor after 100 ms + message := 
new(testpb.TestSend) + // set cron expression to run every second + const expr = "* * * ? * *" + err := scheduler.RemoteScheduleWithCron(ctx, message, addr, expr) + require.Error(t, err) + assert.EqualError(t, err, ErrRemotingDisabled.Error()) + + scheduler.Stop(ctx) + }) + t.Run("With RemoteSchedule when remoting not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + scheduler := newScheduler(logger, time.Second) + scheduler.Start(ctx) + + addr := address.New("test", "test", host, remotingPort) + + // send a message to the actor after 100 ms + message := new(testpb.TestSend) + err := scheduler.RemoteSchedule(ctx, message, addr, time.Second) + require.Error(t, err) + assert.EqualError(t, err, ErrRemotingDisabled.Error()) + + scheduler.Stop(ctx) + }) + t.Run("With RemoteScheduleOnce when remoting not enabled", func(t *testing.T) { + // create the context + ctx := context.TODO() + // define the logger to use + logger := log.DiscardLogger + // generate the remoting port + nodePorts := dynaport.Get(1) + remotingPort := nodePorts[0] + host := "0.0.0.0" + + scheduler := newScheduler(logger, time.Second) + scheduler.Start(ctx) + + addr := address.New("test", "test", host, remotingPort) + // send a message to the actor after 100 ms + message := new(testpb.TestSend) + err := scheduler.RemoteScheduleOnce(ctx, message, addr, time.Second) + require.Error(t, err) + assert.EqualError(t, err, ErrRemotingDisabled.Error()) + scheduler.Stop(ctx) + }) } diff --git a/bench/benchmarkpb/benchmark.pb.go b/bench/benchmarkpb/benchmark.pb.go index 681c7999..01ec7920 100644 --- a/bench/benchmarkpb/benchmark.pb.go +++ b/bench/benchmarkpb/benchmark.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: benchmark/benchmark.proto @@ -28,11 +28,9 @@ type BenchTell struct { func (x *BenchTell) Reset() { *x = BenchTell{} - if protoimpl.UnsafeEnabled { - mi := &file_benchmark_benchmark_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_benchmark_benchmark_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BenchTell) String() string { @@ -43,7 +41,7 @@ func (*BenchTell) ProtoMessage() {} func (x *BenchTell) ProtoReflect() protoreflect.Message { mi := &file_benchmark_benchmark_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -66,11 +64,9 @@ type BenchRequest struct { func (x *BenchRequest) Reset() { *x = BenchRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_benchmark_benchmark_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_benchmark_benchmark_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BenchRequest) String() string { @@ -81,7 +77,7 @@ func (*BenchRequest) ProtoMessage() {} func (x *BenchRequest) ProtoReflect() protoreflect.Message { mi := &file_benchmark_benchmark_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -104,11 +100,9 @@ type BenchResponse struct { func (x *BenchResponse) Reset() { *x = BenchResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_benchmark_benchmark_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_benchmark_benchmark_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BenchResponse) String() string { @@ -119,7 +113,7 @@ func (*BenchResponse) ProtoMessage() {} func (x *BenchResponse) ProtoReflect() protoreflect.Message { mi := &file_benchmark_benchmark_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -142,11 +136,9 @@ type Ping struct { func (x *Ping) Reset() { *x = Ping{} - if protoimpl.UnsafeEnabled { - mi := &file_benchmark_benchmark_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_benchmark_benchmark_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Ping) String() string { @@ -157,7 +149,7 @@ func (*Ping) ProtoMessage() {} func (x *Ping) ProtoReflect() protoreflect.Message { mi := &file_benchmark_benchmark_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -180,11 +172,9 @@ type Pong struct { func (x *Pong) Reset() { *x = Pong{} - if protoimpl.UnsafeEnabled { - mi := &file_benchmark_benchmark_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_benchmark_benchmark_proto_msgTypes[4] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Pong) String() string { @@ -195,7 +185,7 @@ func (*Pong) ProtoMessage() {} func (x *Pong) ProtoReflect() protoreflect.Message { mi := &file_benchmark_benchmark_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -266,68 +256,6 @@ func file_benchmark_benchmark_proto_init() { if File_benchmark_benchmark_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_benchmark_benchmark_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*BenchTell); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_benchmark_benchmark_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*BenchRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_benchmark_benchmark_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*BenchResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_benchmark_benchmark_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Ping); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_benchmark_benchmark_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Pong); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/client/client.go b/client/client.go index d9136d86..6407628d 100644 --- a/client/client.go +++ b/client/client.go @@ -26,10 +26,6 @@ package client import ( "context" - "fmt" - "net" - sdhttp "net/http" - "strconv" "sync" "time" @@ -39,7 +35,7 @@ import ( "github.com/tochemey/goakt/v2/actors" "github.com/tochemey/goakt/v2/address" "github.com/tochemey/goakt/v2/goaktpb" - "github.com/tochemey/goakt/v2/internal/http" + "github.com/tochemey/goakt/v2/internal/errorschain" "github.com/tochemey/goakt/v2/internal/internalpb" "github.com/tochemey/goakt/v2/internal/internalpb/internalpbconnect" "github.com/tochemey/goakt/v2/internal/types" @@ -56,7 +52,6 @@ type Client struct { balancer Balancer closeSignal chan types.Unit refreshInterval time.Duration - client *sdhttp.Client } // New creates an instance of Client. The provided nodes are the cluster nodes. @@ -64,58 +59,46 @@ type Client struct { // and remoting port of the nodes. The nodes list will be load balanced based upon the load-balancing // strategy defined by default round-robin will be used. // An instance of the Client can be reused and it is thread safe. -func New(ctx context.Context, addresses []string, opts ...Option) (*Client, error) { - chain := validation. - New(validation.FailFast()). - AddAssertion(len(addresses) != 0, "addresses are required") - for _, host := range addresses { - chain = chain.AddValidator(validation.NewTCPAddressValidator(host)) - } - if err := chain.Validate(); err != nil { +func New(ctx context.Context, nodes []*Node, opts ...Option) (*Client, error) { + if err := errorschain. + New(errorschain.ReturnFirst()). + AddError(validateNodes(nodes)). 
+ AddError(setNodesMetric(ctx, nodes)). + Error(); err != nil { return nil, err } - client := &Client{ + + cl := &Client{ locker: &sync.Mutex{}, strategy: RoundRobinStrategy, refreshInterval: -1, - client: http.NewClient(), } // apply the various options for _, opt := range opts { - opt.Apply(client) + opt.Apply(cl) } - var nodes []*Node - for _, url := range addresses { - weight, ok, err := client.getNodeMetric(ctx, url) - if err != nil { - return nil, err - } - - if !ok { - continue - } - nodes = append(nodes, NewNode(url, weight)) - } - client.balancer = getBalancer(client.strategy) - client.nodes = nodes - client.balancer.Set(client.nodes...) + cl.balancer = getBalancer(cl.strategy) + cl.nodes = nodes + cl.balancer.Set(cl.nodes...) // only refresh addresses when refresh interval is set - if client.refreshInterval > 0 { - client.closeSignal = make(chan types.Unit, 1) - go client.refreshNodesLoop() + if cl.refreshInterval > 0 { + cl.closeSignal = make(chan types.Unit, 1) + go cl.refreshNodesLoop() } - return client, nil + return cl, nil } // Close closes the Client connection func (x *Client) Close() { x.locker.Lock() + for _, node := range x.nodes { + node.Free() + } x.nodes = make([]*Node, 0) if x.refreshInterval > 0 { close(x.closeSignal) } - x.client.CloseIdleConnections() x.locker.Unlock() } @@ -124,14 +107,19 @@ func (x *Client) Kinds(ctx context.Context) ([]string, error) { x.locker.Lock() defer x.locker.Unlock() - host, port := nextRemotingHostAndPort(x.balancer) + node := nextNode(x.balancer) service := internalpbconnect.NewClusterServiceClient( - http.NewClient(), - http.URL(host, port)) + node.HTTPClient(), + node.HTTPEndPoint(), + ) - response, err := service.GetKinds(ctx, connect.NewRequest(&internalpb.GetKindsRequest{ - NodeAddress: fmt.Sprintf("%s:%d", host, port), - })) + response, err := service.GetKinds( + ctx, connect.NewRequest( + &internalpb.GetKindsRequest{ + NodeAddress: node.Address(), + }, + ), + ) if err != nil { return nil, err } @@ -141,9 +129,10 @@ func (x *Client) Kinds(ctx context.Context) ([]string, error) { // Spawn creates an actor provided the actor name. func (x *Client) Spawn(ctx context.Context, actor *Actor) (err error) { x.locker.Lock() - remoteHost, remotePort := nextRemotingHostAndPort(x.balancer) + node := nextNode(x.balancer) x.locker.Unlock() - return actors.RemoteSpawn(ctx, remoteHost, remotePort, actor.Name(), actor.Kind()) + remoteHost, remotePort := node.HostAndPort() + return node.Remoting().RemoteSpawn(ctx, remoteHost, remotePort, actor.Name(), actor.Kind()) } // SpawnWithBalancer creates an actor provided the actor name and the balancer strategy @@ -151,41 +140,59 @@ func (x *Client) SpawnWithBalancer(ctx context.Context, actor *Actor, strategy B x.locker.Lock() balancer := getBalancer(strategy) balancer.Set(x.nodes...) 
- remoteHost, remotePort := nextRemotingHostAndPort(balancer) + node := nextNode(balancer) + remoteHost, remotePort := node.HostAndPort() x.locker.Unlock() - return actors.RemoteSpawn(ctx, remoteHost, remotePort, actor.Name(), actor.Kind()) + return node.Remoting().RemoteSpawn(ctx, remoteHost, remotePort, actor.Name(), actor.Kind()) } // ReSpawn restarts a given actor func (x *Client) ReSpawn(ctx context.Context, actor *Actor) (err error) { x.locker.Lock() - remoteHost, remotePort := nextRemotingHostAndPort(x.balancer) + node := nextNode(x.balancer) x.locker.Unlock() - return actors.RemoteReSpawn(ctx, remoteHost, remotePort, actor.Name()) + remoteHost, remotePort := node.HostAndPort() + return node.Remoting().RemoteReSpawn(ctx, remoteHost, remotePort, actor.Name()) } // Tell sends a message to a given actor provided the actor name. // If the given actor does not exist it will be created automatically when // Client mode is enabled func (x *Client) Tell(ctx context.Context, actor *Actor, message proto.Message) error { + x.locker.Lock() + node := nextNode(x.balancer) + x.locker.Unlock() + remoteHost, remotePort := node.HostAndPort() // lookup the actor address - address, err := x.Whereis(ctx, actor) + address, err := node.Remoting().RemoteLookup(ctx, remoteHost, remotePort, actor.Name()) if err != nil { return err } - return actors.RemoteTell(ctx, address, message) + // no address found + if address == nil || proto.Equal(address, new(goaktpb.Address)) { + return actors.ErrActorNotFound(actor.Name()) + } + return node.remoting.RemoteTell(ctx, address, message) } // Ask sends a message to a given actor provided the actor name and expects a response. // If the given actor does not exist it will be created automatically when // Client mode is enabled. This will block until a response is received or timed out. 
func (x *Client) Ask(ctx context.Context, actor *Actor, message proto.Message, timeout time.Duration) (reply proto.Message, err error) { + x.locker.Lock() + node := nextNode(x.balancer) + x.locker.Unlock() + remoteHost, remotePort := node.HostAndPort() // lookup the actor address - address, err := x.Whereis(ctx, actor) + address, err := node.Remoting().RemoteLookup(ctx, remoteHost, remotePort, actor.Name()) if err != nil { return nil, err } - response, err := actors.RemoteAsk(ctx, address, message, timeout) + // no address found + if address == nil || proto.Equal(address, new(goaktpb.Address)) { + return nil, actors.ErrActorNotFound(actor.Name()) + } + response, err := node.Remoting().RemoteAsk(ctx, address, message, timeout) if err != nil { return nil, err } @@ -195,18 +202,20 @@ func (x *Client) Ask(ctx context.Context, actor *Actor, message proto.Message, t // Stop stops or kills a given actor in the Client func (x *Client) Stop(ctx context.Context, actor *Actor) error { x.locker.Lock() - remoteHost, remotePort := nextRemotingHostAndPort(x.balancer) + node := nextNode(x.balancer) x.locker.Unlock() - return actors.RemoteStop(ctx, remoteHost, remotePort, actor.Name()) + remoteHost, remotePort := node.HostAndPort() + return node.Remoting().RemoteStop(ctx, remoteHost, remotePort, actor.Name()) } // Whereis finds and returns the address of a given actor func (x *Client) Whereis(ctx context.Context, actor *Actor) (*address.Address, error) { x.locker.Lock() - remoteHost, remotePort := nextRemotingHostAndPort(x.balancer) + node := nextNode(x.balancer) x.locker.Unlock() + remoteHost, remotePort := node.HostAndPort() // lookup the actor address - address, err := actors.RemoteLookup(ctx, remoteHost, remotePort, actor.Name()) + address, err := node.remoting.RemoteLookup(ctx, remoteHost, remotePort, actor.Name()) if err != nil { return nil, err } @@ -217,12 +226,9 @@ func (x *Client) Whereis(ctx context.Context, actor *Actor) (*address.Address, e return address, nil } -// nextRemotingHostAndPort returns the next node host and port -func nextRemotingHostAndPort(balancer Balancer) (host string, port int) { - node := balancer.Next() - host, p, _ := net.SplitHostPort(node.Address()) - port, _ = strconv.Atoi(p) - return +// nextNode returns the next node host and port +func nextNode(balancer Balancer) *Node { + return balancer.Next() } // updateNodes updates the list of nodes availables in the pool @@ -232,7 +238,7 @@ func (x *Client) updateNodes(ctx context.Context) error { defer x.locker.Lock() for _, node := range x.nodes { - weight, ok, err := x.getNodeMetric(ctx, node.Address()) + weight, ok, err := getNodeMetric(ctx, node) if err != nil { return err } @@ -281,14 +287,13 @@ func getBalancer(strategy BalancerStrategy) Balancer { } // getNodeMetric pings a given node and get the node metric info and -func (x *Client) getNodeMetric(ctx context.Context, node string) (int, bool, error) { - host, p, _ := net.SplitHostPort(node) - port, _ := strconv.Atoi(p) +func getNodeMetric(ctx context.Context, node *Node) (int, bool, error) { service := internalpbconnect.NewClusterServiceClient( - x.client, - http.URL(host, port)) + node.HTTPClient(), + node.HTTPEndPoint(), + ) - response, err := service.GetNodeMetric(ctx, connect.NewRequest(&internalpb.GetNodeMetricRequest{NodeAddress: node})) + response, err := service.GetNodeMetric(ctx, connect.NewRequest(&internalpb.GetNodeMetricRequest{NodeAddress: node.Address()})) if err != nil { code := connect.CodeOf(err) // here node may not be available @@ -302,3 +307,37 @@ 
func (x *Client) getNodeMetric(ctx context.Context, node string) (int, bool, err } return int(response.Msg.GetActorsCount()), true, nil } + +// validateNodes validate the incoming nodes +func validateNodes(nodes []*Node) error { + errs := make([]error, len(nodes)) + for index, node := range nodes { + errs[index] = node.Validate() + } + + return errorschain. + New(errorschain.ReturnFirst()). + AddError( + validation. + New(validation.FailFast()). + AddAssertion(len(nodes) != 0, "nodes are required").Validate(), + ). + AddErrors(errs...). + Error() +} + +// setNodesMetric +func setNodesMetric(ctx context.Context, nodes []*Node) error { + for _, node := range nodes { + weight, ok, err := getNodeMetric(ctx, node) + if err != nil { + return err + } + + if !ok { + continue + } + node.SetWeight(float64(weight)) + } + return nil +} diff --git a/client/client_test.go b/client/client_test.go index 140d3fff..e3a4f790 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -69,7 +69,12 @@ func TestClient(t *testing.T) { fmt.Sprintf("%s:%d", node3Host, node3Port), } - client, err := New(ctx, addresses) + nodes := make([]*Node, len(addresses)) + for i, addr := range addresses { + nodes[i] = NewNode(addr) + } + + client, err := New(ctx, nodes) require.NoError(t, err) require.NotNil(t, client) @@ -107,20 +112,21 @@ func TestClient(t *testing.T) { err = client.Stop(ctx, actor) require.NoError(t, err) - t.Cleanup(func() { - client.Close() + t.Cleanup( + func() { + client.Close() - require.NoError(t, sys1.Stop(ctx)) - require.NoError(t, sys2.Stop(ctx)) - require.NoError(t, sys3.Stop(ctx)) + require.NoError(t, sys1.Stop(ctx)) + require.NoError(t, sys2.Stop(ctx)) + require.NoError(t, sys3.Stop(ctx)) - require.NoError(t, sd1.Close()) - require.NoError(t, sd2.Close()) - require.NoError(t, sd3.Close()) + require.NoError(t, sd1.Close()) + require.NoError(t, sd2.Close()) + require.NoError(t, sd3.Close()) - srv.Shutdown() - lib.Pause(time.Second) - }) + srv.Shutdown() + lib.Pause(time.Second) + }) }) t.Run("With randomRouter strategy", func(t *testing.T) { ctx := context.TODO() @@ -142,9 +148,16 @@ func TestClient(t *testing.T) { fmt.Sprintf("%s:%d", node3Host, node3Port), } - client, err := New(ctx, - addresses, - WithBalancerStrategy(RoundRobinStrategy)) + nodes := make([]*Node, len(addresses)) + for i, addr := range addresses { + nodes[i] = NewNode(addr) + } + + client, err := New( + ctx, + nodes, + WithBalancerStrategy(RoundRobinStrategy), + ) require.NoError(t, err) require.NotNil(t, client) @@ -183,21 +196,23 @@ func TestClient(t *testing.T) { err = client.Stop(ctx, actor) require.NoError(t, err) - t.Cleanup(func() { - client.Close() + t.Cleanup( + func() { + client.Close() - require.NoError(t, sys1.Stop(ctx)) - require.NoError(t, sys2.Stop(ctx)) - require.NoError(t, sys3.Stop(ctx)) + require.NoError(t, sys1.Stop(ctx)) + require.NoError(t, sys2.Stop(ctx)) + require.NoError(t, sys3.Stop(ctx)) - require.NoError(t, sd1.Close()) - require.NoError(t, sd2.Close()) - require.NoError(t, sd3.Close()) + require.NoError(t, sd1.Close()) + require.NoError(t, sd2.Close()) + require.NoError(t, sd3.Close()) - srv.Shutdown() + srv.Shutdown() - lib.Pause(time.Second) - }) + lib.Pause(time.Second) + }, + ) }) t.Run("With Least-Load strategy", func(t *testing.T) { ctx := context.TODO() @@ -219,9 +234,16 @@ func TestClient(t *testing.T) { fmt.Sprintf("%s:%d", node3Host, node3Port), } - client, err := New(ctx, - addresses, - WithBalancerStrategy(LeastLoadStrategy)) + nodes := make([]*Node, len(addresses)) + for i, addr := 
range addresses { + nodes[i] = NewNode(addr) + } + + client, err := New( + ctx, + nodes, + WithBalancerStrategy(LeastLoadStrategy), + ) require.NoError(t, err) require.NotNil(t, client) @@ -260,20 +282,22 @@ func TestClient(t *testing.T) { err = client.Stop(ctx, actor) require.NoError(t, err) - t.Cleanup(func() { - client.Close() + t.Cleanup( + func() { + client.Close() - require.NoError(t, sys1.Stop(ctx)) - require.NoError(t, sys2.Stop(ctx)) - require.NoError(t, sys3.Stop(ctx)) + require.NoError(t, sys1.Stop(ctx)) + require.NoError(t, sys2.Stop(ctx)) + require.NoError(t, sys3.Stop(ctx)) - require.NoError(t, sd1.Close()) - require.NoError(t, sd2.Close()) - require.NoError(t, sd3.Close()) + require.NoError(t, sd1.Close()) + require.NoError(t, sd2.Close()) + require.NoError(t, sd3.Close()) - srv.Shutdown() - lib.Pause(time.Second) - }) + srv.Shutdown() + lib.Pause(time.Second) + }, + ) }) t.Run("With Refresh Interval", func(t *testing.T) { ctx := context.TODO() @@ -297,7 +321,12 @@ func TestClient(t *testing.T) { fmt.Sprintf("%s:%d", node3Host, node3Port), } - client, err := New(ctx, addresses, WithRefresh(time.Minute)) + nodes := make([]*Node, len(addresses)) + for i, addr := range addresses { + nodes[i] = NewNode(addr) + } + + client, err := New(ctx, nodes, WithRefresh(time.Minute)) require.NoError(t, err) require.NotNil(t, client) @@ -335,21 +364,23 @@ func TestClient(t *testing.T) { err = client.Stop(ctx, actor) require.NoError(t, err) - t.Cleanup(func() { - client.Close() + t.Cleanup( + func() { + client.Close() - require.NoError(t, sys1.Stop(ctx)) - require.NoError(t, sys2.Stop(ctx)) - require.NoError(t, sys3.Stop(ctx)) + require.NoError(t, sys1.Stop(ctx)) + require.NoError(t, sys2.Stop(ctx)) + require.NoError(t, sys3.Stop(ctx)) - require.NoError(t, sd1.Close()) - require.NoError(t, sd2.Close()) - require.NoError(t, sd3.Close()) + require.NoError(t, sd1.Close()) + require.NoError(t, sd2.Close()) + require.NoError(t, sd3.Close()) - srv.Shutdown() + srv.Shutdown() - lib.Pause(time.Second) - }) + lib.Pause(time.Second) + }, + ) }) t.Run("With SpawnWithBalancer", func(t *testing.T) { ctx := context.TODO() @@ -373,7 +404,12 @@ func TestClient(t *testing.T) { fmt.Sprintf("%s:%d", node3Host, node3Port), } - client, err := New(ctx, addresses) + nodes := make([]*Node, len(addresses)) + for i, addr := range addresses { + nodes[i] = NewNode(addr) + } + + client, err := New(ctx, nodes) require.NoError(t, err) require.NotNil(t, client) @@ -411,20 +447,22 @@ func TestClient(t *testing.T) { err = client.Stop(ctx, actor) require.NoError(t, err) - t.Cleanup(func() { - client.Close() + t.Cleanup( + func() { + client.Close() - require.NoError(t, sys1.Stop(ctx)) - require.NoError(t, sys2.Stop(ctx)) - require.NoError(t, sys3.Stop(ctx)) + require.NoError(t, sys1.Stop(ctx)) + require.NoError(t, sys2.Stop(ctx)) + require.NoError(t, sys3.Stop(ctx)) - require.NoError(t, sd1.Close()) - require.NoError(t, sd2.Close()) - require.NoError(t, sd3.Close()) + require.NoError(t, sd1.Close()) + require.NoError(t, sd2.Close()) + require.NoError(t, sd3.Close()) - srv.Shutdown() - lib.Pause(time.Second) - }) + srv.Shutdown() + lib.Pause(time.Second) + }, + ) }) t.Run("With ReSpawn", func(t *testing.T) { ctx := context.TODO() @@ -448,7 +486,12 @@ func TestClient(t *testing.T) { fmt.Sprintf("%s:%d", node3Host, node3Port), } - client, err := New(ctx, addresses) + nodes := make([]*Node, len(addresses)) + for i, addr := range addresses { + nodes[i] = NewNode(addr) + } + + client, err := New(ctx, nodes) require.NoError(t, err) 
require.NotNil(t, client) @@ -491,20 +534,22 @@ func TestClient(t *testing.T) { err = client.Stop(ctx, actor) require.NoError(t, err) - t.Cleanup(func() { - client.Close() + t.Cleanup( + func() { + client.Close() - require.NoError(t, sys1.Stop(ctx)) - require.NoError(t, sys2.Stop(ctx)) - require.NoError(t, sys3.Stop(ctx)) + require.NoError(t, sys1.Stop(ctx)) + require.NoError(t, sys2.Stop(ctx)) + require.NoError(t, sys3.Stop(ctx)) - require.NoError(t, sd1.Close()) - require.NoError(t, sd2.Close()) - require.NoError(t, sd3.Close()) + require.NoError(t, sd1.Close()) + require.NoError(t, sd2.Close()) + require.NoError(t, sd3.Close()) - srv.Shutdown() - lib.Pause(time.Second) - }) + srv.Shutdown() + lib.Pause(time.Second) + }, + ) }) t.Run("With ReSpawn after Stop", func(t *testing.T) { ctx := context.TODO() @@ -528,7 +573,12 @@ func TestClient(t *testing.T) { fmt.Sprintf("%s:%d", node3Host, node3Port), } - client, err := New(ctx, addresses) + nodes := make([]*Node, len(addresses)) + for i, addr := range addresses { + nodes[i] = NewNode(addr) + } + + client, err := New(ctx, nodes) require.NoError(t, err) require.NotNil(t, client) @@ -571,29 +621,107 @@ func TestClient(t *testing.T) { err = client.ReSpawn(ctx, actor) require.NoError(t, err) - t.Cleanup(func() { - client.Close() + t.Cleanup( + func() { + client.Close() - require.NoError(t, sys1.Stop(ctx)) - require.NoError(t, sys2.Stop(ctx)) - require.NoError(t, sys3.Stop(ctx)) + require.NoError(t, sys1.Stop(ctx)) + require.NoError(t, sys2.Stop(ctx)) + require.NoError(t, sys3.Stop(ctx)) - require.NoError(t, sd1.Close()) - require.NoError(t, sd2.Close()) - require.NoError(t, sd3.Close()) + require.NoError(t, sd1.Close()) + require.NoError(t, sd2.Close()) + require.NoError(t, sd3.Close()) - srv.Shutdown() - lib.Pause(time.Second) - }) + srv.Shutdown() + lib.Pause(time.Second) + }, + ) + }) + t.Run("With Whereis", func(t *testing.T) { + ctx := context.TODO() + + logger := log.DiscardLogger + + // start the NATS server + srv := startNatsServer(t) + addr := srv.Addr().String() + + sys1, node1Host, node1Port, sd1 := startNode(t, logger, "node1", addr) + sys2, node2Host, node2Port, sd2 := startNode(t, logger, "node2", addr) + sys3, node3Host, node3Port, sd3 := startNode(t, logger, "node3", addr) + + // wait for a proper and clean setup of the cluster + lib.Pause(time.Second) + + addresses := []string{ + fmt.Sprintf("%s:%d", node1Host, node1Port), + fmt.Sprintf("%s:%d", node2Host, node2Port), + fmt.Sprintf("%s:%d", node3Host, node3Port), + } + + nodes := make([]*Node, len(addresses)) + for i, addr := range addresses { + nodes[i] = NewNode(addr) + } + + client, err := New(ctx, nodes) + require.NoError(t, err) + require.NotNil(t, client) + + kinds, err := client.Kinds(ctx) + require.NoError(t, err) + require.NotNil(t, kinds) + require.NotEmpty(t, kinds) + require.Len(t, kinds, 2) + + expected := []string{ + "actors.funcactor", + "client.testactor", + } + + require.ElementsMatch(t, expected, kinds) + actor := NewActor("client.testactor").WithName("actorName") + + err = client.Spawn(ctx, actor) + require.NoError(t, err) + + lib.Pause(time.Second) + + whereis, err := client.Whereis(ctx, actor) + require.NoError(t, err) + require.NotNil(t, whereis) + assert.Equal(t, actor.Name(), whereis.Name()) + + err = client.Stop(ctx, actor) + require.NoError(t, err) + + t.Cleanup( + func() { + client.Close() + + require.NoError(t, sys1.Stop(ctx)) + require.NoError(t, sys2.Stop(ctx)) + require.NoError(t, sys3.Stop(ctx)) + + require.NoError(t, sd1.Close()) + 
require.NoError(t, sd2.Close()) + require.NoError(t, sd3.Close()) + + srv.Shutdown() + lib.Pause(time.Second) + }) }) } func startNatsServer(t *testing.T) *natsserver.Server { t.Helper() - serv, err := natsserver.NewServer(&natsserver.Options{ - Host: "127.0.0.1", - Port: -1, - }) + serv, err := natsserver.NewServer( + &natsserver.Options{ + Host: "127.0.0.1", + Port: -1, + }, + ) require.NoError(t, err) @@ -663,7 +791,8 @@ func startNode(t *testing.T, logger log.Logger, nodeName, serverAddr string) (sy actors.WithReplyTimeout(time.Minute), actors.WithRemoting(host, int32(remotePort)), actors.WithPeerStateLoopInterval(100*time.Millisecond), - actors.WithCluster(clusterConfig)) + actors.WithCluster(clusterConfig), + ) require.NotNil(t, system) require.NoError(t, err) diff --git a/client/least_load_test.go b/client/least_load_test.go index 14854cb2..97962d99 100644 --- a/client/least_load_test.go +++ b/client/least_load_test.go @@ -32,9 +32,11 @@ import ( func TestLeadLoad(t *testing.T) { balancer := NewLeastLoad() - balancer.Set(NewNode("192.168.34.10:3322", 2), - NewNode("192.168.34.11:3322", 0), - NewNode("192.168.34.12:3322", 1)) + balancer.Set( + NewNode("192.168.34.10:3322", WithWeight(2)), + NewNode("192.168.34.11:3322", WithWeight(0)), + NewNode("192.168.34.12:3322", WithWeight(1)), + ) actual := balancer.Next() assert.Equal(t, "192.168.34.11:3322", actual.Address()) } diff --git a/client/node.go b/client/node.go index efbadaea..8ae26d95 100644 --- a/client/node.go +++ b/client/node.go @@ -24,24 +24,54 @@ package client -import "sync" +import ( + "net" + nethttp "net/http" + "strconv" + "sync" + + "github.com/tochemey/goakt/v2/actors" + "github.com/tochemey/goakt/v2/internal/http" + "github.com/tochemey/goakt/v2/internal/validation" +) + +type NodeOption func(*Node) + +// WithWeight set the node weight +func WithWeight(weight float64) NodeOption { + return func(n *Node) { + n.weight = weight + } +} // Node represents the node in the cluster type Node struct { address string weight float64 mutex *sync.Mutex + + client *nethttp.Client + remoting *actors.Remoting } // NewNode creates an instance of Node -func NewNode(address string, weight int) *Node { - return &Node{ - address: address, - weight: float64(weight), - mutex: &sync.Mutex{}, +func NewNode(address string, opts ...NodeOption) *Node { + node := &Node{ + address: address, + mutex: &sync.Mutex{}, + client: http.NewClient(), + remoting: actors.NewRemoting(), + weight: 0, } + for _, opt := range opts { + opt(node) + } + + return node } +var _ validation.Validator = (*Node)(nil) + // SetWeight sets the node weight. 
// This is thread safe func (n *Node) SetWeight(weight float64) { @@ -65,3 +95,48 @@ func (n *Node) Weight() float64 { n.mutex.Unlock() return load } + +func (n *Node) Validate() error { + address := n.Address() + return validation.NewTCPAddressValidator(address).Validate() +} + +// HTTPClient returns the underlying http client for the given node +func (n *Node) HTTPClient() *nethttp.Client { + n.mutex.Lock() + client := n.client + n.mutex.Unlock() + return client +} + +// Remoting returns the remoting instance +func (n *Node) Remoting() *actors.Remoting { + n.mutex.Lock() + remoting := n.remoting + n.mutex.Unlock() + return remoting +} + +// HTTPEndPoint returns the node remote endpoint +func (n *Node) HTTPEndPoint() string { + n.mutex.Lock() + host, p, _ := net.SplitHostPort(n.address) + port, _ := strconv.Atoi(p) + n.mutex.Unlock() + return http.URL(host, port) +} + +// Free closes the underlying http client connection of the given node +func (n *Node) Free() { + n.HTTPClient().CloseIdleConnections() + n.Remoting().Close() +} + +// HostAndPort returns the node host and port +func (n *Node) HostAndPort() (string, int) { + n.mutex.Lock() + host, p, _ := net.SplitHostPort(n.address) + port, _ := strconv.Atoi(p) + n.mutex.Unlock() + return host, port +} diff --git a/client/round_robin_test.go b/client/round_robin_test.go index ff99d48b..3d823a2d 100644 --- a/client/round_robin_test.go +++ b/client/round_robin_test.go @@ -39,9 +39,11 @@ func TestRoundRobin(t *testing.T) { } balancer := NewRoundRobin() - balancer.Set(NewNode("192.168.34.10:3322", 2), - NewNode("192.168.34.11:3322", 0), - NewNode("192.168.34.12:3322", 1)) + balancer.Set( + NewNode("192.168.34.10:3322", WithWeight(2)), + NewNode("192.168.34.11:3322", WithWeight(0)), + NewNode("192.168.34.12:3322", WithWeight(1)), + ) actual := make([]string, 4) for i := 0; i < 4; i++ { diff --git a/goaktpb/goakt.pb.go b/goaktpb/goakt.pb.go index 9445f41e..43187b60 100644 --- a/goaktpb/goakt.pb.go +++ b/goaktpb/goakt.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: goakt/goakt.proto @@ -44,11 +44,9 @@ type Address struct { func (x *Address) Reset() { *x = Address{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address) String() string { @@ -59,7 +57,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -137,11 +135,9 @@ type Deadletter struct { func (x *Deadletter) Reset() { *x = Deadletter{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Deadletter) String() string { @@ -152,7 +148,7 @@ func (*Deadletter) ProtoMessage() {} func (x *Deadletter) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -216,11 +212,9 @@ type ActorStarted struct { func (x *ActorStarted) Reset() { *x = ActorStarted{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActorStarted) String() string { @@ -231,7 +225,7 @@ func (*ActorStarted) ProtoMessage() {} func (x *ActorStarted) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -274,11 +268,9 @@ type ActorStopped struct { func (x *ActorStopped) Reset() { *x = ActorStopped{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActorStopped) String() string { @@ -289,7 +281,7 @@ func (*ActorStopped) ProtoMessage() {} func (x *ActorStopped) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -332,11 +324,9 @@ type ActorPassivated struct { func (x *ActorPassivated) Reset() { *x = ActorPassivated{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x 
*ActorPassivated) String() string { @@ -347,7 +337,7 @@ func (*ActorPassivated) ProtoMessage() {} func (x *ActorPassivated) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -392,11 +382,9 @@ type ActorChildCreated struct { func (x *ActorChildCreated) Reset() { *x = ActorChildCreated{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActorChildCreated) String() string { @@ -407,7 +395,7 @@ func (*ActorChildCreated) ProtoMessage() {} func (x *ActorChildCreated) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -457,11 +445,9 @@ type ActorRestarted struct { func (x *ActorRestarted) Reset() { *x = ActorRestarted{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActorRestarted) String() string { @@ -472,7 +458,7 @@ func (*ActorRestarted) ProtoMessage() {} func (x *ActorRestarted) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -515,11 +501,9 @@ type NodeJoined struct { func (x *NodeJoined) Reset() { *x = NodeJoined{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NodeJoined) String() string { @@ -530,7 +514,7 @@ func (*NodeJoined) ProtoMessage() {} func (x *NodeJoined) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -573,11 +557,9 @@ type NodeLeft struct { func (x *NodeLeft) Reset() { *x = NodeLeft{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NodeLeft) String() string { @@ -588,7 +570,7 @@ func (*NodeLeft) ProtoMessage() {} func (x *NodeLeft) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -630,11 +612,9 @@ type Terminated struct { func (x *Terminated) Reset() { *x = Terminated{} - if 
protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Terminated) String() string { @@ -645,7 +625,7 @@ func (*Terminated) ProtoMessage() {} func (x *Terminated) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -678,11 +658,9 @@ type PoisonPill struct { func (x *PoisonPill) Reset() { *x = PoisonPill{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PoisonPill) String() string { @@ -693,7 +671,7 @@ func (*PoisonPill) ProtoMessage() {} func (x *PoisonPill) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -717,11 +695,9 @@ type PostStart struct { func (x *PostStart) Reset() { *x = PostStart{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PostStart) String() string { @@ -732,7 +708,7 @@ func (*PostStart) ProtoMessage() {} func (x *PostStart) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -759,11 +735,9 @@ type Broadcast struct { func (x *Broadcast) Reset() { *x = Broadcast{} - if protoimpl.UnsafeEnabled { - mi := &file_goakt_goakt_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_goakt_goakt_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Broadcast) String() string { @@ -774,7 +748,7 @@ func (*Broadcast) ProtoMessage() {} func (x *Broadcast) ProtoReflect() protoreflect.Message { mi := &file_goakt_goakt_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -963,164 +937,6 @@ func file_goakt_goakt_proto_init() { if File_goakt_goakt_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_goakt_goakt_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_goakt_goakt_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Deadletter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_goakt_goakt_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ActorStarted); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_goakt_goakt_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ActorStopped); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_goakt_goakt_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ActorPassivated); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_goakt_goakt_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ActorChildCreated); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_goakt_goakt_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ActorRestarted); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_goakt_goakt_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*NodeJoined); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_goakt_goakt_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*NodeLeft); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_goakt_goakt_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*Terminated); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_goakt_goakt_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*PoisonPill); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_goakt_goakt_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*PostStart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_goakt_goakt_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*Broadcast); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/internal/errorschain/errorschain.go b/internal/errorschain/errorschain.go index b6c1243a..dda41a97 100644 --- a/internal/errorschain/errorschain.go +++ b/internal/errorschain/errorschain.go @@ -55,6 +55,12 @@ func (c *Chain) AddError(err error) *Chain { return c } +// AddErrors add a slice of errors to the chain. Remember the slice order does matter here +func (c *Chain) AddErrors(errs ...error) *Chain { + c.errs = append(c.errs, errs...) 
+ return c +} + // Error returns the error func (c *Chain) Error() error { var err error diff --git a/internal/http/http.go b/internal/http/http.go index 64926716..6dce2c2d 100644 --- a/internal/http/http.go +++ b/internal/http/http.go @@ -79,9 +79,11 @@ func NewServer(ctx context.Context, host string, port int, mux *http.ServeMux) * IdleTimeout: 1200 * time.Second, // For gRPC clients, it's convenient to support HTTP/2 without TLS. You can // avoid x/net/http2 by using http.ListenAndServeTLS. - Handler: h2c.NewHandler(mux, &http2.Server{ - IdleTimeout: 1200 * time.Second, - }), + Handler: h2c.NewHandler( + mux, &http2.Server{ + IdleTimeout: 1200 * time.Second, + }, + ), BaseContext: func(_ net.Listener) context.Context { return ctx }, diff --git a/internal/internalpb/actor.pb.go b/internal/internalpb/actor.pb.go index 84f4e62a..02dcd335 100644 --- a/internal/internalpb/actor.pb.go +++ b/internal/internalpb/actor.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: internal/actor.proto @@ -35,11 +35,9 @@ type ActorRef struct { func (x *ActorRef) Reset() { *x = ActorRef{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_actor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_actor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActorRef) String() string { @@ -50,7 +48,7 @@ func (*ActorRef) ProtoMessage() {} func (x *ActorRef) ProtoReflect() protoreflect.Message { mi := &file_internal_actor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -136,20 +134,6 @@ func file_internal_actor_proto_init() { if File_internal_actor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_internal_actor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ActorRef); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/internal/internalpb/cluster.pb.go b/internal/internalpb/cluster.pb.go index 5c645889..2061332d 100644 --- a/internal/internalpb/cluster.pb.go +++ b/internal/internalpb/cluster.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: internal/cluster.proto @@ -31,11 +31,9 @@ type GetNodeMetricRequest struct { func (x *GetNodeMetricRequest) Reset() { *x = GetNodeMetricRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_cluster_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_cluster_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetNodeMetricRequest) String() string { @@ -46,7 +44,7 @@ func (*GetNodeMetricRequest) ProtoMessage() {} func (x *GetNodeMetricRequest) ProtoReflect() protoreflect.Message { mi := &file_internal_cluster_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -81,11 +79,9 @@ type GetNodeMetricResponse struct { func (x *GetNodeMetricResponse) Reset() { *x = GetNodeMetricResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_cluster_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_cluster_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetNodeMetricResponse) String() string { @@ -96,7 +92,7 @@ func (*GetNodeMetricResponse) ProtoMessage() {} func (x *GetNodeMetricResponse) ProtoReflect() protoreflect.Message { mi := &file_internal_cluster_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -136,11 +132,9 @@ type GetKindsRequest struct { func (x *GetKindsRequest) Reset() { *x = GetKindsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_cluster_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_cluster_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetKindsRequest) String() string { @@ -151,7 +145,7 @@ func (*GetKindsRequest) ProtoMessage() {} func (x *GetKindsRequest) ProtoReflect() protoreflect.Message { mi := &file_internal_cluster_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -184,11 +178,9 @@ type GetKindsResponse struct { func (x *GetKindsResponse) Reset() { *x = GetKindsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_cluster_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_cluster_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetKindsResponse) String() string { @@ -199,7 +191,7 @@ func (*GetKindsResponse) ProtoMessage() {} func (x *GetKindsResponse) ProtoReflect() protoreflect.Message { mi := &file_internal_cluster_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -303,56 +295,6 @@ func file_internal_cluster_proto_init() { if File_internal_cluster_proto != nil { return } - if !protoimpl.UnsafeEnabled { - 
file_internal_cluster_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GetNodeMetricRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_cluster_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*GetNodeMetricResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_cluster_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*GetKindsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_cluster_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*GetKindsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/internal/internalpb/nats.pb.go b/internal/internalpb/nats.pb.go index 16148103..14c836ee 100644 --- a/internal/internalpb/nats.pb.go +++ b/internal/internalpb/nats.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: internal/nats.proto @@ -92,11 +92,9 @@ type NatsMessage struct { func (x *NatsMessage) Reset() { *x = NatsMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_nats_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_nats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NatsMessage) String() string { @@ -107,7 +105,7 @@ func (*NatsMessage) ProtoMessage() {} func (x *NatsMessage) ProtoReflect() protoreflect.Message { mi := &file_internal_nats_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -218,20 +216,6 @@ func file_internal_nats_proto_init() { if File_internal_nats_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_internal_nats_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*NatsMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/internal/internalpb/peers.pb.go b/internal/internalpb/peers.pb.go index bd31453c..cc98a8ed 100644 --- a/internal/internalpb/peers.pb.go +++ b/internal/internalpb/peers.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: internal/peers.proto @@ -39,11 +39,9 @@ type PeersSync struct { func (x *PeersSync) Reset() { *x = PeersSync{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_peers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_peers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PeersSync) String() string { @@ -54,7 +52,7 @@ func (*PeersSync) ProtoMessage() {} func (x *PeersSync) ProtoReflect() protoreflect.Message { mi := &file_internal_peers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -114,11 +112,9 @@ type PeerState struct { func (x *PeerState) Reset() { *x = PeerState{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_peers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_peers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PeerState) String() string { @@ -129,7 +125,7 @@ func (*PeerState) ProtoMessage() {} func (x *PeerState) ProtoReflect() protoreflect.Message { mi := &file_internal_peers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -244,32 +240,6 @@ func file_internal_peers_proto_init() { return } file_internal_actor_proto_init() - if !protoimpl.UnsafeEnabled { - file_internal_peers_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*PeersSync); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_peers_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*PeerState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/internal/internalpb/remoting.pb.go b/internal/internalpb/remoting.pb.go index f3cd2830..d7831d09 100644 --- a/internal/internalpb/remoting.pb.go +++ b/internal/internalpb/remoting.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: internal/remoting.proto @@ -38,11 +38,9 @@ type RemoteAskRequest struct { func (x *RemoteAskRequest) Reset() { *x = RemoteAskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteAskRequest) String() string { @@ -53,7 +51,7 @@ func (*RemoteAskRequest) ProtoMessage() {} func (x *RemoteAskRequest) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -94,11 +92,9 @@ type RemoteAskResponse struct { func (x *RemoteAskResponse) Reset() { *x = RemoteAskResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteAskResponse) String() string { @@ -109,7 +105,7 @@ func (*RemoteAskResponse) ProtoMessage() {} func (x *RemoteAskResponse) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -143,11 +139,9 @@ type RemoteTellRequest struct { func (x *RemoteTellRequest) Reset() { *x = RemoteTellRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteTellRequest) String() string { @@ -158,7 +152,7 @@ func (*RemoteTellRequest) ProtoMessage() {} func (x *RemoteTellRequest) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -188,11 +182,9 @@ type RemoteTellResponse struct { func (x *RemoteTellResponse) Reset() { *x = RemoteTellResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteTellResponse) String() string { @@ -203,7 +195,7 @@ func (*RemoteTellResponse) ProtoMessage() {} func (x *RemoteTellResponse) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -234,11 +226,9 @@ type RemoteLookupRequest struct { func (x *RemoteLookupRequest) Reset() { *x = RemoteLookupRequest{} - if protoimpl.UnsafeEnabled { - mi := 
&file_internal_remoting_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteLookupRequest) String() string { @@ -249,7 +239,7 @@ func (*RemoteLookupRequest) ProtoMessage() {} func (x *RemoteLookupRequest) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -296,11 +286,9 @@ type RemoteLookupResponse struct { func (x *RemoteLookupResponse) Reset() { *x = RemoteLookupResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteLookupResponse) String() string { @@ -311,7 +299,7 @@ func (*RemoteLookupResponse) ProtoMessage() {} func (x *RemoteLookupResponse) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -350,11 +338,9 @@ type RemoteMessage struct { func (x *RemoteMessage) Reset() { *x = RemoteMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteMessage) String() string { @@ -365,7 +351,7 @@ func (*RemoteMessage) ProtoMessage() {} func (x *RemoteMessage) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -416,11 +402,9 @@ type RemoteReSpawnRequest struct { func (x *RemoteReSpawnRequest) Reset() { *x = RemoteReSpawnRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteReSpawnRequest) String() string { @@ -431,7 +415,7 @@ func (*RemoteReSpawnRequest) ProtoMessage() {} func (x *RemoteReSpawnRequest) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -475,11 +459,9 @@ type RemoteReSpawnResponse struct { func (x *RemoteReSpawnResponse) Reset() { *x = RemoteReSpawnResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func 
(x *RemoteReSpawnResponse) String() string { @@ -490,7 +472,7 @@ func (*RemoteReSpawnResponse) ProtoMessage() {} func (x *RemoteReSpawnResponse) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -520,11 +502,9 @@ type RemoteStopRequest struct { func (x *RemoteStopRequest) Reset() { *x = RemoteStopRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteStopRequest) String() string { @@ -535,7 +515,7 @@ func (*RemoteStopRequest) ProtoMessage() {} func (x *RemoteStopRequest) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -579,11 +559,9 @@ type RemoteStopResponse struct { func (x *RemoteStopResponse) Reset() { *x = RemoteStopResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteStopResponse) String() string { @@ -594,7 +572,7 @@ func (*RemoteStopResponse) ProtoMessage() {} func (x *RemoteStopResponse) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -626,11 +604,9 @@ type RemoteSpawnRequest struct { func (x *RemoteSpawnRequest) Reset() { *x = RemoteSpawnRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteSpawnRequest) String() string { @@ -641,7 +617,7 @@ func (*RemoteSpawnRequest) ProtoMessage() {} func (x *RemoteSpawnRequest) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -692,11 +668,9 @@ type RemoteSpawnResponse struct { func (x *RemoteSpawnResponse) Reset() { *x = RemoteSpawnResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_remoting_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_remoting_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoteSpawnResponse) String() string { @@ -707,7 +681,7 @@ func (*RemoteSpawnResponse) ProtoMessage() {} func (x *RemoteSpawnResponse) ProtoReflect() protoreflect.Message { mi := &file_internal_remoting_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x 
!= nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -903,164 +877,6 @@ func file_internal_remoting_proto_init() { if File_internal_remoting_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_internal_remoting_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*RemoteAskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*RemoteAskResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*RemoteTellRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*RemoteTellResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*RemoteLookupRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*RemoteLookupResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*RemoteMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*RemoteReSpawnRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*RemoteReSpawnResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*RemoteStopRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*RemoteStopResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*RemoteSpawnRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_remoting_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*RemoteSpawnResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x 
struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/protos/internal/peers.proto b/protos/internal/peers.proto index 20a9d3b5..9d9143de 100644 --- a/protos/internal/peers.proto +++ b/protos/internal/peers.proto @@ -29,3 +29,4 @@ message PeerState { // Specifies the list of actors repeated ActorRef actors = 4; } + diff --git a/test/data/testpb/test.pb.go b/test/data/testpb/test.pb.go index fe1e7991..7d531e86 100644 --- a/test/data/testpb/test.pb.go +++ b/test/data/testpb/test.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: test/test.proto @@ -28,11 +28,9 @@ type TestReply struct { func (x *TestReply) Reset() { *x = TestReply{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestReply) String() string { @@ -43,7 +41,7 @@ func (*TestReply) ProtoMessage() {} func (x *TestReply) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -66,11 +64,9 @@ type TestPanic struct { func (x *TestPanic) Reset() { *x = TestPanic{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestPanic) String() string { @@ -81,7 +77,7 @@ func (*TestPanic) ProtoMessage() {} func (x *TestPanic) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -104,11 +100,9 @@ type TestTimeout struct { func (x *TestTimeout) Reset() { *x = TestTimeout{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestTimeout) String() string { @@ -119,7 +113,7 @@ func (*TestTimeout) ProtoMessage() {} func (x *TestTimeout) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -144,11 +138,9 @@ type Reply struct { func (x *Reply) Reset() { *x = Reply{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Reply) String() string { @@ -159,7 +151,7 @@ func (*Reply) ProtoMessage() {} func (x *Reply) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -189,11 +181,9 @@ type TestSend struct { func (x *TestSend) Reset() { *x = TestSend{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestSend) String() string { @@ -204,7 +194,7 @@ func (*TestSend) ProtoMessage() {} func (x *TestSend) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -227,11 +217,9 @@ type TestRemoteSend struct { func (x *TestRemoteSend) Reset() { *x = TestRemoteSend{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestRemoteSend) String() string { @@ -242,7 +230,7 @@ func (*TestRemoteSend) ProtoMessage() {} func (x *TestRemoteSend) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -268,11 +256,9 @@ type Account struct { func (x *Account) Reset() { *x = Account{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Account) String() string { @@ -283,7 +269,7 @@ func (*Account) ProtoMessage() {} func (x *Account) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -322,11 +308,9 @@ type CreateAccount struct { func (x *CreateAccount) Reset() { *x = CreateAccount{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CreateAccount) String() string { @@ -337,7 +321,7 @@ func (*CreateAccount) ProtoMessage() {} func (x *CreateAccount) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -370,11 +354,9 @@ type CreditAccount struct { func (x *CreditAccount) Reset() { *x = CreditAccount{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CreditAccount) String() string { @@ -385,7 
+367,7 @@ func (*CreditAccount) ProtoMessage() {} func (x *CreditAccount) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -425,11 +407,9 @@ type AccountCreated struct { func (x *AccountCreated) Reset() { *x = AccountCreated{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AccountCreated) String() string { @@ -440,7 +420,7 @@ func (*AccountCreated) ProtoMessage() {} func (x *AccountCreated) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -480,11 +460,9 @@ type AccountCredited struct { func (x *AccountCredited) Reset() { *x = AccountCredited{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AccountCredited) String() string { @@ -495,7 +473,7 @@ func (*AccountCredited) ProtoMessage() {} func (x *AccountCredited) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -532,11 +510,9 @@ type DebitAccount struct { func (x *DebitAccount) Reset() { *x = DebitAccount{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DebitAccount) String() string { @@ -547,7 +523,7 @@ func (*DebitAccount) ProtoMessage() {} func (x *DebitAccount) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -570,11 +546,9 @@ type AccountDebited struct { func (x *AccountDebited) Reset() { *x = AccountDebited{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AccountDebited) String() string { @@ -585,7 +559,7 @@ func (*AccountDebited) ProtoMessage() {} func (x *AccountDebited) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -608,11 +582,9 @@ type TestLogin struct { func (x *TestLogin) Reset() { *x = TestLogin{} - if protoimpl.UnsafeEnabled { - mi := 
&file_test_test_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestLogin) String() string { @@ -623,7 +595,7 @@ func (*TestLogin) ProtoMessage() {} func (x *TestLogin) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -646,11 +618,9 @@ type TestLoginSuccess struct { func (x *TestLoginSuccess) Reset() { *x = TestLoginSuccess{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestLoginSuccess) String() string { @@ -661,7 +631,7 @@ func (*TestLoginSuccess) ProtoMessage() {} func (x *TestLoginSuccess) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -684,11 +654,9 @@ type TestReadiness struct { func (x *TestReadiness) Reset() { *x = TestReadiness{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestReadiness) String() string { @@ -699,7 +667,7 @@ func (*TestReadiness) ProtoMessage() {} func (x *TestReadiness) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -722,11 +690,9 @@ type TestReady struct { func (x *TestReady) Reset() { *x = TestReady{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestReady) String() string { @@ -737,7 +703,7 @@ func (*TestReady) ProtoMessage() {} func (x *TestReady) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -760,11 +726,9 @@ type TestBye struct { func (x *TestBye) Reset() { *x = TestBye{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestBye) String() string { @@ -775,7 +739,7 @@ func (*TestBye) ProtoMessage() {} func (x *TestBye) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -798,11 +762,9 @@ type TestStash struct { func (x *TestStash) Reset() { *x = TestStash{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestStash) String() string { @@ -813,7 +775,7 @@ func (*TestStash) ProtoMessage() {} func (x *TestStash) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -836,11 +798,9 @@ type TestUnstash struct { func (x *TestUnstash) Reset() { *x = TestUnstash{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestUnstash) String() string { @@ -851,7 +811,7 @@ func (*TestUnstash) ProtoMessage() {} func (x *TestUnstash) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -874,11 +834,9 @@ type TestUnstashAll struct { func (x *TestUnstashAll) Reset() { *x = TestUnstashAll{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestUnstashAll) String() string { @@ -889,7 +847,7 @@ func (*TestUnstashAll) ProtoMessage() {} func (x *TestUnstashAll) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -912,11 +870,9 @@ type Ping struct { func (x *Ping) Reset() { *x = Ping{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Ping) String() string { @@ -927,7 +883,7 @@ func (*Ping) ProtoMessage() {} func (x *Ping) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -950,11 +906,9 @@ type Pong struct { func (x *Pong) Reset() { *x = Pong{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Pong) String() string { @@ -965,7 +919,7 @@ func (*Pong) ProtoMessage() {} func 
(x *Pong) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -990,11 +944,9 @@ type Wait struct { func (x *Wait) Reset() { *x = Wait{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Wait) String() string { @@ -1005,7 +957,7 @@ func (*Wait) ProtoMessage() {} func (x *Wait) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1037,11 +989,9 @@ type RunTask struct { func (x *RunTask) Reset() { *x = RunTask{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RunTask) String() string { @@ -1052,7 +1002,7 @@ func (*RunTask) ProtoMessage() {} func (x *RunTask) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1082,11 +1032,9 @@ type TaskComplete struct { func (x *TaskComplete) Reset() { *x = TaskComplete{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskComplete) String() string { @@ -1097,7 +1045,7 @@ func (*TaskComplete) ProtoMessage() {} func (x *TaskComplete) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1122,11 +1070,9 @@ type DoLog struct { func (x *DoLog) Reset() { *x = DoLog{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DoLog) String() string { @@ -1137,7 +1083,7 @@ func (*DoLog) ProtoMessage() {} func (x *DoLog) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1167,11 +1113,9 @@ type GetCount struct { func (x *GetCount) Reset() { *x = GetCount{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[27] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetCount) String() string { @@ -1182,7 +1126,7 @@ func (*GetCount) ProtoMessage() {} func (x *GetCount) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1207,11 +1151,9 @@ type Count struct { func (x *Count) Reset() { *x = Count{} - if protoimpl.UnsafeEnabled { - mi := &file_test_test_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_test_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Count) String() string { @@ -1222,7 +1164,7 @@ func (*Count) ProtoMessage() {} func (x *Count) ProtoReflect() protoreflect.Message { mi := &file_test_test_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1369,356 +1311,6 @@ func file_test_test_proto_init() { if File_test_test_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_test_test_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*TestReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*TestPanic); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*TestTimeout); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Reply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*TestSend); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*TestRemoteSend); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*Account); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*CreateAccount); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*CreditAccount); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*AccountCreated); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*AccountCredited); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*DebitAccount); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*AccountDebited); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*TestLogin); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*TestLoginSuccess); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*TestReadiness); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*TestReady); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*TestBye); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*TestStash); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*TestUnstash); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*TestUnstashAll); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*Ping); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*Pong); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*Wait); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_test_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*RunTask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - 
return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_test_test_proto_msgTypes[25].Exporter = func(v any, i int) any {
-			switch v := v.(*TaskComplete); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_test_test_proto_msgTypes[26].Exporter = func(v any, i int) any {
-			switch v := v.(*DoLog); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_test_test_proto_msgTypes[27].Exporter = func(v any, i int) any {
-			switch v := v.(*GetCount); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_test_test_proto_msgTypes[28].Exporter = func(v any, i int) any {
-			switch v := v.(*Count); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{