diff --git a/.gitignore b/.gitignore index f0b7911dbef..7bfae9cc392 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,8 @@ go_env.properties mage_output_file.go elastic_agent fleet.yml +fleet.enc +fleet.enc.lock # Editor swap files *.swp diff --git a/control.proto b/control.proto index efd063822de..a1a7a3f8b82 100644 --- a/control.proto +++ b/control.proto @@ -9,16 +9,23 @@ package cproto; option cc_enable_arenas = true; option go_package = "internal/pkg/agent/control/cproto"; -// Status codes for the current state. -enum Status { +// State codes for the current state. +enum State { STARTING = 0; CONFIGURING = 1; HEALTHY = 2; DEGRADED = 3; FAILED = 4; STOPPING = 5; - UPGRADING = 6; - ROLLBACK = 7; + STOPPED = 6; + UPGRADING = 7; + ROLLBACK = 8; +} + +// Unit Type running inside a component. +enum UnitType { + INPUT = 0; + OUTPUT = 1; } // Action status codes for restart and upgrade response. @@ -93,18 +100,43 @@ message UpgradeResponse { string error = 3; } -// Current status of the application in Elastic Agent. -message ApplicationStatus { - // Unique application ID. +message ComponentUnitState { + // Type of unit in the component. + UnitType unit_type = 1; + // ID of the unit in the component. + string unit_id = 2; + // Current state. + State state = 3; + // Current state message. + string message = 4; + // Current state payload. + string payload = 5; +} + +// Version information reported by the component to Elastic Agent. +message ComponentVersionInfo { + // Name of the component. + string name = 1; + // Version of the component. + string version = 2; + // Extra meta information about the version. + map meta = 3; +} + +// Current state of a running component by Elastic Agent. +message ComponentState { + // Unique component ID. string id = 1; - // Application name. + // Component name. string name = 2; - // Current status. - Status status = 3; - // Current status message. + // Current state. + State state = 3; + // Current state message. 
string message = 4; - // Current status payload. - string payload = 5; + // Current units running in the component. + repeated ComponentUnitState units = 5; + // Current version information for the running component. + ComponentVersionInfo version_info = 6; } // Current metadata for a running process. @@ -126,14 +158,14 @@ message ProcMeta { string error = 15; } -// Status is the current status of Elastic Agent. -message StatusResponse { - // Overall status of Elastic Agent. - Status status = 1; +// StateResponse is the current state of Elastic Agent. +message StateResponse { + // Overall state of Elastic Agent. + State state = 1; // Overall status message of Elastic Agent. string message = 2; - // Status of each application in Elastic Agent. - repeated ApplicationStatus applications = 3; + // Status of each component in Elastic Agent. + repeated ComponentState components = 3; } // ProcMetaResponse is the current running version infomation for all processes. @@ -184,8 +216,8 @@ service ElasticAgentControl { // Fetches the currently running version of the Elastic Agent. rpc Version(Empty) returns (VersionResponse); - // Fetches the currently status of the Elastic Agent. - rpc Status(Empty) returns (StatusResponse); + // Fetches the currently states of the Elastic Agent. + rpc State(Empty) returns (StateResponse); // Restart restarts the current running Elastic Agent. 
rpc Restart(Empty) returns (RestartResponse); diff --git a/internal/pkg/agent/application/pipeline/actions/action.go b/internal/pkg/agent/application/actions/action.go similarity index 79% rename from internal/pkg/agent/application/pipeline/actions/action.go rename to internal/pkg/agent/application/actions/action.go index 794ee5ca3df..120316e1dfb 100644 --- a/internal/pkg/agent/application/pipeline/actions/action.go +++ b/internal/pkg/agent/application/actions/action.go @@ -7,14 +7,14 @@ package actions import ( "context" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" ) // Handler handles action coming from fleet. type Handler interface { - Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error + Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error } // ClientSetter sets the client for communication. 
diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_application.go b/internal/pkg/agent/application/actions/handlers/handler_action_application.go similarity index 77% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_application.go rename to internal/pkg/agent/application/actions/handlers/handler_action_application.go index 8d8ce830421..d36f8f1d33a 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_application.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_application.go @@ -9,11 +9,14 @@ import ( "fmt" "time" + "github.com/elastic/elastic-agent-client/v7/pkg/client" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" ) const ( @@ -25,27 +28,28 @@ var errActionTimeoutInvalid = errors.New("action timeout is invalid") // AppAction is a handler for application actions. type AppAction struct { - log *logger.Logger - srv *server.Server + log *logger.Logger + coord *coordinator.Coordinator } // NewAppAction creates a new AppAction handler. -func NewAppAction(log *logger.Logger, srv *server.Server) *AppAction { +func NewAppAction(log *logger.Logger, coord *coordinator.Coordinator) *AppAction { return &AppAction{ - log: log, - srv: srv, + log: log, + coord: coord, } } // Handle handles application action. 
-func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Debugf("handlerAppAction: action '%+v' received", a) action, ok := a.(*fleetapi.ActionApp) if !ok { return fmt.Errorf("invalid type, expected ActionApp and received %T", a) } - appState, ok := h.srv.FindByInputType(action.InputType) + state := h.coord.State() + unit, ok := findUnitFromInputType(state, action.InputType) if !ok { // If the matching action is not found ack the action with the error for action result document action.StartedAt = time.Now().UTC().Format(time.RFC3339Nano) @@ -71,8 +75,10 @@ func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker store.F var res map[string]interface{} if err == nil { - h.log.Debugf("handlerAppAction: action '%v' started with timeout: %v", action.InputType, timeout) - res, err = appState.PerformAction(action.InputType, params, timeout) + h.log.Debugf("handlerAppAction: action '%v' started with timeout: %v", action.ActionType, timeout) + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + res, err = h.coord.PerformAction(ctx, unit, action.ActionType, params) } end := time.Now().UTC() @@ -143,3 +149,17 @@ func readMapString(m map[string]interface{}, key string, def string) string { } return def } + +func findUnitFromInputType(state coordinator.State, inputType string) (component.Unit, bool) { + for _, comp := range state.Components { + for _, unit := range comp.Component.Units { + if unit.Type == client.UnitTypeInput { + it, ok := unit.Config["type"] + if ok && it == inputType { + return unit, true + } + } + } + } + return component.Unit{}, false +} diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_cancel.go b/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go similarity index 92% rename from 
internal/pkg/agent/application/pipeline/actions/handlers/handler_action_cancel.go rename to internal/pkg/agent/application/actions/handlers/handler_action_cancel.go index a2208c7294d..bb48b2bd753 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_cancel.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go @@ -8,8 +8,8 @@ import ( "context" "fmt" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -32,7 +32,7 @@ func NewCancel(log *logger.Logger, cancel queueCanceler) *Cancel { } // Handle will cancel any actions in the queue that match target_id. -func (h *Cancel) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Cancel) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { action, ok := a.(*fleetapi.ActionCancel) if !ok { return fmt.Errorf("invalid type, expected ActionCancel and received %T", a) diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go similarity index 86% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go rename to internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go index 3775d12b352..a3f4ff0b3ea 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go @@ -15,15 +15,15 @@ import ( "gopkg.in/yaml.v2" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" 
"github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/remote" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -36,28 +36,28 @@ const ( // PolicyChange is a handler for POLICY_CHANGE action. type PolicyChange struct { log *logger.Logger - emitter pipeline.EmitterFunc agentInfo *info.AgentInfo config *configuration.Configuration store storage.Store + ch chan coordinator.ConfigChange setters []actions.ClientSetter } // NewPolicyChange creates a new PolicyChange handler. func NewPolicyChange( log *logger.Logger, - emitter pipeline.EmitterFunc, agentInfo *info.AgentInfo, config *configuration.Configuration, store storage.Store, + ch chan coordinator.ConfigChange, setters ...actions.ClientSetter, ) *PolicyChange { return &PolicyChange{ log: log, - emitter: emitter, agentInfo: agentInfo, config: config, store: store, + ch: ch, setters: setters, } } @@ -72,7 +72,7 @@ func (h *PolicyChange) AddSetter(cs actions.ClientSetter) { } // Handle handles policy change action. 
-func (h *PolicyChange) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *PolicyChange) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Debugf("handlerPolicyChange: action '%+v' received", a) action, ok := a.(*fleetapi.ActionPolicyChange) if !ok { @@ -89,11 +89,19 @@ func (h *PolicyChange) Handle(ctx context.Context, a fleetapi.Action, acker stor if err != nil { return err } - if err := h.emitter(ctx, c); err != nil { - return err + + h.ch <- &policyChange{ + ctx: ctx, + cfg: c, + action: a, + acker: acker, } + return nil +} - return acker.Ack(ctx, action) +// Watch returns the channel for configuration change notifications. +func (h *PolicyChange) Watch() <-chan coordinator.ConfigChange { + return h.ch } func (h *PolicyChange) handleFleetServerHosts(ctx context.Context, c *config.Config) (err error) { @@ -210,3 +218,33 @@ func fleetToReader(agentInfo *info.AgentInfo, cfg *configuration.Configuration) } return bytes.NewReader(data), nil } + +type policyChange struct { + ctx context.Context + cfg *config.Config + action fleetapi.Action + acker acker.Acker + commit bool +} + +func (l *policyChange) Config() *config.Config { + return l.cfg +} + +func (l *policyChange) Ack() error { + if l.action == nil { + return nil + } + err := l.acker.Ack(l.ctx, l.action) + if err != nil { + return err + } + if l.commit { + return l.acker.Commit(l.ctx) + } + return nil +} + +func (l *policyChange) Fail(_ error) { + // do nothing +} diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go similarity index 56% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go rename to internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go index d887e755154..34114153875 100644 --- 
a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go @@ -9,38 +9,27 @@ import ( "sync" "testing" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" "github.com/elastic/elastic-agent/pkg/core/logger" ) -type mockEmitter struct { - err error - policy *config.Config -} - -func (m *mockEmitter) Emitter(_ context.Context, policy *config.Config) error { - m.policy = policy - return m.err -} - func TestPolicyChange(t *testing.T) { log, _ := logger.New("", false) - ack := noopacker.NewAcker() + ack := noopacker.New() agentInfo, _ := info.NewAgentInfo(true) nullStore := &storage.NullStore{} t.Run("Receive a config change and successfully emits a raw configuration", func(t *testing.T) { - emitter := &mockEmitter{} + ch := make(chan coordinator.ConfigChange, 1) conf := map[string]interface{}{"hello": "world"} action := &fleetapi.ActionPolicyChange{ @@ -50,41 +39,13 @@ func TestPolicyChange(t *testing.T) { } cfg := configuration.DefaultConfiguration() - handler := &PolicyChange{ - log: log, - emitter: emitter.Emitter, - agentInfo: agentInfo, - config: cfg, - store: nullStore, - } + handler := NewPolicyChange(log, agentInfo, 
cfg, nullStore, ch) err := handler.Handle(context.Background(), action, ack) require.NoError(t, err) - require.Equal(t, config.MustNewConfigFrom(conf), emitter.policy) - }) - - t.Run("Receive a config and fail to emits a raw configuration", func(t *testing.T) { - mockErr := errors.New("error returned") - emitter := &mockEmitter{err: mockErr} - - conf := map[string]interface{}{"hello": "world"} - action := &fleetapi.ActionPolicyChange{ - ActionID: "abc123", - ActionType: "POLICY_CHANGE", - Policy: conf, - } - - cfg := configuration.DefaultConfiguration() - handler := &PolicyChange{ - log: log, - emitter: emitter.Emitter, - agentInfo: agentInfo, - config: cfg, - store: nullStore, - } - err := handler.Handle(context.Background(), action, ack) - require.Error(t, err) + change := <-ch + require.Equal(t, config.MustNewConfigFrom(conf), change.Config()) }) } @@ -93,41 +54,10 @@ func TestPolicyAcked(t *testing.T) { agentInfo, _ := info.NewAgentInfo(true) nullStore := &storage.NullStore{} - t.Run("Config change should not ACK on error", func(t *testing.T) { - tacker := &testAcker{} - - mockErr := errors.New("error returned") - emitter := &mockEmitter{err: mockErr} - - config := map[string]interface{}{"hello": "world"} - actionID := "abc123" - action := &fleetapi.ActionPolicyChange{ - ActionID: actionID, - ActionType: "POLICY_CHANGE", - Policy: config, - } - - cfg := configuration.DefaultConfiguration() - handler := &PolicyChange{ - log: log, - emitter: emitter.Emitter, - agentInfo: agentInfo, - config: cfg, - store: nullStore, - } - - err := handler.Handle(context.Background(), action, tacker) - require.Error(t, err) - - actions := tacker.Items() - assert.EqualValues(t, 0, len(actions)) - }) - t.Run("Config change should ACK", func(t *testing.T) { + ch := make(chan coordinator.ConfigChange, 1) tacker := &testAcker{} - emitter := &mockEmitter{} - config := map[string]interface{}{"hello": "world"} actionID := "abc123" action := &fleetapi.ActionPolicyChange{ @@ -137,17 +67,14 
@@ func TestPolicyAcked(t *testing.T) { } cfg := configuration.DefaultConfiguration() - handler := &PolicyChange{ - log: log, - emitter: emitter.Emitter, - agentInfo: agentInfo, - config: cfg, - store: nullStore, - } + handler := NewPolicyChange(log, agentInfo, cfg, nullStore, ch) err := handler.Handle(context.Background(), action, tacker) require.NoError(t, err) + change := <-ch + require.NoError(t, change.Ack()) + actions := tacker.Items() assert.EqualValues(t, 1, len(actions)) assert.Equal(t, actionID, actions[0]) diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_reassign.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go similarity index 91% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_reassign.go rename to internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go index 962447b8a35..2044052d48b 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_reassign.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go @@ -7,8 +7,8 @@ package handlers import ( "context" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -25,7 +25,7 @@ func NewPolicyReassign(log *logger.Logger) *PolicyReassign { } // Handle handles POLICY_REASSIGN action. 
-func (h *PolicyReassign) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *PolicyReassign) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Debugf("handlerPolicyReassign: action '%+v' received", a) if err := acker.Ack(ctx, a); err != nil { diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_settings.go b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go similarity index 87% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_settings.go rename to internal/pkg/agent/application/actions/handlers/handler_action_settings.go index 5418a0f3eb6..eed67a50682 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_settings.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go @@ -8,11 +8,12 @@ import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -23,25 +24,25 @@ type reexecManager interface { // Settings handles settings change coming from fleet and updates log level. type Settings struct { log *logger.Logger - reexec reexecManager agentInfo *info.AgentInfo + coord *coordinator.Coordinator } // NewSettings creates a new Settings handler. 
func NewSettings( log *logger.Logger, - reexec reexecManager, agentInfo *info.AgentInfo, + coord *coordinator.Coordinator, ) *Settings { return &Settings{ log: log, - reexec: reexec, agentInfo: agentInfo, + coord: coord, } } // Handle handles SETTINGS action. -func (h *Settings) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Settings) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Debugf("handlerUpgrade: action '%+v' received", a) action, ok := a.(*fleetapi.ActionSettings) if !ok { @@ -62,7 +63,7 @@ func (h *Settings) Handle(ctx context.Context, a fleetapi.Action, acker store.Fl h.log.Errorf("failed to commit acker after acknowledging action with id '%s'", action.ActionID) } - h.reexec.ReExec(nil) + h.coord.ReExec(nil) return nil } diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go similarity index 67% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go rename to internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go index 71fe0f30644..045d52a4fcf 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go @@ -8,10 +8,10 @@ import ( "context" "fmt" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -27,8 +27,7 @@ type 
stateStore interface { // For it to be operational again it needs to be either enrolled or reconfigured. type Unenroll struct { log *logger.Logger - emitter pipeline.EmitterFunc - dispatcher pipeline.Router + ch chan coordinator.ConfigChange closers []context.CancelFunc stateStore stateStore } @@ -36,43 +35,40 @@ type Unenroll struct { // NewUnenroll creates a new Unenroll handler. func NewUnenroll( log *logger.Logger, - emitter pipeline.EmitterFunc, - dispatcher pipeline.Router, + ch chan coordinator.ConfigChange, closers []context.CancelFunc, stateStore stateStore, ) *Unenroll { return &Unenroll{ log: log, - emitter: emitter, - dispatcher: dispatcher, + ch: ch, closers: closers, stateStore: stateStore, } } // Handle handles UNENROLL action. -func (h *Unenroll) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Unenroll) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Debugf("handlerUnenroll: action '%+v' received", a) action, ok := a.(*fleetapi.ActionUnenroll) if !ok { return fmt.Errorf("invalid type, expected ActionUnenroll and received %T", a) } - // Providing empty map will close all pipelines - noPrograms := make(map[pipeline.RoutingKey][]program.Program) - _ = h.dispatcher.Route(ctx, a.ID(), noPrograms) + if action.IsDetected { + // not from Fleet; so we set it to nil so policyChange doesn't ack it + a = nil + } - if !action.IsDetected { - // ACK only events received from fleet. - if err := acker.Ack(ctx, action); err != nil { - return err - } + h.ch <- &policyChange{ + ctx: ctx, + cfg: config.New(), + action: a, + acker: acker, + commit: true, + } - // commit all acks before quitting. 
- if err := acker.Commit(ctx); err != nil { - return err - } - } else if h.stateStore != nil { + if h.stateStore != nil { // backup action for future start to avoid starting fleet gateway loop h.stateStore.Add(a) h.stateStore.Save() diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go similarity index 60% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go rename to internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go index cfc7ea83749..1760c96d369 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go @@ -8,9 +8,9 @@ import ( "context" "fmt" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -18,42 +18,25 @@ import ( // After running Upgrade agent should download its own version specified by action // from repository specified by fleet. type Upgrade struct { - log *logger.Logger - upgrader *upgrade.Upgrader + log *logger.Logger + coord *coordinator.Coordinator } // NewUpgrade creates a new Upgrade handler. -func NewUpgrade(log *logger.Logger, upgrader *upgrade.Upgrader) *Upgrade { +func NewUpgrade(log *logger.Logger, coord *coordinator.Coordinator) *Upgrade { return &Upgrade{ - log: log, - upgrader: upgrader, + log: log, + coord: coord, } } // Handle handles UPGRADE action. 
-func (h *Upgrade) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Upgrade) Handle(ctx context.Context, a fleetapi.Action, _ acker.Acker) error { h.log.Debugf("handlerUpgrade: action '%+v' received", a) action, ok := a.(*fleetapi.ActionUpgrade) if !ok { return fmt.Errorf("invalid type, expected ActionUpgrade and received %T", a) } - _, err := h.upgrader.Upgrade(ctx, &upgradeAction{action}, true) - return err -} - -type upgradeAction struct { - *fleetapi.ActionUpgrade -} - -func (a *upgradeAction) Version() string { - return a.ActionUpgrade.Version -} - -func (a *upgradeAction) SourceURI() string { - return a.ActionUpgrade.SourceURI -} - -func (a *upgradeAction) FleetAction() *fleetapi.ActionUpgrade { - return a.ActionUpgrade + return h.coord.Upgrade(ctx, action.Version, action.SourceURI, action) } diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_default.go b/internal/pkg/agent/application/actions/handlers/handler_default.go similarity index 88% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_default.go rename to internal/pkg/agent/application/actions/handlers/handler_default.go index 873c3fd7c5a..dd59861f584 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_default.go +++ b/internal/pkg/agent/application/actions/handlers/handler_default.go @@ -7,8 +7,8 @@ package handlers import ( "context" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -25,7 +25,7 @@ func NewDefault(log *logger.Logger) *Default { } // Handle is a default handler, no action is taken. 
-func (h *Default) Handle(_ context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Default) Handle(_ context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Errorf("HandlerDefault: action '%+v' received", a) return nil } diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_unknown.go b/internal/pkg/agent/application/actions/handlers/handler_unknown.go similarity index 88% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_unknown.go rename to internal/pkg/agent/application/actions/handlers/handler_unknown.go index 58e0640fe4d..e0fdf4c81ab 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_unknown.go +++ b/internal/pkg/agent/application/actions/handlers/handler_unknown.go @@ -7,8 +7,8 @@ package handlers import ( "context" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -25,7 +25,7 @@ func NewUnknown(log *logger.Logger) *Unknown { } // Handle handles unknown actions, no action is taken. 
-func (h *Unknown) Handle(_ context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Unknown) Handle(_ context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Errorf("HandlerUnknown: action '%+v' received", a) return nil } diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index 7bc0089940f..788e189cb60 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -5,102 +5,158 @@ package application import ( - "context" "fmt" + "path/filepath" + goruntime "runtime" + "strconv" "go.elastic.co/apm" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/sorted" + "github.com/elastic/go-sysinfo" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/agent/storage" + "github.com/elastic/elastic-agent/internal/pkg/capabilities" + "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/internal/pkg/dir" + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/component/runtime" "github.com/elastic/elastic-agent/pkg/core/logger" ) -// Application is the application interface implemented by the different running mode. 
-type Application interface { - Start() error - Stop() error - AgentInfo() *info.AgentInfo - Routes() *sorted.Set -} +type discoverFunc func() ([]string, error) -type reexecManager interface { - ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string) -} +// ErrNoConfiguration is returned when no configuration are found. +var ErrNoConfiguration = errors.New("no configuration found", errors.TypeConfig) -type upgraderControl interface { - SetUpgrader(upgrader *upgrade.Upgrader) -} +// PlatformModifier can modify the platform details before the runtime specifications are loaded. +type PlatformModifier func(detail component.PlatformDetail) component.PlatformDetail // New creates a new Agent and bootstrap the required subsystem. func New( log *logger.Logger, - reexec reexecManager, - statusCtrl status.Controller, - uc upgraderControl, agentInfo *info.AgentInfo, + reexec coordinator.ReExecManager, tracer *apm.Tracer, -) (Application, error) { - // Load configuration from disk to understand in which mode of operation - // we must start the elastic-agent, the mode of operation cannot be changed without restarting the - // elastic-agent. + modifiers ...PlatformModifier, +) (*coordinator.Coordinator, error) { + platform, err := getPlatformDetail(modifiers...) 
+ if err != nil { + return nil, fmt.Errorf("failed to gather system information: %w", err) + } + log.Info("Gathered system information") + + specs, err := component.LoadRuntimeSpecs(paths.Components(), platform) + if err != nil { + return nil, fmt.Errorf("failed to detect inputs and outputs: %w", err) + } + log.With("inputs", specs.Inputs()).Info("Detected available inputs and outputs") + + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log) + if err != nil { + return nil, fmt.Errorf("failed to determine capabilities: %w", err) + } + log.Info("Determined allowed capabilities") + pathConfigFile := paths.ConfigFile() rawConfig, err := config.LoadFile(pathConfigFile) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to load configuration: %w", err) } - if err := info.InjectAgentConfig(rawConfig); err != nil { - return nil, err + return nil, fmt.Errorf("failed to load configuration: %w", err) + } + cfg, err := configuration.NewFromConfig(rawConfig) + if err != nil { + return nil, fmt.Errorf("failed to load configuration: %w", err) } - return createApplication(log, pathConfigFile, rawConfig, reexec, statusCtrl, uc, agentInfo, tracer) -} + upgrader := upgrade.NewUpgrader(log, cfg.Settings.DownloadConfig) -func createApplication( - log *logger.Logger, - pathConfigFile string, - rawConfig *config.Config, - reexec reexecManager, - statusCtrl status.Controller, - uc upgraderControl, - agentInfo *info.AgentInfo, - tracer *apm.Tracer, -) (Application, error) { - log.Info("Detecting execution mode") - ctx := context.Background() - cfg, err := configuration.NewFromConfig(rawConfig) + runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to initialize runtime manager: %w", err) } + var configMgr coordinator.ConfigManager + var managed *managedConfigManager + var compModifiers []coordinator.ComponentsModifier if configuration.IsStandalone(cfg.Fleet) { - 
log.Info("Agent is managed locally") - return newLocal(ctx, log, paths.ConfigFile(), rawConfig, reexec, statusCtrl, uc, agentInfo, tracer) + log.Info("Parsed configuration and determined agent is managed locally") + + loader := config.NewLoader(log, externalConfigsGlob()) + discover := discoverer(pathConfigFile, cfg.Settings.Path, externalConfigsGlob()) + if !cfg.Settings.Reload.Enabled { + log.Debug("Reloading of configuration is off") + configMgr = newOnce(log, discover, loader) + } else { + log.Debugf("Reloading of configuration is on, frequency is set to %s", cfg.Settings.Reload.Period) + configMgr = newPeriodic(log, cfg.Settings.Reload.Period, discover, loader) + } + } else if configuration.IsFleetServerBootstrap(cfg.Fleet) { + log.Info("Parsed configuration and determined agent is in Fleet Server bootstrap mode") + compModifiers = append(compModifiers, FleetServerComponentModifier) + configMgr, err = newFleetServerBootstrapManager(log) + if err != nil { + return nil, err + } + } else { + var store storage.Store + store, cfg, err = mergeFleetConfig(rawConfig) + if err != nil { + return nil, err + } + + log.Info("Parsed configuration and determined agent is managed by Fleet") + + compModifiers = append(compModifiers, FleetServerComponentModifier) + managed, err = newManagedConfigManager(log, agentInfo, cfg, store, runtime) + if err != nil { + return nil, err + } + configMgr = managed } - // not in standalone; both modes require reading the fleet.yml configuration file - var store storage.Store - store, cfg, err = mergeFleetConfig(rawConfig) + composable, err := composable.New(log, rawConfig) if err != nil { - return nil, err + return nil, errors.New(err, "failed to initialize composable controller") } - if configuration.IsFleetServerBootstrap(cfg.Fleet) { - log.Info("Agent is in Fleet Server bootstrap mode") - return newFleetServerBootstrap(ctx, log, pathConfigFile, rawConfig, statusCtrl, agentInfo, tracer) + coord := coordinator.New(log, specs, reexec, 
upgrader, runtime, configMgr, composable, caps, compModifiers...) + if managed != nil { + // the coordinator requires the config manager as well as in managed-mode the config manager requires the + // coordinator, so it must be set here once the coordinator is created + managed.coord = coord } + return coord, nil +} - log.Info("Agent is managed by Fleet") - return newManaged(ctx, log, store, cfg, rawConfig, reexec, statusCtrl, agentInfo, tracer) +func getPlatformDetail(modifiers ...PlatformModifier) (component.PlatformDetail, error) { + info, err := sysinfo.Host() + if err != nil { + return component.PlatformDetail{}, err + } + os := info.Info().OS + detail := component.PlatformDetail{ + Platform: component.Platform{ + OS: goruntime.GOOS, + Arch: goruntime.GOARCH, + GOOS: goruntime.GOOS, + }, + Family: os.Family, + Major: strconv.Itoa(os.Major), + Minor: strconv.Itoa(os.Minor), + } + for _, modifier := range modifiers { + detail = modifier(detail) + } + return detail, nil } func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.Configuration, error) { @@ -146,3 +202,28 @@ func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.C return store, cfg, nil } + +func externalConfigsGlob() string { + return filepath.Join(paths.Config(), configuration.ExternalInputsPattern) +} + +func discoverer(patterns ...string) discoverFunc { + var p []string + for _, newP := range patterns { + if len(newP) == 0 { + continue + } + + p = append(p, newP) + } + + if len(p) == 0 { + return func() ([]string, error) { + return []string{}, ErrNoConfiguration + } + } + + return func() ([]string, error) { + return dir.DiscoverFiles(p...) 
+ } +} diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go new file mode 100644 index 00000000000..dac48400179 --- /dev/null +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -0,0 +1,592 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package coordinator + +import ( + "context" + "errors" + "fmt" + + "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + + "go.elastic.co/apm" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" + agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" + "github.com/elastic/elastic-agent/internal/pkg/capabilities" + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/component/runtime" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +var ( + // ErrNotUpgradable error is returned when upgrade cannot be performed. + ErrNotUpgradable = errors.New( + "cannot be upgraded; must be installed with install sub-command and " + + "running under control of the systems supervisor") +) + +// ReExecManager provides an interface to perform re-execution of the entire agent. +type ReExecManager interface { + ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string) +} + +// UpgradeManager provides an interface to perform the upgrade action for the agent. +type UpgradeManager interface { + // Upgradeable returns true if can be upgraded. + Upgradeable() bool + + // Upgrade upgrades running agent. 
+	Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error)
+}
+
+// Runner provides interface to run a manager and receive running errors.
+type Runner interface {
+	// Run runs the manager.
+	Run(context.Context) error
+
+	// Errors returns the channel to listen to errors on.
+	//
+	// A manager should send a nil error to clear its previous error when it should no longer report as an error.
+	Errors() <-chan error
+}
+
+// RuntimeManager provides an interface to run and update the runtime.
+type RuntimeManager interface {
+	Runner
+
+	// Update updates the current components model.
+	Update([]component.Component) error
+
+	// State returns the current components model state.
+	State() []runtime.ComponentComponentState
+
+	// PerformAction executes an action on a unit.
+	PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error)
+
+	// SubscribeAll provides an interface to watch for changes in all components.
+	SubscribeAll(context.Context) *runtime.SubscriptionAll
+}
+
+// ConfigChange provides an interface for receiving a new configuration.
+//
+// Ack must be called if the configuration change was accepted and Fail should be called if it fails to be accepted.
+type ConfigChange interface {
+	// Config returns the configuration for this change.
+	Config() *config.Config
+
+	// Ack marks the configuration change as accepted.
+	Ack() error
+
+	// Fail marks the configuration change as failed.
+	Fail(err error)
+}
+
+// ErrorReporter provides an interface for any manager that is handled by the coordinator to report errors.
+type ErrorReporter interface {
+}
+
+// ConfigManager provides an interface to run and watch for configuration changes.
+type ConfigManager interface {
+	Runner
+
+	// Watch returns the channel to watch for configuration changes.
+	Watch() <-chan ConfigChange
+}
+
+// VarsManager provides an interface to run and watch for variable changes.
+type VarsManager interface {
+	Runner
+
+	// Watch returns the channel to watch for variable changes.
+	Watch() <-chan []*transpiler.Vars
+}
+
+// ComponentsModifier is a function that takes the computed components model and modifies it before
+// passing it into the components runtime manager.
+type ComponentsModifier func(comps []component.Component, policy map[string]interface{}) ([]component.Component, error)
+
+// State provides the current state of the coordinator along with all the current states of components and units.
+type State struct {
+	State      agentclient.State
+	Message    string
+	Components []runtime.ComponentComponentState
+}
+
+// StateFetcher provides an interface to fetch the current state of the coordinator.
+type StateFetcher interface {
+	// State returns the current state of the coordinator.
+	State() State
+}
+
+// Coordinator manages the entire state of the Elastic Agent.
+//
+// All configuration changes, update variables, and upgrade actions are managed and controlled by the coordinator.
+type Coordinator struct {
+	logger *logger.Logger
+
+	specs component.RuntimeSpecs
+
+	reexecMgr  ReExecManager
+	upgradeMgr UpgradeManager
+
+	runtimeMgr    RuntimeManager
+	runtimeMgrErr error
+	configMgr     ConfigManager
+	configMgrErr  error
+	varsMgr       VarsManager
+	varsMgrErr    error
+
+	caps      capabilities.Capability
+	modifiers []ComponentsModifier
+
+	state coordinatorState
+}
+
+// New creates a new coordinator.
+func New(logger *logger.Logger, specs component.RuntimeSpecs, reexecMgr ReExecManager, upgradeMgr UpgradeManager, runtimeMgr RuntimeManager, configMgr ConfigManager, varsMgr VarsManager, caps capabilities.Capability, modifiers ...ComponentsModifier) *Coordinator { + return &Coordinator{ + logger: logger, + specs: specs, + reexecMgr: reexecMgr, + upgradeMgr: upgradeMgr, + runtimeMgr: runtimeMgr, + configMgr: configMgr, + varsMgr: varsMgr, + caps: caps, + modifiers: modifiers, + state: coordinatorState{ + state: agentclient.Starting, + }, + } +} + +// State returns the current state for the coordinator. +func (c *Coordinator) State() (s State) { + s.State = c.state.state + s.Message = c.state.message + s.Components = c.runtimeMgr.State() + if c.state.overrideState != nil { + // state has been overridden due to an action that is occurring + s.State = c.state.overrideState.state + s.Message = c.state.overrideState.message + } else if s.State == agentclient.Healthy { + // if any of the managers are reporting an error then something is wrong + // or + // coordinator overall is reported is healthy; in the case any component or unit is not healthy then we report + // as degraded because we are not fully healthy + if c.runtimeMgrErr != nil { + s.State = agentclient.Failed + s.Message = c.runtimeMgrErr.Error() + } else if c.configMgrErr != nil { + s.State = agentclient.Failed + s.Message = c.configMgrErr.Error() + } else if c.varsMgrErr != nil { + s.State = agentclient.Failed + s.Message = c.varsMgrErr.Error() + } else if hasState(s.Components, client.UnitStateFailed) { + s.State = agentclient.Degraded + s.Message = "1 or more components/units in a failed state" + } else if hasState(s.Components, client.UnitStateDegraded) { + s.State = agentclient.Degraded + s.Message = "1 or more components/units in a degraded state" + } + } + return s +} + +// ReExec performs the re-execution. 
+func (c *Coordinator) ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string) {
+	// override the overall state to stopping until the re-execution is complete
+	c.state.overrideState = &coordinatorOverrideState{
+		state:   agentclient.Stopping,
+		message: "Re-executing",
+	}
+	c.reexecMgr.ReExec(callback, argOverrides...)
+}
+
+// Upgrade runs the upgrade process.
+func (c *Coordinator) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) error {
+	// early check outside of upgrader before overriding the state
+	if !c.upgradeMgr.Upgradeable() {
+		return ErrNotUpgradable
+	}
+
+	// early check capabilities to ensure this upgrade action is allowed
+	if c.caps != nil {
+		if _, err := c.caps.Apply(map[string]interface{}{
+			"version":   version,
+			"sourceURI": sourceURI,
+		}); errors.Is(err, capabilities.ErrBlocked) {
+			return ErrNotUpgradable
+		}
+	}
+
+	// override the overall state to upgrading until the re-execution is complete
+	c.state.overrideState = &coordinatorOverrideState{
+		state:   agentclient.Upgrading,
+		message: fmt.Sprintf("Upgrading to version %s", version),
+	}
+	cb, err := c.upgradeMgr.Upgrade(ctx, version, sourceURI, action)
+	if err != nil {
+		c.state.overrideState = nil
+		return err
+	}
+	c.ReExec(cb)
+	return nil
+}
+
+// PerformAction executes an action on a unit.
+func (c *Coordinator) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) {
+	return c.runtimeMgr.PerformAction(ctx, unit, name, params)
+}
+
+// Run runs the coordinator.
+//
+// The RuntimeManager, ConfigManager and VarsManager that are passed into NewCoordinator are also run and lifecycle controlled by the Run.
+//
+// In the case that either of the above managers fail, they will all be restarted unless the context was explicitly cancelled or timed out.
+func (c *Coordinator) Run(ctx context.Context) error { + // log all changes in the state of the runtime + go func() { + state := make(map[string]coordinatorComponentLogState) + + sub := c.runtimeMgr.SubscribeAll(ctx) + for { + select { + case <-ctx.Done(): + return + case s := <-sub.Ch(): + logState := newCoordinatorComponentLogState(&s) + _, ok := state[s.Component.ID] + if !ok { + c.logger.With("component", logState).Info("New component created") + } else { + c.logger.With("component", logState).Info("Existing component state changed") + } + state[s.Component.ID] = logState + if s.State.State == client.UnitStateStopped { + delete(state, s.Component.ID) + } + } + } + }() + + for { + c.state.state = agentclient.Starting + c.state.message = "Waiting for initial configuration and composable variables" + err := c.runner(ctx) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + c.state.state = agentclient.Stopped + c.state.message = "Requested to be stopped" + // do not restart + return err + } + } + c.state.state = agentclient.Failed + c.state.message = fmt.Sprintf("Coordinator failed and will be restarted: %s", err) + c.logger.Errorf("coordinator failed and will be restarted: %s", err) + } +} + +// runner performs the actual work of running all the managers +// +// if one of the managers fails the others are also stopped and then the whole runner returns +func (c *Coordinator) runner(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + runtimeWatcher := c.runtimeMgr + runtimeRun := make(chan bool) + runtimeErrCh := make(chan error) + go func(manager Runner) { + err := manager.Run(ctx) + close(runtimeRun) + runtimeErrCh <- err + }(runtimeWatcher) + + configWatcher := c.configMgr + configRun := make(chan bool) + configErrCh := make(chan error) + go func(manager Runner) { + err := manager.Run(ctx) + close(configRun) + configErrCh <- err + }(configWatcher) + + varsWatcher := 
c.varsMgr + varsRun := make(chan bool) + varsErrCh := make(chan error) + go func(manager Runner) { + err := manager.Run(ctx) + close(varsRun) + varsErrCh <- err + }(varsWatcher) + + for { + select { + case <-ctx.Done(): + runtimeErr := <-runtimeErrCh + c.runtimeMgrErr = runtimeErr + configErr := <-configErrCh + c.configMgrErr = configErr + varsErr := <-varsErrCh + c.varsMgrErr = varsErr + if runtimeErr != nil && !errors.Is(runtimeErr, context.Canceled) { + return runtimeErr + } + if configErr != nil && !errors.Is(configErr, context.Canceled) { + return configErr + } + if varsErr != nil && !errors.Is(varsErr, context.Canceled) { + return varsErr + } + return ctx.Err() + case <-runtimeRun: + if ctx.Err() == nil { + cancel() + } + case <-configRun: + if ctx.Err() == nil { + cancel() + } + case <-varsRun: + if ctx.Err() == nil { + cancel() + } + case runtimeErr := <-c.runtimeMgr.Errors(): + c.runtimeMgrErr = runtimeErr + case configErr := <-c.configMgr.Errors(): + c.configMgrErr = configErr + case varsErr := <-c.varsMgr.Errors(): + c.varsMgrErr = varsErr + case change := <-configWatcher.Watch(): + if ctx.Err() == nil { + if err := c.processConfig(ctx, change.Config()); err != nil { + c.state.state = agentclient.Failed + c.state.message = err.Error() + c.logger.Errorf("%s", err) + change.Fail(err) + } else { + if err := change.Ack(); err != nil { + err = fmt.Errorf("failed to ack configuration change: %w", err) + c.state.state = agentclient.Failed + c.state.message = err.Error() + c.logger.Errorf("%s", err) + } + } + } + case vars := <-varsWatcher.Watch(): + if ctx.Err() == nil { + if err := c.processVars(ctx, vars); err != nil { + c.state.state = agentclient.Failed + c.state.message = err.Error() + c.logger.Errorf("%s", err) + } + } + } + } +} + +func (c *Coordinator) processConfig(ctx context.Context, cfg *config.Config) (err error) { + span, ctx := apm.StartSpan(ctx, "config", "app.internal") + defer func() { + apm.CaptureError(ctx, err).Send() + span.End() + }() + + 
if err := info.InjectAgentConfig(cfg); err != nil { + return err + } + + // perform and verify ast translation + m, err := cfg.ToMapStr() + if err != nil { + return fmt.Errorf("could not create the AST from the configuration: %w", err) + } + rawAst, err := transpiler.NewAST(m) + if err != nil { + return fmt.Errorf("could not create the AST from the configuration: %w", err) + } + + if c.caps != nil { + var ok bool + updatedAst, err := c.caps.Apply(rawAst) + if err != nil { + return fmt.Errorf("failed to apply capabilities: %w", err) + } + + rawAst, ok = updatedAst.(*transpiler.AST) + if !ok { + return fmt.Errorf("failed to transform object returned from capabilities to AST: %w", err) + } + } + + c.state.config = cfg + c.state.ast = rawAst + + if c.state.vars != nil { + return c.process(ctx) + } + return nil +} + +func (c *Coordinator) processVars(ctx context.Context, vars []*transpiler.Vars) (err error) { + span, ctx := apm.StartSpan(ctx, "vars", "app.internal") + defer func() { + apm.CaptureError(ctx, err).Send() + span.End() + }() + + c.state.vars = vars + + if c.state.ast != nil { + return c.process(ctx) + } + return nil +} + +func (c *Coordinator) process(ctx context.Context) (err error) { + span, ctx := apm.StartSpan(ctx, "process", "app.internal") + defer func() { + apm.CaptureError(ctx, err).Send() + span.End() + }() + + ast := c.state.ast.Clone() + inputs, ok := transpiler.Lookup(ast, "inputs") + if ok { + renderedInputs, err := transpiler.RenderInputs(inputs, c.state.vars) + if err != nil { + return fmt.Errorf("rendering inputs failed: %w", err) + } + err = transpiler.Insert(ast, renderedInputs, "inputs") + if err != nil { + return fmt.Errorf("inserting rendered inputs failed: %w", err) + } + } + + cfg, err := ast.Map() + if err != nil { + return fmt.Errorf("failed to convert ast to map[string]interface{}: %w", err) + } + comps, err := c.specs.ToComponents(cfg) + if err != nil { + return fmt.Errorf("failed to render components: %w", err) + } + + for _, 
modifier := range c.modifiers { + comps, err = modifier(comps, cfg) + if err != nil { + return fmt.Errorf("failed to modify components: %w", err) + } + } + + c.logger.Info("Updating running component model") + c.logger.With("components", comps).Debug("Updating running component model") + err = c.runtimeMgr.Update(comps) + if err != nil { + return err + } + c.state.state = agentclient.Healthy + c.state.message = "Running" + return nil +} + +type coordinatorState struct { + state agentclient.State + message string + overrideState *coordinatorOverrideState + + config *config.Config + ast *transpiler.AST + vars []*transpiler.Vars + components []component.Component +} + +type coordinatorOverrideState struct { + state agentclient.State + message string +} + +type coordinatorComponentLogState struct { + ID string `json:"id"` + State string `json:"state"` + Message string `json:"message"` + Inputs []coordinatorComponentUnitLogState `json:"inputs"` + Output coordinatorComponentUnitLogState `json:"output,omitempty"` +} + +type coordinatorComponentUnitLogState struct { + ID string `json:"id"` + State string `json:"state"` + Message string `json:"message"` +} + +func newCoordinatorComponentLogState(state *runtime.ComponentComponentState) coordinatorComponentLogState { + var output coordinatorComponentUnitLogState + inputs := make([]coordinatorComponentUnitLogState, 0, len(state.State.Units)) + for key, unit := range state.State.Units { + if key.UnitType == client.UnitTypeInput { + inputs = append(inputs, coordinatorComponentUnitLogState{ + ID: key.UnitID, + State: newCoordinatorComponentStateStr(unit.State), + Message: unit.Message, + }) + } else { + output = coordinatorComponentUnitLogState{ + ID: key.UnitID, + State: newCoordinatorComponentStateStr(unit.State), + Message: unit.Message, + } + } + } + return coordinatorComponentLogState{ + ID: state.Component.ID, + State: newCoordinatorComponentStateStr(state.State.State), + Message: state.State.Message, + Inputs: inputs, + 
Output: output, + } +} + +func newCoordinatorComponentStateStr(state client.UnitState) string { + switch state { + case client.UnitStateStarting: + return "Starting" + case client.UnitStateConfiguring: + return "Configuring" + case client.UnitStateDegraded: + return "Degraded" + case client.UnitStateHealthy: + return "Healthy" + case client.UnitStateFailed: + return "Failed" + case client.UnitStateStopping: + return "Stopping" + case client.UnitStateStopped: + return "Stopped" + } + return "Unknown" +} + +func hasState(components []runtime.ComponentComponentState, state client.UnitState) bool { + for _, comp := range components { + if comp.State.State == state { + return true + } + for _, unit := range comp.State.Units { + if unit.State == state { + return true + } + } + } + return false +} diff --git a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher.go b/internal/pkg/agent/application/dispatcher/dispatcher.go similarity index 76% rename from internal/pkg/agent/application/pipeline/dispatcher/dispatcher.go rename to internal/pkg/agent/application/dispatcher/dispatcher.go index 6f036b57b21..8628cf5a59f 100644 --- a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher.go @@ -12,25 +12,29 @@ import ( "go.elastic.co/apm" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) type actionHandlers map[string]actions.Handler +// Dispatcher processes actions coming from fleet api. 
+type Dispatcher interface { + Dispatch(context.Context, acker.Acker, ...fleetapi.Action) error +} + // ActionDispatcher processes actions coming from fleet using registered set of handlers. type ActionDispatcher struct { - ctx context.Context log *logger.Logger handlers actionHandlers def actions.Handler } // New creates a new action dispatcher. -func New(ctx context.Context, log *logger.Logger, def actions.Handler) (*ActionDispatcher, error) { +func New(log *logger.Logger, def actions.Handler) (*ActionDispatcher, error) { var err error if log == nil { log, err = logger.New("action_dispatcher", false) @@ -44,7 +48,6 @@ func New(ctx context.Context, log *logger.Logger, def actions.Handler) (*ActionD } return &ActionDispatcher{ - ctx: ctx, log: log, handlers: make(actionHandlers), def: def, @@ -76,21 +79,13 @@ func (ad *ActionDispatcher) key(a fleetapi.Action) string { } // Dispatch dispatches an action using pre-registered set of handlers. -// ctx is used here ONLY to carry the span, for cancelation use the cancel -// function of the ActionDispatcher.ctx. -func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker store.FleetAcker, actions ...fleetapi.Action) (err error) { +func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker acker.Acker, actions ...fleetapi.Action) (err error) { span, ctx := apm.StartSpan(ctx, "dispatch", "app.internal") defer func() { apm.CaptureError(ctx, err).Send() span.End() }() - // Creating a child context that carries both the ad.ctx cancelation and - // the span from ctx. 
- ctx, cancel := context.WithCancel(ad.ctx) - defer cancel() - ctx = apm.ContextWithSpan(ctx, span) - if len(actions) == 0 { ad.log.Debug("No action to dispatch") return nil @@ -103,11 +98,11 @@ func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker store.FleetAcker ) for _, action := range actions { - if err := ad.ctx.Err(); err != nil { + if err := ctx.Err(); err != nil { return err } - if err := ad.dispatchAction(action, acker); err != nil { + if err := ad.dispatchAction(ctx, action, acker); err != nil { ad.log.Debugf("Failed to dispatch action '%+v', error: %+v", action, err) return err } @@ -117,13 +112,13 @@ func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker store.FleetAcker return acker.Commit(ctx) } -func (ad *ActionDispatcher) dispatchAction(a fleetapi.Action, acker store.FleetAcker) error { +func (ad *ActionDispatcher) dispatchAction(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { handler, found := ad.handlers[(ad.key(a))] if !found { - return ad.def.Handle(ad.ctx, a, acker) + return ad.def.Handle(ctx, a, acker) } - return handler.Handle(ad.ctx, a, acker) + return handler.Handle(ctx, a, acker) } func detectTypes(actions []fleetapi.Action) []string { diff --git a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher_test.go b/internal/pkg/agent/application/dispatcher/dispatcher_test.go similarity index 64% rename from internal/pkg/agent/application/pipeline/dispatcher/dispatcher_test.go rename to internal/pkg/agent/application/dispatcher/dispatcher_test.go index 3c65dd4a2e7..4c19779688a 100644 --- a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher_test.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher_test.go @@ -9,22 +9,19 @@ import ( "testing" "time" - "go.elastic.co/apm" - "go.elastic.co/apm/apmtest" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" 
"github.com/elastic/elastic-agent/internal/pkg/fleetapi" - noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" ) type mockHandler struct { mock.Mock } -func (h *mockHandler) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *mockHandler) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { args := h.Called(ctx, a, acker) return args.Error(0) } @@ -61,52 +58,13 @@ func (m *mockAction) Expiration() (time.Time, error) { return args.Get(0).(time.Time), args.Error(1) } -type mockAcker struct { - mock.Mock -} - -func (m *mockAcker) Ack(ctx context.Context, action fleetapi.Action) error { - args := m.Called(ctx, action) - return args.Error(0) -} - -func (m *mockAcker) Commit(ctx context.Context) error { - args := m.Called(ctx) - return args.Error(0) -} - func TestActionDispatcher(t *testing.T) { - ack := noopacker.NewAcker() - - t.Run("Merges ActionDispatcher ctx cancel and Dispatch ctx value", func(t *testing.T) { - action1 := &mockAction{} - def := &mockHandler{} - def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - span := apmtest.NewRecordingTracer(). - StartTransaction("ignore", "ignore"). 
- StartSpan("ignore", "ignore", nil) - ctx1, cancel := context.WithCancel(context.Background()) - ack := &mockAcker{} - ack.On("Commit", mock.Anything).Run(func(args mock.Arguments) { - ctx, _ := args.Get(0).(context.Context) - require.NoError(t, ctx.Err()) - got := apm.SpanFromContext(ctx) - require.Equal(t, span.TraceContext().Span, got.ParentID()) - cancel() // cancel function from ctx1 - require.Equal(t, ctx.Err(), context.Canceled) - }).Return(nil) - d, err := New(ctx1, nil, def) - require.NoError(t, err) - ctx2 := apm.ContextWithSpan(context.Background(), span) - err = d.Dispatch(ctx2, ack, action1) - require.NoError(t, err) - ack.AssertExpectations(t) - }) + ack := noop.New() t.Run("Success to dispatch multiples events", func(t *testing.T) { ctx := context.Background() def := &mockHandler{} - d, err := New(ctx, nil, def) + d, err := New(nil, def) require.NoError(t, err) success1 := &mockHandler{} @@ -136,7 +94,7 @@ func TestActionDispatcher(t *testing.T) { def := &mockHandler{} def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() ctx := context.Background() - d, err := New(ctx, nil, def) + d, err := New(nil, def) require.NoError(t, err) action := &mockUnknownAction{} @@ -151,7 +109,7 @@ func TestActionDispatcher(t *testing.T) { success2 := &mockHandler{} def := &mockHandler{} - d, err := New(context.Background(), nil, def) + d, err := New(nil, def) require.NoError(t, err) err = d.Register(&mockAction{}, success1) diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go index 9b72b177eb9..bfb801b9dde 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap.go @@ -6,230 +6,73 @@ package application import ( "context" + "time" - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - 
"github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/go-sysinfo" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/stream" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - reporting "github.com/elastic/elastic-agent/internal/pkg/reporter" - logreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/log" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" ) -// FleetServerBootstrap application, does just enough to get a Fleet Server up and running so enrollment -// can complete. 
-type FleetServerBootstrap struct { - bgContext context.Context - cancelCtxFn context.CancelFunc - log *logger.Logger - Config configuration.FleetAgentConfig - agentInfo *info.AgentInfo - router pipeline.Router - source source - srv *server.Server -} - -func newFleetServerBootstrap( - ctx context.Context, - log *logger.Logger, - pathConfigFile string, - rawConfig *config.Config, - statusCtrl status.Controller, - agentInfo *info.AgentInfo, - tracer *apm.Tracer, -) (*FleetServerBootstrap, error) { - cfg, err := configuration.NewFromConfig(rawConfig) - if err != nil { - return nil, err - } - - if log == nil { - log, err = logger.NewFromConfig("", cfg.Settings.LoggingConfig, false) - if err != nil { - return nil, err - } - } - - logR := logreporter.NewReporter(log) - - sysInfo, err := sysinfo.Host() - if err != nil { - return nil, errors.New(err, - "fail to get system information", - errors.TypeUnexpected) - } - - bootstrapApp := &FleetServerBootstrap{ - log: log, - agentInfo: agentInfo, - } - - bootstrapApp.bgContext, bootstrapApp.cancelCtxFn = context.WithCancel(ctx) - bootstrapApp.srv, err = server.NewFromConfig(log, cfg.Settings.GRPC, &operation.ApplicationStatusHandler{}, tracer) - if err != nil { - return nil, errors.New(err, "initialize GRPC listener") - } - - reporter := reporting.NewReporter(bootstrapApp.bgContext, log, bootstrapApp.agentInfo, logR) - - if cfg.Settings.MonitoringConfig != nil { - cfg.Settings.MonitoringConfig.Enabled = false - } else { - cfg.Settings.MonitoringConfig = &monitoringCfg.MonitoringConfig{Enabled: false} - } - monitor, err := monitoring.NewMonitor(cfg.Settings) - if err != nil { - return nil, errors.New(err, "failed to initialize monitoring") - } - - router, err := router.New(log, stream.Factory(bootstrapApp.bgContext, agentInfo, cfg.Settings, bootstrapApp.srv, reporter, monitor, statusCtrl)) - if err != nil { - return nil, errors.New(err, "fail to initialize pipeline router") - } - bootstrapApp.router = router - - emit, err := 
bootstrapEmitter( - bootstrapApp.bgContext, - log, - agentInfo, - router, - &pipeline.ConfigModifiers{ - Filters: []pipeline.FilterFunc{filters.StreamChecker, modifiers.InjectFleet(rawConfig, sysInfo.Info(), agentInfo)}, +// injectFleetServerInput is the base configuration that is used plus the FleetServerComponentModifier that adjusts +// the components before sending them to the runtime manager. +var injectFleetServerInput = config.MustNewConfigFrom(map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "hosts": []string{"localhost:9200"}, }, - ) - if err != nil { - return nil, err - } - - loader := config.NewLoader(log, "") - discover := discoverer(pathConfigFile, cfg.Settings.Path) - bootstrapApp.source = newOnce(log, discover, loader, emit) - return bootstrapApp, nil -} - -// Routes returns a list of routes handled by server. -func (b *FleetServerBootstrap) Routes() *sorted.Set { - return b.router.Routes() -} - -// Start starts a managed elastic-agent. -func (b *FleetServerBootstrap) Start() error { - b.log.Info("Agent is starting") - defer b.log.Info("Agent is stopped") - - if err := b.srv.Start(); err != nil { - return err - } - if err := b.source.Start(); err != nil { - return err - } - - return nil + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "fleet-server", + }, + }, +}) + +// FleetServerComponentModifier modifies the comps to inject extra information from the policy into +// the Fleet Server component and units needed to run Fleet Server correctly. +func FleetServerComponentModifier(comps []component.Component, policy map[string]interface{}) ([]component.Component, error) { + // TODO(blakerouse): Need to add logic to update the Fleet Server component with extra information from the policy. + return comps, nil } -// Stop stops a local agent. 
-func (b *FleetServerBootstrap) Stop() error { - err := b.source.Stop() - b.cancelCtxFn() - b.router.Shutdown() - b.srv.Stop() - return err -} +type fleetServerBootstrapManager struct { + log *logger.Logger -// AgentInfo retrieves elastic-agent information. -func (b *FleetServerBootstrap) AgentInfo() *info.AgentInfo { - return b.agentInfo + ch chan coordinator.ConfigChange + errCh chan error } -func bootstrapEmitter(ctx context.Context, log *logger.Logger, agentInfo transpiler.AgentInfo, router pipeline.Router, modifiers *pipeline.ConfigModifiers) (pipeline.EmitterFunc, error) { - ch := make(chan *config.Config) - - go func() { - for { - var c *config.Config - select { - case <-ctx.Done(): - return - case c = <-ch: - } - - err := emit(ctx, log, agentInfo, router, modifiers, c) - if err != nil { - log.Error(err) - } - } - }() - - return func(ctx context.Context, c *config.Config) error { - span, _ := apm.StartSpan(ctx, "emit", "app.internal") - defer span.End() - ch <- c - return nil +func newFleetServerBootstrapManager( + log *logger.Logger, +) (*fleetServerBootstrapManager, error) { + return &fleetServerBootstrapManager{ + log: log, + ch: make(chan coordinator.ConfigChange), + errCh: make(chan error), }, nil } -func emit(ctx context.Context, log *logger.Logger, agentInfo transpiler.AgentInfo, router pipeline.Router, modifiers *pipeline.ConfigModifiers, c *config.Config) error { - if err := info.InjectAgentConfig(c); err != nil { - return err - } +func (m *fleetServerBootstrapManager) Run(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() - // perform and verify ast translation - m, err := c.ToMapStr() - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - ast, err := transpiler.NewAST(m) - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - for _, filter := range modifiers.Filters { - 
if err := filter(log, ast); err != nil { - return errors.New(err, "failed to filter configuration", errors.TypeConfig) - } + m.log.Debugf("injecting fleet-server for bootstrap") + select { + case <-ctx.Done(): + return ctx.Err() + case m.ch <- &localConfigChange{injectFleetServerInput}: } - // overwrite the inputs to only have a single fleet-server input - transpiler.Insert(ast, transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("fleet-server")), - }), - }), "inputs") + <-ctx.Done() + return ctx.Err() +} - spec, ok := program.SupportedMap["fleet-server"] - if !ok { - return errors.New("missing required fleet-server program specification") - } - ok, err = program.DetectProgram(spec.Rules, spec.When, spec.Constraints, agentInfo, ast) - if err != nil { - return errors.New(err, "failed parsing the configuration") - } - if !ok { - return errors.New("bootstrap configuration is incorrect causing fleet-server to not be started") - } +func (m *fleetServerBootstrapManager) Errors() <-chan error { + return m.errCh +} - return router.Route(ctx, ast.HashStr(), map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: { - { - Spec: spec, - Config: ast, - }, - }, - }) +func (m *fleetServerBootstrapManager) Watch() <-chan coordinator.ConfigChange { + return m.ch } diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 4ff4c34ad42..d8c21a580d3 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -8,20 +8,18 @@ import ( "context" stderr "errors" "fmt" - "sync" "time" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" - + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + 
"github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" + agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/core/backoff" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/scheduler" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -54,10 +52,6 @@ type agentInfo interface { AgentID() string } -type fleetReporter interface { - Events() ([]fleetapi.SerializableEvent, func()) -} - type stateStore interface { Add(fleetapi.Action) AckToken() string @@ -75,109 +69,102 @@ type actionQueue interface { } type fleetGateway struct { - bgContext context.Context - log *logger.Logger - dispatcher pipeline.Dispatcher - client client.Sender - scheduler scheduler.Scheduler - backoff backoff.Backoff - settings *fleetGatewaySettings - agentInfo agentInfo - reporter fleetReporter - done chan struct{} - wg sync.WaitGroup - acker store.FleetAcker - unauthCounter int - statusController status.Controller - statusReporter status.Reporter - stateStore stateStore - queue actionQueue + log *logger.Logger + dispatcher dispatcher.Dispatcher + client client.Sender + scheduler scheduler.Scheduler + settings *fleetGatewaySettings + agentInfo agentInfo + acker acker.Acker + unauthCounter int + stateFetcher coordinator.StateFetcher + stateStore stateStore + queue actionQueue + errCh chan error } // New 
creates a new fleet gateway func New( - ctx context.Context, log *logger.Logger, agentInfo agentInfo, client client.Sender, - d pipeline.Dispatcher, - r fleetReporter, - acker store.FleetAcker, - statusController status.Controller, + d dispatcher.Dispatcher, + acker acker.Acker, + stateFetcher coordinator.StateFetcher, stateStore stateStore, queue actionQueue, ) (gateway.FleetGateway, error) { scheduler := scheduler.NewPeriodicJitter(defaultGatewaySettings.Duration, defaultGatewaySettings.Jitter) return newFleetGatewayWithScheduler( - ctx, log, defaultGatewaySettings, agentInfo, client, d, scheduler, - r, acker, - statusController, + stateFetcher, stateStore, queue, ) } func newFleetGatewayWithScheduler( - ctx context.Context, log *logger.Logger, settings *fleetGatewaySettings, agentInfo agentInfo, client client.Sender, - d pipeline.Dispatcher, + d dispatcher.Dispatcher, scheduler scheduler.Scheduler, - r fleetReporter, - acker store.FleetAcker, - statusController status.Controller, + acker acker.Acker, + stateFetcher coordinator.StateFetcher, stateStore stateStore, queue actionQueue, ) (gateway.FleetGateway, error) { - - // Backoff implementation doesn't support the use of a context [cancellation] - // as the shutdown mechanism. - // So we keep a done channel that will be closed when the current context is shutdown. 
- done := make(chan struct{}) - return &fleetGateway{ - bgContext: ctx, - log: log, - dispatcher: d, - client: client, - settings: settings, - agentInfo: agentInfo, - scheduler: scheduler, - backoff: backoff.NewEqualJitterBackoff( - done, - settings.Backoff.Init, - settings.Backoff.Max, - ), - done: done, - reporter: r, - acker: acker, - statusReporter: statusController.RegisterComponent("gateway"), - statusController: statusController, - stateStore: stateStore, - queue: queue, + log: log, + dispatcher: d, + client: client, + settings: settings, + agentInfo: agentInfo, + scheduler: scheduler, + acker: acker, + stateFetcher: stateFetcher, + stateStore: stateStore, + queue: queue, + errCh: make(chan error), }, nil } -func (f *fleetGateway) worker() { +func (f *fleetGateway) Run(ctx context.Context) error { + // Backoff implementation doesn't support the use of a context [cancellation] as the shutdown mechanism. + // So we keep a done channel that will be closed when the current context is shutdown. + done := make(chan struct{}) + backoff := backoff.NewEqualJitterBackoff( + done, + f.settings.Backoff.Init, + f.settings.Backoff.Max, + ) + go func() { + <-ctx.Done() + close(done) + }() + + f.log.Info("Fleet gateway started") for { select { + case <-ctx.Done(): + f.scheduler.Stop() + f.log.Info("Fleet gateway stopped") + return ctx.Err() case ts := <-f.scheduler.WaitTick(): f.log.Debug("FleetGateway calling Checkin API") // Execute the checkin call and for any errors returned by the fleet-server API // the function will retry to communicate with fleet-server with an exponential delay and some // jitter to help better distribute the load from a fleet of agents. - resp, err := f.doExecute() + resp, err := f.doExecute(ctx, backoff) if err != nil { continue } @@ -194,35 +181,36 @@ func (f *fleetGateway) worker() { actions = append(actions, queued...) 
- var errMsg string // Persist state + hadErr := false f.stateStore.SetQueue(f.queue.Actions()) if err := f.stateStore.Save(); err != nil { - errMsg = fmt.Sprintf("failed to persist action_queue, error: %s", err) - f.log.Error(errMsg) - f.statusReporter.Update(state.Failed, errMsg, nil) + err = fmt.Errorf("failed to persist action_queue, error: %w", err) + f.log.Error(err) + f.errCh <- err + hadErr = true } if err := f.dispatcher.Dispatch(context.Background(), f.acker, actions...); err != nil { - errMsg = fmt.Sprintf("failed to dispatch actions, error: %s", err) - f.log.Error(errMsg) - f.statusReporter.Update(state.Failed, errMsg, nil) + err = fmt.Errorf("failed to dispatch actions, error: %w", err) + f.log.Error(err) + f.errCh <- err + hadErr = true } f.log.Debugf("FleetGateway is sleeping, next update in %s", f.settings.Duration) - if errMsg != "" { - f.statusReporter.Update(state.Failed, errMsg, nil) - } else { - f.statusReporter.Update(state.Healthy, "", nil) + if !hadErr { + f.errCh <- nil } - - case <-f.bgContext.Done(): - f.stop() - return } } } +// Errors returns the channel to watch for reported errors. +func (f *fleetGateway) Errors() <-chan error { + return f.errCh +} + // queueScheduledActions will add any action in actions with a valid start time to the queue and return the rest. // start time to current time comparisons are purposefully not made in case of cancel actions. func (f *fleetGateway) queueScheduledActions(input fleetapi.Actions) []fleetapi.Action { @@ -277,17 +265,17 @@ func (f *fleetGateway) gatherQueuedActions(ts time.Time) (queued, expired []flee return queued, expired } -func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { - f.backoff.Reset() +func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*fleetapi.CheckinResponse, error) { + bo.Reset() // Guard if the context is stopped by a out of bound call, // this mean we are rebooting to change the log level or the system is shutting us down. 
- for f.bgContext.Err() == nil { + for ctx.Err() == nil { f.log.Debugf("Checking started") - resp, err := f.execute(f.bgContext) + resp, err := f.execute(ctx) if err != nil { f.log.Errorf("Could not communicate with fleet-server Checking API will retry, error: %s", err) - if !f.backoff.Wait() { + if !bo.Wait() { // Something bad has happened and we log it and we should update our current state. err := errors.New( "execute retry loop was stopped", @@ -296,7 +284,7 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { ) f.log.Error(err) - f.statusReporter.Update(state.Failed, err.Error(), nil) + f.errCh <- err return nil, err } continue @@ -307,13 +295,10 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { // This mean that the next loop was cancelled because of the context, we should return the error // but we should not log it, because we are in the process of shutting down. - return nil, f.bgContext.Err() + return nil, ctx.Err() } func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, error) { - // get events - ee, ack := f.reporter.Events() - ecsMeta, err := info.Metadata() if err != nil { f.log.Error(errors.New("failed to load metadata", err)) @@ -325,13 +310,15 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, f.log.Debugf("using previously saved ack token: %v", ackToken) } + // get current state + state := f.stateFetcher.State() + // checkin cmd := fleetapi.NewCheckinCmd(f.agentInfo, f.client) req := &fleetapi.CheckinRequest{ AckToken: ackToken, - Events: ee, Metadata: ecsMeta, - Status: f.statusController.StatusString(), + Status: agentStateToString(state.State), } resp, err := cmd.Execute(ctx, req) @@ -362,8 +349,6 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, } } - // ack events so they are dropped from queue - ack() return resp, nil } @@ -376,25 +361,16 @@ func isUnauth(err error) bool { return errors.Is(err, 
client.ErrInvalidAPIKey) } -func (f *fleetGateway) Start() error { - f.wg.Add(1) - go func(wg *sync.WaitGroup) { - defer f.log.Info("Fleet gateway is stopped") - defer wg.Done() - - f.worker() - }(&f.wg) - return nil -} - -func (f *fleetGateway) stop() { - f.log.Info("Fleet gateway is stopping") - defer f.scheduler.Stop() - f.statusReporter.Unregister() - close(f.done) - f.wg.Wait() -} - func (f *fleetGateway) SetClient(c client.Sender) { f.client = c } + +func agentStateToString(state agentclient.State) string { + switch state { + case agentclient.Healthy: + return "online" + case agentclient.Failed: + return "error" + } + return "degraded" +} diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index a9b9380519f..deb871192bc 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -8,12 +8,13 @@ package fleet import ( "bytes" "context" - "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" + "os" + "path/filepath" "sync" "testing" "time" @@ -22,15 +23,13 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - repo "github.com/elastic/elastic-agent/internal/pkg/reporter" - fleetreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet" - fleetreporterConfig "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet/config" + 
"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" "github.com/elastic/elastic-agent/internal/pkg/scheduler" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -45,9 +44,9 @@ type testingClient struct { func (t *testingClient) Send( _ context.Context, - method string, - path string, - params url.Values, + _ string, + _ string, + _ url.Values, headers http.Header, body io.Reader, ) (*http.Response, error) { @@ -80,7 +79,7 @@ type testingDispatcher struct { received chan struct{} } -func (t *testingDispatcher) Dispatch(_ context.Context, acker store.FleetAcker, actions ...fleetapi.Action) error { +func (t *testingDispatcher) Dispatch(_ context.Context, acker acker.Acker, actions ...fleetapi.Action) error { t.Lock() defer t.Unlock() defer func() { t.received <- struct{}{} }() @@ -135,7 +134,7 @@ func (m *mockQueue) Actions() []fleetapi.Action { return args.Get(0).([]fleetapi.Action) } -type withGatewayFunc func(*testing.T, gateway.FleetGateway, *testingClient, *testingDispatcher, *scheduler.Stepper, repo.Backend) +type withGatewayFunc func(*testing.T, gateway.FleetGateway, *testingClient, *testingDispatcher, *scheduler.Stepper) func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGatewayFunc) func(t *testing.T) { return func(t *testing.T) { @@ -144,37 +143,29 @@ func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGat dispatcher := newTestingDispatcher() log, _ := logger.New("fleet_gateway", false) - rep := getReporter(agentInfo, log, t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) queue := &mockQueue{} queue.On("DequeueActions").Return([]fleetapi.Action{}) queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := 
newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - rep, - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) require.NoError(t, err) - fn(t, gateway, client, dispatcher, scheduler, rep) + fn(t, gateway, client, dispatcher, scheduler) } } @@ -212,8 +203,10 @@ func TestFleetGateway(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + waitFn := ackSeq( client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) @@ -224,12 +217,16 @@ func TestFleetGateway(t *testing.T) { return nil }), ) - err := gateway.Start() - require.NoError(t, err) + + errCh := runFleetGateway(ctx, gateway) // Synchronize scheduler and acking of calls from the worker go routine. scheduler.Next() waitFn() + + cancel() + err := <-errCh + require.NoError(t, err) })) t.Run("Successfully connects and receives a series of actions", withGateway(agentInfo, settings, func( @@ -238,8 +235,10 @@ func TestFleetGateway(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + waitFn := ackSeq( client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { // TODO: assert no events @@ -269,11 +268,15 @@ func TestFleetGateway(t *testing.T) { return nil }), ) - err := gateway.Start() - require.NoError(t, err) + + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() + + cancel() + err := <-errCh + require.NoError(t, err) })) // Test the normal time based execution. 
@@ -286,30 +289,24 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) queue := &mockQueue{} queue.On("DequeueActions").Return([]fleetapi.Action{}) queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -323,8 +320,7 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) var count int for { @@ -334,6 +330,10 @@ func TestFleetGateway(t *testing.T) { return } } + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("queue action from checkin", func(t *testing.T) { @@ -345,10 +345,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -357,20 +354,17 @@ func TestFleetGateway(t *testing.T) { queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -395,12 +389,15 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) 
scheduler.Next() waitFn() queue.AssertExpectations(t) + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("run action from queue", func(t *testing.T) { @@ -412,10 +409,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -423,20 +417,17 @@ func TestFleetGateway(t *testing.T) { queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -450,12 +441,15 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() queue.AssertExpectations(t) + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("discard expired action from queue", func(t *testing.T) { @@ -467,10 +461,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -478,20 +469,17 @@ func TestFleetGateway(t *testing.T) { queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, 
err) waitFn := ackSeq( @@ -505,12 +493,15 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() queue.AssertExpectations(t) + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("cancel action from checkin", func(t *testing.T) { @@ -522,10 +513,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -535,20 +523,17 @@ func TestFleetGateway(t *testing.T) { // queue.Cancel does not need to be mocked here as it is ran in the cancel action dispatcher. gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -578,52 +563,16 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() queue.AssertExpectations(t) - }) - t.Run("send event and receive no action", withGateway(agentInfo, settings, func( - t *testing.T, - gateway gateway.FleetGateway, - client *testingClient, - dispatcher *testingDispatcher, - scheduler *scheduler.Stepper, - rep repo.Backend, - ) { - _ = rep.Report(context.Background(), &testStateEvent{}) - waitFn := ackSeq( - client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { - cr := &request{} - content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - err = json.Unmarshal(content, &cr) - if err != nil { - t.Fatal(err) - } - - require.Equal(t, 1, len(cr.Events)) - - resp 
:= wrapStrToResp(http.StatusOK, `{ "actions": [] }`) - return resp, nil - }), - dispatcher.Answer(func(actions ...fleetapi.Action) error { - require.Equal(t, 0, len(actions)) - return nil - }), - ) - err := gateway.Start() + cancel() + err = <-errCh require.NoError(t, err) - - // Synchronize scheduler and acking of calls from the worker go routine. - scheduler.Next() - waitFn() - })) + }) t.Run("Test the wait loop is interruptible", func(t *testing.T) { // 20mins is the double of the base timeout values for golang test suites. @@ -634,18 +583,15 @@ func TestFleetGateway(t *testing.T) { dispatcher := newTestingDispatcher() ctx, cancel := context.WithCancel(context.Background()) - log, _ := logger.New("tst", false) - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + log, _ := logger.New("tst", false) + stateStore := newStateStore(t, log) queue := &mockQueue{} queue.On("DequeueActions").Return([]fleetapi.Action{}) queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, &fleetGatewaySettings{ Duration: d, @@ -655,13 +601,11 @@ func TestFleetGateway(t *testing.T) { client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) ch1 := dispatcher.Answer(func(actions ...fleetapi.Action) error { return nil }) @@ -670,8 +614,7 @@ func TestFleetGateway(t *testing.T) { return resp, nil }) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) // Silently dispatch action. go func() { @@ -694,6 +637,8 @@ func TestFleetGateway(t *testing.T) { // 2. WaitTick() will block for 20 minutes. // 3. Stop will should unblock the wait. 
cancel() + err = <-errCh + require.NoError(t, err) }) } @@ -712,16 +657,16 @@ func TestRetriesOnFailures(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fail := func(_ http.Header, _ io.Reader) (*http.Response, error) { return wrapStrToResp(http.StatusInternalServerError, "something is bad"), nil } clientWaitFn := client.Answer(fail) - err := gateway.Start() - require.NoError(t, err) - _ = rep.Report(context.Background(), &testStateEvent{}) + errCh := runFleetGateway(ctx, gateway) // Initial tick is done out of bound so we can block on channels. scheduler.Next() @@ -734,18 +679,6 @@ func TestRetriesOnFailures(t *testing.T) { // API recover waitFn := ackSeq( client.Answer(func(_ http.Header, body io.Reader) (*http.Response, error) { - cr := &request{} - content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - err = json.Unmarshal(content, &cr) - if err != nil { - t.Fatal(err) - } - - require.Equal(t, 1, len(cr.Events)) - resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) return resp, nil }), @@ -757,6 +690,10 @@ func TestRetriesOnFailures(t *testing.T) { ) waitFn() + + cancel() + err := <-errCh + require.NoError(t, err) })) t.Run("The retry loop is interruptible", @@ -769,16 +706,16 @@ func TestRetriesOnFailures(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fail := func(_ http.Header, _ io.Reader) (*http.Response, error) { return wrapStrToResp(http.StatusInternalServerError, "something is bad"), nil } waitChan := client.Answer(fail) - err := gateway.Start() - require.NoError(t, err) - _ = rep.Report(context.Background(), &testStateEvent{}) + errCh := runFleetGateway(ctx, gateway) // Initial tick is done out of bound so we can block on 
channels. scheduler.Next() @@ -787,32 +724,59 @@ func TestRetriesOnFailures(t *testing.T) { // delay. <-waitChan - // non-obvious but withGateway on return will stop the gateway before returning and we should - // exit the retry loop. The init value of the backoff is set to exceed the test default timeout. + cancel() + err := <-errCh + require.NoError(t, err) })) } -func getReporter(info agentInfo, log *logger.Logger, t *testing.T) *fleetreporter.Reporter { - fleetR, err := fleetreporter.NewReporter(info, log, fleetreporterConfig.DefaultConfig()) - if err != nil { - t.Fatal(errors.Wrap(err, "fail to create reporters")) - } +type testAgentInfo struct{} + +func (testAgentInfo) AgentID() string { return "agent-secret" } + +type emptyStateFetcher struct{} - return fleetR +func (e *emptyStateFetcher) State() coordinator.State { + return coordinator.State{} } -type testAgentInfo struct{} +func runFleetGateway(ctx context.Context, g gateway.FleetGateway) <-chan error { + done := make(chan bool) + errCh := make(chan error, 1) + go func() { + err := g.Run(ctx) + close(done) + if err != nil && !errors.Is(err, context.Canceled) { + errCh <- err + } else { + errCh <- nil + } + }() + go func() { + for { + select { + case <-done: + return + case <-g.Errors(): + // ignore errors here + } + } + }() + return errCh +} -func (testAgentInfo) AgentID() string { return "agent-secret" } +func newStateStore(t *testing.T, log *logger.Logger) *store.StateStore { + dir, err := ioutil.TempDir("", "fleet-gateway-unit-test") + require.NoError(t, err) -type testStateEvent struct{} + filename := filepath.Join(dir, "state.enc") + diskStore := storage.NewDiskStore(filename) + stateStore, err := store.NewStateStore(log, diskStore) + require.NoError(t, err) -func (testStateEvent) Type() string { return repo.EventTypeState } -func (testStateEvent) SubType() string { return repo.EventSubTypeInProgress } -func (testStateEvent) Time() time.Time { return time.Unix(0, 1) } -func (testStateEvent) Message() 
string { return "hello" } -func (testStateEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} } + t.Cleanup(func() { + os.RemoveAll(dir) + }) -type request struct { - Events []interface{} `json:"events"` + return stateStore } diff --git a/internal/pkg/agent/application/gateway/fleet/noop_status_controller.go b/internal/pkg/agent/application/gateway/fleet/noop_status_controller.go deleted file mode 100644 index d5097655a63..00000000000 --- a/internal/pkg/agent/application/gateway/fleet/noop_status_controller.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package fleet - -import ( - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" -) - -type noopController struct{} - -func (*noopController) RegisterComponent(_ string) status.Reporter { return &noopReporter{} } -func (*noopController) RegisterComponentWithPersistance(_ string, _ bool) status.Reporter { - return &noopReporter{} -} -func (*noopController) RegisterApp(_ string, _ string) status.Reporter { return &noopReporter{} } -func (*noopController) Status() status.AgentStatus { return status.AgentStatus{Status: status.Healthy} } -func (*noopController) StatusCode() status.AgentStatusCode { return status.Healthy } -func (*noopController) UpdateStateID(_ string) {} -func (*noopController) StatusString() string { return "online" } - -type noopReporter struct{} - -func (*noopReporter) Update(_ state.Status, _ string, _ map[string]interface{}) {} -func (*noopReporter) Unregister() {} diff --git a/internal/pkg/agent/application/gateway/fleetserver/fleet_gateway_local.go b/internal/pkg/agent/application/gateway/fleetserver/fleet_gateway_local.go deleted file mode 100644 index 
763f003b25f..00000000000 --- a/internal/pkg/agent/application/gateway/fleetserver/fleet_gateway_local.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package fleetserver - -import ( - "context" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const gatewayWait = 2 * time.Second - -var injectFleetServerInput = map[string]interface{}{ - // outputs is replaced by the fleet-server.spec - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "type": "elasticsearch", - "hosts": []string{"localhost:9200"}, - }, - }, - "inputs": []interface{}{ - map[string]interface{}{ - "type": "fleet-server", - }, - }, -} - -// fleetServerWrapper wraps the fleetGateway to ensure that a local Fleet Server is running before trying -// to communicate with the gateway, which is local to the Elastic Agent. -type fleetServerWrapper struct { - bgContext context.Context - log *logger.Logger - cfg *configuration.FleetAgentConfig - injectedCfg *config.Config - wrapped gateway.FleetGateway - emitter pipeline.EmitterFunc -} - -// New creates a new fleet server gateway wrapping another fleet gateway. 
-func New( - ctx context.Context, - log *logger.Logger, - cfg *configuration.FleetAgentConfig, - rawConfig *config.Config, - wrapped gateway.FleetGateway, - emitter pipeline.EmitterFunc, - injectServer bool) (gateway.FleetGateway, error) { - if cfg.Server == nil || !injectServer { - // not running a local Fleet Server - return wrapped, nil - } - - injectedCfg, err := injectFleetServer(rawConfig) - if err != nil { - return nil, errors.New(err, "failed to inject fleet-server input to start local Fleet Server", errors.TypeConfig) - } - - return &fleetServerWrapper{ - bgContext: ctx, - log: log, - cfg: cfg, - injectedCfg: injectedCfg, - wrapped: wrapped, - emitter: emitter, - }, nil -} - -// Start starts the gateway. -func (w *fleetServerWrapper) Start() error { - err := w.emitter(context.Background(), w.injectedCfg) - if err != nil { - return err - } - sleep(w.bgContext, gatewayWait) - return w.wrapped.Start() -} - -// SetClient sets the client for the wrapped gateway. -func (w *fleetServerWrapper) SetClient(c client.Sender) { - w.wrapped.SetClient(c) -} - -func injectFleetServer(rawConfig *config.Config) (*config.Config, error) { - cfg := map[string]interface{}{} - err := rawConfig.Unpack(cfg) - if err != nil { - return nil, err - } - cloned, err := config.NewConfigFrom(cfg) - if err != nil { - return nil, err - } - err = cloned.Merge(injectFleetServerInput) - if err != nil { - return nil, err - } - return cloned, nil -} - -func sleep(ctx context.Context, d time.Duration) { - t := time.NewTimer(d) - defer t.Stop() - select { - case <-ctx.Done(): - case <-t.C: - } -} diff --git a/internal/pkg/agent/application/gateway/gateway.go b/internal/pkg/agent/application/gateway/gateway.go index 47591a4a04e..d43dd32a0c2 100644 --- a/internal/pkg/agent/application/gateway/gateway.go +++ b/internal/pkg/agent/application/gateway/gateway.go @@ -4,16 +4,23 @@ package gateway -import "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" +import ( + "context" + + 
"github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" +) // FleetGateway is a gateway between the Agent and the Fleet API, it's take cares of all the // bidirectional communication requirements. The gateway aggregates events and will periodically // call the API to send the events and will receive actions to be executed locally. // The only supported action for now is a "ActionPolicyChange". type FleetGateway interface { - // Start starts the gateway. - Start() error + // Run runs the gateway. + Run(ctx context.Context) error + + // Errors returns the channel to watch for reported errors. + Errors() <-chan error - // Set the client for the gateway. + // SetClient sets the client for the gateway. SetClient(client.Sender) } diff --git a/internal/pkg/agent/application/local_mode.go b/internal/pkg/agent/application/local_mode.go deleted file mode 100644 index 29f311fe582..00000000000 --- a/internal/pkg/agent/application/local_mode.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package application - -import ( - "context" - "path/filepath" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/stream" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/dir" - acker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - reporting "github.com/elastic/elastic-agent/internal/pkg/reporter" - logreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/log" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -type discoverFunc func() ([]string, error) - -// ErrNoConfiguration is returned when no configuration are found. 
-var ErrNoConfiguration = errors.New("no configuration found", errors.TypeConfig) - -// Local represents a standalone agents, that will read his configuration directly from disk. -// Some part of the configuration can be reloaded. -type Local struct { - bgContext context.Context - cancelCtxFn context.CancelFunc - log *logger.Logger - router pipeline.Router - source source - agentInfo *info.AgentInfo - srv *server.Server -} - -type source interface { - Start() error - Stop() error -} - -// newLocal return a agent managed by local configuration. -func newLocal( - ctx context.Context, - log *logger.Logger, - pathConfigFile string, - rawConfig *config.Config, - reexec reexecManager, - statusCtrl status.Controller, - uc upgraderControl, - agentInfo *info.AgentInfo, - tracer *apm.Tracer, -) (*Local, error) { - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, statusCtrl) - if err != nil { - return nil, err - } - - cfg, err := configuration.NewFromConfig(rawConfig) - if err != nil { - return nil, err - } - - if log == nil { - log, err = logger.NewFromConfig("", cfg.Settings.LoggingConfig, true) - if err != nil { - return nil, err - } - } - - logR := logreporter.NewReporter(log) - - localApplication := &Local{ - log: log, - agentInfo: agentInfo, - } - - localApplication.bgContext, localApplication.cancelCtxFn = context.WithCancel(ctx) - localApplication.srv, err = server.NewFromConfig(log, cfg.Settings.GRPC, &operation.ApplicationStatusHandler{}, tracer) - if err != nil { - return nil, errors.New(err, "initialize GRPC listener") - } - - reporter := reporting.NewReporter(localApplication.bgContext, log, localApplication.agentInfo, logR) - - monitor, err := monitoring.NewMonitor(cfg.Settings) - if err != nil { - return nil, errors.New(err, "failed to initialize monitoring") - } - - router, err := router.New(log, stream.Factory(localApplication.bgContext, agentInfo, cfg.Settings, localApplication.srv, reporter, monitor, statusCtrl)) - if err != nil { - return 
nil, errors.New(err, "fail to initialize pipeline router") - } - localApplication.router = router - - composableCtrl, err := composable.New(log, rawConfig) - if err != nil { - return nil, errors.New(err, "failed to initialize composable controller") - } - - discover := discoverer(pathConfigFile, cfg.Settings.Path, externalConfigsGlob()) - emit, err := emitter.New( - localApplication.bgContext, - log, - agentInfo, - composableCtrl, - router, - &pipeline.ConfigModifiers{ - Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}, - Filters: []pipeline.FilterFunc{filters.StreamChecker}, - }, - caps, - monitor, - ) - if err != nil { - return nil, err - } - - loader := config.NewLoader(log, externalConfigsGlob()) - - var cfgSource source - if !cfg.Settings.Reload.Enabled { - log.Debug("Reloading of configuration is off") - cfgSource = newOnce(log, discover, loader, emit) - } else { - log.Debugf("Reloading of configuration is on, frequency is set to %s", cfg.Settings.Reload.Period) - cfgSource = newPeriodic(log, cfg.Settings.Reload.Period, discover, loader, emit) - } - - localApplication.source = cfgSource - - // create a upgrader to use in local mode - upgrader := upgrade.NewUpgrader( - agentInfo, - cfg.Settings.DownloadConfig, - log, - []context.CancelFunc{localApplication.cancelCtxFn}, - reexec, - acker.NewAcker(), - reporter, - caps) - uc.SetUpgrader(upgrader) - - return localApplication, nil -} - -func externalConfigsGlob() string { - return filepath.Join(paths.Config(), configuration.ExternalInputsPattern) -} - -// Routes returns a list of routes handled by agent. -func (l *Local) Routes() *sorted.Set { - return l.router.Routes() -} - -// Start starts a local agent. -func (l *Local) Start() error { - l.log.Info("Agent is starting") - defer l.log.Info("Agent is stopped") - - if err := l.srv.Start(); err != nil { - return err - } - if err := l.source.Start(); err != nil { - return err - } - - return nil -} - -// Stop stops a local agent. 
-func (l *Local) Stop() error { - err := l.source.Stop() - l.cancelCtxFn() - l.router.Shutdown() - l.srv.Stop() - return err -} - -// AgentInfo retrieves agent information. -func (l *Local) AgentInfo() *info.AgentInfo { - return l.agentInfo -} - -func discoverer(patterns ...string) discoverFunc { - var p []string - for _, newP := range patterns { - if len(newP) == 0 { - continue - } - - p = append(p, newP) - } - - if len(p) == 0 { - return func() ([]string, error) { - return []string{}, ErrNoConfiguration - } - } - - return func() ([]string, error) { - return dir.DiscoverFiles(p...) - } -} diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index d334ae0198c..893b7541606 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -7,90 +7,53 @@ package application import ( "context" "fmt" + "time" - "go.elastic.co/apm" - - "github.com/elastic/go-sysinfo" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions/handlers" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" fleetgateway "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway/fleet" - localgateway "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway/fleetserver" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions/handlers" - 
"github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/dispatcher" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/stream" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/fleet" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/lazy" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/retrier" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" + fleetclient "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/queue" - reporting "github.com/elastic/elastic-agent/internal/pkg/reporter" - fleetreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet" - logreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/log" - "github.com/elastic/elastic-agent/internal/pkg/sorted" + "github.com/elastic/elastic-agent/internal/pkg/remote" + 
"github.com/elastic/elastic-agent/pkg/component/runtime" "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" ) -type stateStore interface { - Add(fleetapi.Action) - AckToken() string - SetAckToken(ackToken string) - Save() error - Actions() []fleetapi.Action - Queue() []fleetapi.Action -} - -// Managed application, when the application is run in managed mode, most of the configuration are -// coming from the Fleet App. -type Managed struct { - bgContext context.Context - cancelCtxFn context.CancelFunc +type managedConfigManager struct { log *logger.Logger - Config configuration.FleetAgentConfig agentInfo *info.AgentInfo - gateway gateway.FleetGateway - router pipeline.Router - srv *server.Server - stateStore stateStore - upgrader *upgrade.Upgrader + cfg *configuration.Configuration + client *remote.Client + store storage.Store + stateStore *store.StateStore + actionQueue *queue.ActionQueue + runtime *runtime.Manager + coord *coordinator.Coordinator + + ch chan coordinator.ConfigChange + errCh chan error } -func newManaged( - ctx context.Context, +func newManagedConfigManager( log *logger.Logger, - storeSaver storage.Store, - cfg *configuration.Configuration, - rawConfig *config.Config, - reexec reexecManager, - statusCtrl status.Controller, agentInfo *info.AgentInfo, - tracer *apm.Tracer, -) (*Managed, error) { - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, statusCtrl) - if err != nil { - return nil, err - } - - client, err := client.NewAuthWithConfig(log, cfg.Fleet.AccessAPIKey, cfg.Fleet.Client) + cfg *configuration.Configuration, + storeSaver storage.Store, + runtime *runtime.Manager, +) (*managedConfigManager, error) { + client, err := fleetclient.NewAuthWithConfig(log, cfg.Fleet.AccessAPIKey, cfg.Fleet.Client) if err != nil { return nil, errors.New(err, "fail to create API client", @@ -98,115 +61,219 @@ func newManaged( errors.M(errors.MetaKeyURI, cfg.Fleet.Client.Host)) } - sysInfo, err 
:= sysinfo.Host() + // Create the state store that will persist the last good policy change on disk. + stateStore, err := store.NewStateStoreWithMigration(log, paths.AgentActionStoreFile(), paths.AgentStateStoreFile()) if err != nil { - return nil, errors.New(err, - "fail to get system information", - errors.TypeUnexpected) + return nil, errors.New(err, fmt.Sprintf("fail to read action store '%s'", paths.AgentActionStoreFile())) } - managedApplication := &Managed{ - log: log, - agentInfo: agentInfo, + actionQueue, err := queue.NewActionQueue(stateStore.Queue()) + if err != nil { + return nil, fmt.Errorf("unable to initialize action queue: %w", err) } - managedApplication.bgContext, managedApplication.cancelCtxFn = context.WithCancel(ctx) - managedApplication.srv, err = server.NewFromConfig(log, cfg.Settings.GRPC, &operation.ApplicationStatusHandler{}, tracer) - if err != nil { - return nil, errors.New(err, "initialize GRPC listener", errors.TypeNetwork) + return &managedConfigManager{ + log: log, + agentInfo: agentInfo, + cfg: cfg, + client: client, + store: storeSaver, + stateStore: stateStore, + actionQueue: actionQueue, + runtime: runtime, + ch: make(chan coordinator.ConfigChange), + errCh: make(chan error), + }, nil +} + +func (m *managedConfigManager) Run(ctx context.Context) error { + // Check setup correctly in application (the actionDispatcher and coord must be set manually) + if m.coord == nil { + return errors.New("coord must be set before calling Run") } - // must start before `Start` is called as Fleet will already try to start applications - // before `Start` is even called. - err = managedApplication.srv.Start() - if err != nil { - return nil, errors.New(err, "starting GRPC listener", errors.TypeNetwork) + + // Un-enrolled so we will not do anything. + if m.wasUnenrolled() { + m.log.Warnf("Elastic Agent was previously unenrolled. 
To reactivate please reconfigure or enroll again.") + return nil } - logR := logreporter.NewReporter(log) - fleetR, err := fleetreporter.NewReporter(agentInfo, log, cfg.Fleet.Reporting) - if err != nil { - return nil, errors.New(err, "fail to create reporters") + // Reload ID because of win7 sync issue + if err := m.agentInfo.ReloadID(); err != nil { + return err } - combinedReporter := reporting.NewReporter(managedApplication.bgContext, log, agentInfo, logR, fleetR) - monitor, err := monitoring.NewMonitor(cfg.Settings) + // Create context that is cancelled on unenroll. + gatewayCtx, gatewayCancel := context.WithCancel(ctx) + defer gatewayCancel() + + // Create the actionDispatcher. + actionDispatcher, policyChanger, err := newManagedActionDispatcher(m, gatewayCancel) if err != nil { - return nil, errors.New(err, "failed to initialize monitoring") + return err } - router, err := router.New(log, stream.Factory(managedApplication.bgContext, agentInfo, cfg.Settings, managedApplication.srv, combinedReporter, monitor, statusCtrl)) + // Create ackers to enqueue/retry failed acks + ack, err := fleet.NewAcker(m.log, m.agentInfo, m.client) if err != nil { - return nil, errors.New(err, "fail to initialize pipeline router") + return fmt.Errorf("failed to create acker: %w", err) } - managedApplication.router = router + retrier := retrier.New(ack, m.log) + batchedAcker := lazy.NewAcker(ack, m.log, lazy.WithRetrier(retrier)) + actionAcker := store.NewStateStoreActionAcker(batchedAcker, m.stateStore) + + // Run the retrier. 
+ retrierRun := make(chan bool) + retrierCtx, retrierCancel := context.WithCancel(ctx) + defer func() { + retrierCancel() + <-retrierRun + }() + go func() { + retrier.Run(retrierCtx) + close(retrierRun) + }() - composableCtrl, err := composable.New(log, rawConfig) - if err != nil { - return nil, errors.New(err, "failed to initialize composable controller") + actions := m.stateStore.Actions() + stateRestored := false + if len(actions) > 0 && !m.wasUnenrolled() { + // TODO(ph) We will need an improvement on fleet, if there is an error while dispatching a + // persisted action on disk we should be able to ask Fleet to get the latest configuration. + // But at the moment this is not possible because the policy change was acked. + if err := store.ReplayActions(ctx, m.log, actionDispatcher, actionAcker, actions...); err != nil { + m.log.Errorf("could not recover state, error %+v, skipping...", err) + } + stateRestored = true + } + + // In the case this is the first start and this Elastic Agent is running a Fleet Server; we need to ensure that + // the Fleet Server is running before the Fleet gateway is started. 
+ if !stateRestored && m.cfg.Fleet.Server != nil { + err = m.initFleetServer(ctx) + if err != nil { + return fmt.Errorf("failed to initialize Fleet Server: %w", err) + } } - emit, err := emitter.New( - managedApplication.bgContext, - log, - agentInfo, - composableCtrl, - router, - &pipeline.ConfigModifiers{ - Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}, - Filters: []pipeline.FilterFunc{filters.StreamChecker, modifiers.InjectFleet(rawConfig, sysInfo.Info(), agentInfo)}, - }, - caps, - monitor, + gateway, err := fleetgateway.New( + m.log, + m.agentInfo, + m.client, + actionDispatcher, + actionAcker, + m.coord, + m.stateStore, + m.actionQueue, ) if err != nil { - return nil, err + return err } - acker, err := fleet.NewAcker(log, agentInfo, client) - if err != nil { - return nil, err + + // Not running a Fleet Server so the gateway and acker can be changed based on the configuration change. + if m.cfg.Fleet.Server == nil { + policyChanger.AddSetter(gateway) + policyChanger.AddSetter(ack) } - // Create ack retrier that is used by lazyAcker to enqueue/retry failed acks - retrier := retrier.New(acker, log) - // Run acking retrier. The lazy acker sends failed actions acks to retrier. - go retrier.Run(ctx) + // Proxy errors from the gateway to our own channel. + go func() { + for { + select { + case <-ctx.Done(): + return + case err := <-gateway.Errors(): + m.errCh <- err + } + } + }() + + // Run the gateway. 
+ gatewayRun := make(chan bool) + gatewayErrCh := make(chan error) + defer func() { + gatewayCancel() + <-gatewayRun + }() + go func() { + err := gateway.Run(gatewayCtx) + close(gatewayRun) + gatewayErrCh <- err + }() + + <-ctx.Done() + return <-gatewayErrCh +} + +func (m *managedConfigManager) Errors() <-chan error { + return m.errCh +} - batchedAcker := lazy.NewAcker(acker, log, lazy.WithRetrier(retrier)) +func (m *managedConfigManager) Watch() <-chan coordinator.ConfigChange { + return m.ch +} - // Create the state store that will persist the last good policy change on disk. - stateStore, err := store.NewStateStoreWithMigration(log, paths.AgentActionStoreFile(), paths.AgentStateStoreFile()) - if err != nil { - return nil, errors.New(err, fmt.Sprintf("fail to read action store '%s'", paths.AgentActionStoreFile())) +func (m *managedConfigManager) wasUnenrolled() bool { + actions := m.stateStore.Actions() + for _, a := range actions { + if a.Type() == "UNENROLL" { + return true + } } - managedApplication.stateStore = stateStore - actionAcker := store.NewStateStoreActionAcker(batchedAcker, stateStore) + return false +} - actionQueue, err := queue.NewActionQueue(stateStore.Queue()) - if err != nil { - return nil, fmt.Errorf("unable to initialize action queue: %w", err) +func (m *managedConfigManager) initFleetServer(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() + + m.log.Debugf("injecting basic fleet-server for first start") + select { + case <-ctx.Done(): + return ctx.Err() + case m.ch <- &localConfigChange{injectFleetServerInput}: } - actionDispatcher, err := dispatcher.New(managedApplication.bgContext, log, handlers.NewDefault(log)) - if err != nil { - return nil, err + m.log.Debugf("watching fleet-server-default component state") + sub := m.runtime.Subscribe(ctx, "fleet-server-default") + for { + select { + case <-ctx.Done(): + return ctx.Err() + case state := <-sub.Ch(): + if fleetServerRunning(state) { + 
m.log.With("state", state).Debugf("fleet-server-default component is running") + return nil + } + m.log.With("state", state).Debugf("fleet-server-default component is not running") + } + } +} + +func fleetServerRunning(state runtime.ComponentState) bool { + if state.State == client.UnitStateHealthy || state.State == client.UnitStateDegraded { + for key, unit := range state.Units { + if key.UnitType == client.UnitTypeInput && key.UnitID == "fleet-server-default-fleet-server" { + if unit.State == client.UnitStateHealthy || unit.State == client.UnitStateDegraded { + return true + } + } + } } + return false +} - managedApplication.upgrader = upgrade.NewUpgrader( - agentInfo, - cfg.Settings.DownloadConfig, - log, - []context.CancelFunc{managedApplication.cancelCtxFn}, - reexec, - acker, - combinedReporter, - caps) +func newManagedActionDispatcher(m *managedConfigManager, canceller context.CancelFunc) (*dispatcher.ActionDispatcher, *handlers.PolicyChange, error) { + actionDispatcher, err := dispatcher.New(m.log, handlers.NewDefault(m.log)) + if err != nil { + return nil, nil, err + } policyChanger := handlers.NewPolicyChange( - log, - emit, - agentInfo, - cfg, - storeSaver, + m.log, + m.agentInfo, + m.cfg, + m.store, + m.ch, ) actionDispatcher.MustRegister( @@ -216,146 +283,50 @@ func newManaged( actionDispatcher.MustRegister( &fleetapi.ActionPolicyReassign{}, - handlers.NewPolicyReassign(log), + handlers.NewPolicyReassign(m.log), ) actionDispatcher.MustRegister( &fleetapi.ActionUnenroll{}, handlers.NewUnenroll( - log, - emit, - router, - []context.CancelFunc{managedApplication.cancelCtxFn}, - stateStore, + m.log, + m.ch, + []context.CancelFunc{canceller}, + m.stateStore, ), ) actionDispatcher.MustRegister( &fleetapi.ActionUpgrade{}, - handlers.NewUpgrade(log, managedApplication.upgrader), + handlers.NewUpgrade(m.log, m.coord), ) actionDispatcher.MustRegister( &fleetapi.ActionSettings{}, handlers.NewSettings( - log, - reexec, - agentInfo, + m.log, + m.agentInfo, + 
m.coord, ), ) actionDispatcher.MustRegister( &fleetapi.ActionCancel{}, handlers.NewCancel( - log, - actionQueue, + m.log, + m.actionQueue, ), ) actionDispatcher.MustRegister( &fleetapi.ActionApp{}, - handlers.NewAppAction(log, managedApplication.srv), + handlers.NewAppAction(m.log, m.coord), ) actionDispatcher.MustRegister( &fleetapi.ActionUnknown{}, - handlers.NewUnknown(log), - ) - - actions := stateStore.Actions() - stateRestored := false - if len(actions) > 0 && !managedApplication.wasUnenrolled() { - // TODO(ph) We will need an improvement on fleet, if there is an error while dispatching a - // persisted action on disk we should be able to ask Fleet to get the latest configuration. - // But at the moment this is not possible because the policy change was acked. - if err := store.ReplayActions(ctx, log, actionDispatcher, actionAcker, actions...); err != nil { - log.Errorf("could not recover state, error %+v, skipping...", err) - } - stateRestored = true - } - - gateway, err := fleetgateway.New( - managedApplication.bgContext, - log, - agentInfo, - client, - actionDispatcher, - fleetR, - actionAcker, - statusCtrl, - stateStore, - actionQueue, + handlers.NewUnknown(m.log), ) - if err != nil { - return nil, err - } - gateway, err = localgateway.New(managedApplication.bgContext, log, cfg.Fleet, rawConfig, gateway, emit, !stateRestored) - if err != nil { - return nil, err - } - // add the acker and gateway to setters, so the they can be updated - // when the hosts for Fleet Server are updated by the policy. - if cfg.Fleet.Server == nil { - // setters only set when not running a local Fleet Server - policyChanger.AddSetter(gateway) - policyChanger.AddSetter(acker) - } - managedApplication.gateway = gateway - return managedApplication, nil -} - -// Routes returns a list of routes handled by agent. -func (m *Managed) Routes() *sorted.Set { - return m.router.Routes() -} - -// Start starts a managed elastic-agent. 
-func (m *Managed) Start() error { - m.log.Info("Agent is starting") - if m.wasUnenrolled() { - m.log.Warnf("agent was previously unenrolled. To reactivate please reconfigure or enroll again.") - return nil - } - - // reload ID because of win7 sync issue - if err := m.agentInfo.ReloadID(); err != nil { - return err - } - - err := m.upgrader.Ack(m.bgContext) - if err != nil { - m.log.Warnf("failed to ack update %v", err) - } - - err = m.gateway.Start() - if err != nil { - return err - } - return nil -} - -// Stop stops a managed elastic-agent. -func (m *Managed) Stop() error { - defer m.log.Info("Agent is stopped") - m.cancelCtxFn() - m.router.Shutdown() - m.srv.Stop() - return nil -} - -// AgentInfo retrieves elastic-agent information. -func (m *Managed) AgentInfo() *info.AgentInfo { - return m.agentInfo -} - -func (m *Managed) wasUnenrolled() bool { - actions := m.stateStore.Actions() - for _, a := range actions { - if a.Type() == "UNENROLL" { - return true - } - } - - return false + return actionDispatcher, policyChanger, nil } diff --git a/internal/pkg/agent/application/managed_mode_test.go b/internal/pkg/agent/application/managed_mode_test.go deleted file mode 100644 index 847211dc079..00000000000 --- a/internal/pkg/agent/application/managed_mode_test.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package application - -import ( - "context" - "encoding/json" - "testing" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions/handlers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/dispatcher" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func TestManagedModeRouting(t *testing.T) { - - streams := make(map[pipeline.RoutingKey]pipeline.Stream) - streamFn := func(l *logger.Logger, r pipeline.RoutingKey) (pipeline.Stream, error) { - m := newMockStreamStore() - streams[r] = m - - return m, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - log, _ := logger.New("", false) - router, _ := router.New(log, streamFn) - agentInfo, _ := info.NewAgentInfo(false) - nullStore := &storage.NullStore{} - composableCtrl, _ := composable.New(log, nil) - emit, err := emitter.New(ctx, log, agentInfo, composableCtrl, router, &pipeline.ConfigModifiers{Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}}, nil) - require.NoError(t, err) - - actionDispatcher, err := 
dispatcher.New(ctx, log, handlers.NewDefault(log)) - require.NoError(t, err) - - cfg := configuration.DefaultConfiguration() - actionDispatcher.MustRegister( - &fleetapi.ActionPolicyChange{}, - handlers.NewPolicyChange( - log, - emit, - agentInfo, - cfg, - nullStore, - ), - ) - - actions, err := testActions() - require.NoError(t, err) - - err = actionDispatcher.Dispatch(context.Background(), noopacker.NewAcker(), actions...) - require.NoError(t, err) - - // has 1 config request for fb, mb and monitoring? - assert.Equal(t, 1, len(streams)) - - defaultStreamStore, found := streams["default"] - assert.True(t, found, "default group not found") - assert.Equal(t, 1, len(defaultStreamStore.(*mockStreamStore).store)) - - confReq := defaultStreamStore.(*mockStreamStore).store[0] - assert.Equal(t, 3, len(confReq.ProgramNames())) - assert.Equal(t, modifiers.MonitoringName, confReq.ProgramNames()[2]) -} - -func testActions() ([]fleetapi.Action, error) { - checkinResponse := &fleetapi.CheckinResponse{} - if err := json.Unmarshal([]byte(fleetResponse), &checkinResponse); err != nil { - return nil, err - } - - return checkinResponse.Actions, nil -} - -type mockStreamStore struct { - store []configrequest.Request -} - -func newMockStreamStore() *mockStreamStore { - return &mockStreamStore{ - store: make([]configrequest.Request, 0), - } -} - -func (m *mockStreamStore) Execute(_ context.Context, cr configrequest.Request) error { - m.store = append(m.store, cr) - return nil -} - -func (m *mockStreamStore) Close() error { - return nil -} - -func (m *mockStreamStore) Shutdown() {} - -const fleetResponse = ` -{ - "action": "checkin", - "actions": [{ - "agent_id": "17e93530-7f42-11ea-9330-71e968b29fa4", - "type": "POLICY_CHANGE", - "data": { - "policy": { - "id": "86561d50-7f3b-11ea-9fab-3db3bdb4efa4", - "outputs": { - "default": { - "type": "elasticsearch", - "hosts": [ - "http://localhost:9200" - ], - "api_key": "pNr6fnEBupQ3-5oEEkWJ:FzhrQOzZSG-Vpsq9CGk4oA" - } - }, - - "inputs": [{ - 
"type": "system/metrics", - "enabled": true, - "streams": [{ - "id": "system/metrics-system.core", - "enabled": true, - "data_stream.dataset": "system.core", - "period": "10s", - "metrics": [ - "percentages" - ] - }, - { - "id": "system/metrics-system.cpu", - "enabled": true, - "data_stream.dataset": "system.cpu", - "period": "10s", - "metrics": [ - "percentages", - "normalized_percentages" - ] - }, - { - "id": "system/metrics-system.diskio", - "enabled": true, - "data_stream.dataset": "system.diskio", - "period": "10s", - "include_devices": [] - }, - { - "id": "system/metrics-system.entropy", - "enabled": true, - "data_stream.dataset": "system.entropy", - "period": "10s", - "include_devices": [] - }, - { - "id": "system/metrics-system.filesystem", - "enabled": true, - "data_stream.dataset": "system.filesystem", - "period": "1m", - "ignore_types": [] - }, - { - "id": "system/metrics-system.fsstat", - "enabled": true, - "data_stream.dataset": "system.fsstat", - "period": "1m", - "ignore_types": [] - }, - { - "id": "system/metrics-system.load", - "enabled": true, - "data_stream.dataset": "system.load", - "period": "10s" - }, - { - "id": "system/metrics-system.memory", - "enabled": true, - "data_stream.dataset": "system.memory", - "period": "10s" - }, - { - "id": "system/metrics-system.network", - "enabled": true, - "data_stream.dataset": "system.network", - "period": "10s" - }, - { - "id": "system/metrics-system.network_summary", - "enabled": true, - "data_stream.dataset": "system.network_summary", - "period": "10s" - }, - { - "id": "system/metrics-system.process", - "enabled": true, - "data_stream.dataset": "system.process", - "period": "10s", - "processes": [ - ".*" - ], - "include_top_n.enabled": true, - "include_top_n.by_cpu": 5, - "include_top_n.by_memory": 5, - "cmdline.cache.enabled": true, - "cgroups.enabled": true, - "env.whitelist": [], - "include_cpu_ticks": false - }, - { - "id": "system/metrics-system.process_summary", - "enabled": true, - 
"data_stream.dataset": "system.process_summary", - "period": "10s" - }, - { - "id": "system/metrics-system.raid", - "enabled": true, - "data_stream.dataset": "system.raid", - "period": "10s", - "mount_point": "/" - }, - { - "id": "system/metrics-system.service", - "enabled": true, - "data_stream.dataset": "system.service", - "period": "10s", - "state_filter": [] - }, - { - "id": "system/metrics-system.socket_summary", - "enabled": true, - "data_stream.dataset": "system.socket_summary", - "period": "10s" - }, - { - "id": "system/metrics-system.uptime", - "enabled": true, - "data_stream.dataset": "system.uptime", - "period": "15m" - }, - { - "id": "system/metrics-system.users", - "enabled": true, - "data_stream.dataset": "system.users", - "period": "10s" - } - ] - }, - { - "type": "logfile", - "enabled": true, - "streams": [{ - "id": "logs-system.auth", - "enabled": true, - "data_stream.dataset": "system.auth", - "paths": [ - "/var/log/auth.log*", - "/var/log/secure*" - ] - }, - { - "id": "logs-system.syslog", - "enabled": true, - "data_stream.dataset": "system.syslog", - "paths": [ - "/var/log/messages*", - "/var/log/syslog*" - ] - } - ] - } - ], - - "revision": 3, - "agent.monitoring": { - "use_output": "default", - "enabled": true, - "logs": true, - "metrics": true - } - } - }, - "id": "1c7e26a0-7f42-11ea-9330-71e968b29fa4", - "created_at": "2020-04-15T17:54:11.081Z" - }] -} - ` diff --git a/internal/pkg/agent/application/once.go b/internal/pkg/agent/application/once.go index 19a17c61df9..7326612950b 100644 --- a/internal/pkg/agent/application/once.go +++ b/internal/pkg/agent/application/once.go @@ -6,8 +6,10 @@ package application import ( "context" + "fmt" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/config" 
"github.com/elastic/elastic-agent/pkg/core/logger" @@ -17,14 +19,15 @@ type once struct { log *logger.Logger discover discoverFunc loader *config.Loader - emitter pipeline.EmitterFunc + ch chan coordinator.ConfigChange + errCh chan error } -func newOnce(log *logger.Logger, discover discoverFunc, loader *config.Loader, emitter pipeline.EmitterFunc) *once { - return &once{log: log, discover: discover, loader: loader, emitter: emitter} +func newOnce(log *logger.Logger, discover discoverFunc, loader *config.Loader) *once { + return &once{log: log, discover: discover, loader: loader, ch: make(chan coordinator.ConfigChange), errCh: make(chan error)} } -func (o *once) Start() error { +func (o *once) Run(ctx context.Context) error { files, err := o.discover() if err != nil { return errors.New(err, "could not discover configuration files", errors.TypeConfig) @@ -34,18 +37,27 @@ func (o *once) Start() error { return ErrNoConfiguration } - return readfiles(context.Background(), files, o.loader, o.emitter) + cfg, err := readfiles(files, o.loader) + if err != nil { + return err + } + o.ch <- &localConfigChange{cfg} + <-ctx.Done() + return ctx.Err() +} + +func (o *once) Errors() <-chan error { + return o.errCh } -func (o *once) Stop() error { - return nil +func (o *once) Watch() <-chan coordinator.ConfigChange { + return o.ch } -func readfiles(ctx context.Context, files []string, loader *config.Loader, emitter pipeline.EmitterFunc) error { +func readfiles(files []string, loader *config.Loader) (*config.Config, error) { c, err := loader.Load(files) if err != nil { - return errors.New(err, "could not load or merge configuration", errors.TypeConfig) + return nil, fmt.Errorf("failed to load or merge configuration: %w", err) } - - return emitter(ctx, c) + return c, nil } diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go index 315f515a13c..79b114144cc 100644 --- a/internal/pkg/agent/application/paths/common.go +++ 
b/internal/pkg/agent/application/paths/common.go @@ -128,11 +128,12 @@ func Data() string { return filepath.Join(Top(), "data") } +// Components returns the component directory for Agent func Components() string { return filepath.Join(Home(), "components") } -// Logs returns a the log directory for Agent +// Logs returns the log directory for Agent func Logs() string { return logsPath } diff --git a/internal/pkg/agent/application/periodic.go b/internal/pkg/agent/application/periodic.go index 10a3c26c11d..bb9f717a7af 100644 --- a/internal/pkg/agent/application/periodic.go +++ b/internal/pkg/agent/application/periodic.go @@ -9,7 +9,8 @@ import ( "strings" "time" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/filewatcher" @@ -19,35 +20,39 @@ import ( type periodic struct { log *logger.Logger period time.Duration - done chan struct{} watcher *filewatcher.Watch loader *config.Loader - emitter pipeline.EmitterFunc discover discoverFunc + ch chan coordinator.ConfigChange + errCh chan error } -func (p *periodic) Start() error { - go func() { - if err := p.work(); err != nil { - p.log.Debugf("Failed to read configuration, error: %s", err) +func (p *periodic) Run(ctx context.Context) error { + if err := p.work(); err != nil { + return err + } + + t := time.NewTicker(p.period) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C: } - WORK: - for { - t := time.NewTimer(p.period) - select { - case <-p.done: - t.Stop() - break WORK - case <-t.C: - } - - if err := p.work(); err != nil { - p.log.Debugf("Failed to read configuration, error: %s", err) - } + if err := p.work(); err != nil { + return err } - }() - return nil + } +} + +func (p *periodic) Errors() <-chan error { + 
return p.errCh +} + +func (p *periodic) Watch() <-chan coordinator.ConfigChange { + return p.ch } func (p *periodic) work() error { @@ -92,30 +97,26 @@ func (p *periodic) work() error { p.log.Debugf("Unchanged %d files: %s", len(s.Unchanged), strings.Join(s.Updated, ", ")) } - err := readfiles(context.Background(), files, p.loader, p.emitter) + cfg, err := readfiles(files, p.loader) if err != nil { // assume something when really wrong and invalidate any cache // so we get a full new config on next tick. p.watcher.Invalidate() - return errors.New(err, "could not emit configuration") + return err } + p.ch <- &localConfigChange{cfg} + return nil } p.log.Info("No configuration change") return nil } -func (p *periodic) Stop() error { - close(p.done) - return nil -} - func newPeriodic( log *logger.Logger, period time.Duration, discover discoverFunc, loader *config.Loader, - emitter pipeline.EmitterFunc, ) *periodic { w, err := filewatcher.New(log, filewatcher.DefaultComparer) @@ -127,10 +128,27 @@ func newPeriodic( return &periodic{ log: log, period: period, - done: make(chan struct{}), watcher: w, discover: discover, loader: loader, - emitter: emitter, + ch: make(chan coordinator.ConfigChange), + errCh: make(chan error), } } + +type localConfigChange struct { + cfg *config.Config +} + +func (l *localConfigChange) Config() *config.Config { + return l.cfg +} + +func (l *localConfigChange) Ack() error { + // do nothing + return nil +} + +func (l *localConfigChange) Fail(_ error) { + // do nothing +} diff --git a/internal/pkg/agent/application/pipeline/emitter/controller.go b/internal/pkg/agent/application/pipeline/emitter/controller.go deleted file mode 100644 index 7f83961586c..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/controller.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package emitter - -import ( - "context" - "sync" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type reloadable interface { - Reload(cfg *config.Config) error -} - -// Controller is an emitter controller handling config updates. -type Controller struct { - logger *logger.Logger - agentInfo *info.AgentInfo - controller composable.Controller - router pipeline.Router - modifiers *pipeline.ConfigModifiers - reloadables []reloadable - caps capabilities.Capability - - // state - lock sync.RWMutex - updateLock sync.Mutex - config *config.Config - ast *transpiler.AST - vars []*transpiler.Vars -} - -// NewController creates a new emitter controller. -func NewController( - log *logger.Logger, - agentInfo *info.AgentInfo, - controller composable.Controller, - router pipeline.Router, - modifiers *pipeline.ConfigModifiers, - caps capabilities.Capability, - reloadables ...reloadable, -) *Controller { - init, _ := transpiler.NewVars(map[string]interface{}{}, nil) - - return &Controller{ - logger: log, - agentInfo: agentInfo, - controller: controller, - router: router, - modifiers: modifiers, - reloadables: reloadables, - vars: []*transpiler.Vars{init}, - caps: caps, - } -} - -// Update applies config change and performes all steps necessary to apply it. 
-func (e *Controller) Update(ctx context.Context, c *config.Config) (err error) { - span, ctx := apm.StartSpan(ctx, "update", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - - if err := info.InjectAgentConfig(c); err != nil { - return err - } - - // perform and verify ast translation - m, err := c.ToMapStr() - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - - rawAst, err := transpiler.NewAST(m) - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - - if e.caps != nil { - var ok bool - updatedAst, err := e.caps.Apply(rawAst) - if err != nil { - return errors.New(err, "failed to apply capabilities") - } - - rawAst, ok = updatedAst.(*transpiler.AST) - if !ok { - return errors.New("failed to transform object returned from capabilities to AST", errors.TypeConfig) - } - } - - for _, filter := range e.modifiers.Filters { - if err := filter(e.logger, rawAst); err != nil { - return errors.New(err, "failed to filter configuration", errors.TypeConfig) - } - } - - e.lock.Lock() - e.config = c - e.ast = rawAst - e.lock.Unlock() - - return e.update(ctx) -} - -// Set sets the transpiler vars for dynamic inputs resolution. 
-func (e *Controller) Set(ctx context.Context, vars []*transpiler.Vars) { - if err := e.set(ctx, vars); err != nil { - e.logger.Errorf("Failed to render configuration with latest context from composable controller: %s", err) - } -} - -func (e *Controller) set(ctx context.Context, vars []*transpiler.Vars) (err error) { - span, ctx := apm.StartSpan(ctx, "set", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - e.lock.Lock() - ast := e.ast - e.vars = vars - e.lock.Unlock() - - if ast != nil { - return e.update(ctx) - } - return nil -} - -func (e *Controller) update(ctx context.Context) (err error) { - span, ctx := apm.StartSpan(ctx, "update", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - // locking whole update because it can be called concurrently via Set and Update method - e.updateLock.Lock() - defer e.updateLock.Unlock() - - e.lock.RLock() - cfg := e.config - rawAst := e.ast - varsArray := e.vars - e.lock.RUnlock() - - ast := rawAst.Clone() - inputs, ok := transpiler.Lookup(ast, "inputs") - if ok { - renderedInputs, err := transpiler.RenderInputs(inputs, varsArray) - if err != nil { - return err - } - err = transpiler.Insert(ast, renderedInputs, "inputs") - if err != nil { - return errors.New(err, "inserting rendered inputs failed") - } - } - - e.logger.Debug("Converting single configuration into specific programs configuration") - - programsToRun, err := program.Programs(e.agentInfo, ast) - if err != nil { - return err - } - - for _, decorator := range e.modifiers.Decorators { - for outputType, ptr := range programsToRun { - programsToRun[outputType], err = decorator(e.agentInfo, outputType, ast, ptr) - if err != nil { - return err - } - } - } - - for _, r := range e.reloadables { - if err := r.Reload(cfg); err != nil { - return err - } - } - - return e.router.Route(ctx, ast.HashStr(), programsToRun) -} diff --git a/internal/pkg/agent/application/pipeline/emitter/emitter.go 
b/internal/pkg/agent/application/pipeline/emitter/emitter.go deleted file mode 100644 index ac94d48d8b4..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/emitter.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package emitter - -import ( - "context" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// New creates a new emitter function. -func New(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, controller composable.Controller, router pipeline.Router, modifiers *pipeline.ConfigModifiers, caps capabilities.Capability, reloadables ...reloadable) (pipeline.EmitterFunc, error) { - ctrl := NewController(log, agentInfo, controller, router, modifiers, caps, reloadables...) 
- err := controller.Run(ctx, func(vars []*transpiler.Vars) { - ctrl.Set(ctx, vars) - }) - if err != nil { - return nil, errors.New(err, "failed to start composable controller") - } - return func(ctx context.Context, c *config.Config) (err error) { - span, ctx := apm.StartSpan(ctx, "update", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - return ctrl.Update(ctx, c) - }, nil -} diff --git a/internal/pkg/agent/application/pipeline/emitter/emitter_test.go b/internal/pkg/agent/application/pipeline/emitter/emitter_test.go deleted file mode 100644 index a38b1bb1ded..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/emitter_test.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package emitter diff --git a/internal/pkg/agent/application/pipeline/emitter/modifiers/fleet_decorator.go b/internal/pkg/agent/application/pipeline/emitter/modifiers/fleet_decorator.go deleted file mode 100644 index e1555393b84..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/modifiers/fleet_decorator.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package modifiers - -import ( - "github.com/elastic/go-sysinfo/types" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// InjectFleet injects fleet metadata into a configuration. -func InjectFleet(cfg *config.Config, hostInfo types.HostInfo, agentInfo *info.AgentInfo) func(*logger.Logger, *transpiler.AST) error { - return func(logger *logger.Logger, rootAst *transpiler.AST) error { - config, err := cfg.ToMapStr() - if err != nil { - return err - } - ast, err := transpiler.NewAST(config) - if err != nil { - return err - } - fleet, ok := transpiler.Lookup(ast, "fleet") - if !ok { - // no fleet from configuration; skip - return nil - } - - // copy top-level agent.* into fleet.agent.* (this gets sent to Applications in this structure) - if agent, ok := transpiler.Lookup(ast, "agent"); ok { - if err := transpiler.Insert(ast, agent, "fleet"); err != nil { - return errors.New(err, "inserting agent info failed") - } - } - - // ensure that the agent.logging.level is present - if _, found := transpiler.Lookup(ast, "agent.logging.level"); !found { - transpiler.Insert(ast, transpiler.NewKey("level", transpiler.NewStrVal(agentInfo.LogLevel())), "agent.logging") - } - - // fleet.host to Agent can be the host to connect to Fleet Server, but to Applications it should - // be the fleet.host.id. 
move fleet.host to fleet.hosts if fleet.hosts doesn't exist - if _, ok := transpiler.Lookup(ast, "fleet.hosts"); !ok { - if host, ok := transpiler.Lookup(ast, "fleet.host"); ok { - if key, ok := host.(*transpiler.Key); ok { - if value, ok := key.Value().(*transpiler.StrVal); ok { - hosts := transpiler.NewList([]transpiler.Node{transpiler.NewStrVal(value.String())}) - if err := transpiler.Insert(ast, hosts, "fleet.hosts"); err != nil { - return errors.New(err, "inserting fleet hosts failed") - } - } - } - } - } - - // inject host.* into fleet.host.* (this gets sent to Applications in this structure) - host := transpiler.NewKey("host", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("id", transpiler.NewStrVal(hostInfo.UniqueID)), - })) - if err := transpiler.Insert(ast, host, "fleet"); err != nil { - return errors.New(err, "inserting list of hosts failed") - } - - // inject fleet.* from local AST to the rootAST so its present when sending to Applications. - err = transpiler.Insert(rootAst, fleet.Value().(transpiler.Node), "fleet") - if err != nil { - return errors.New(err, "inserting fleet info failed") - } - return nil - } -} diff --git a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go b/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go deleted file mode 100644 index d9377aa9e61..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package modifiers - -import ( - "crypto/md5" - "fmt" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" -) - -const ( - // MonitoringName is a name used for artificial program generated when monitoring is needed. - MonitoringName = "FLEET_MONITORING" - programsKey = "programs" - monitoringChecksumKey = "monitoring_checksum" - monitoringKey = "agent.monitoring" - monitoringUseOutputKey = "agent.monitoring.use_output" - monitoringOutputFormatKey = "outputs.%s" - outputKey = "output" - - enabledKey = "agent.monitoring.enabled" - logsKey = "agent.monitoring.logs" - metricsKey = "agent.monitoring.metrics" - outputsKey = "outputs" - elasticsearchKey = "elasticsearch" - typeKey = "type" - defaultOutputName = "default" -) - -// InjectMonitoring injects a monitoring configuration into a group of programs if needed. -func InjectMonitoring(agentInfo *info.AgentInfo, outputGroup string, rootAst *transpiler.AST, programsToRun []program.Program) ([]program.Program, error) { - var err error - monitoringProgram := program.Program{ - Spec: program.Spec{ - Name: MonitoringName, - Cmd: MonitoringName, - }, - } - - // if monitoring is not specified use default one where everything is enabled - if _, found := transpiler.Lookup(rootAst, monitoringKey); !found { - monitoringNode := transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("enabled", transpiler.NewBoolVal(true)), - transpiler.NewKey("logs", transpiler.NewBoolVal(true)), - transpiler.NewKey("metrics", transpiler.NewBoolVal(true)), - transpiler.NewKey("use_output", transpiler.NewStrVal("default")), - transpiler.NewKey("namespace", transpiler.NewStrVal("default")), - }) - - transpiler.Insert(rootAst, transpiler.NewKey("monitoring", monitoringNode), "settings") - } - - // get monitoring output name to be used - monitoringOutputName, found := 
transpiler.LookupString(rootAst, monitoringUseOutputKey) - if !found { - monitoringOutputName = defaultOutputName - } - - typeValue, found := transpiler.LookupString(rootAst, fmt.Sprintf("%s.%s.type", outputsKey, monitoringOutputName)) - if !found { - typeValue = elasticsearchKey - } - - ast := rootAst.Clone() - if err := getMonitoringRule(monitoringOutputName, typeValue).Apply(agentInfo, ast); err != nil { - return programsToRun, err - } - - config, err := ast.Map() - if err != nil { - return programsToRun, err - } - - programList := make([]string, 0, len(programsToRun)) - cfgHash := md5.New() - for _, p := range programsToRun { - programList = append(programList, p.Spec.CommandName()) - cfgHash.Write(p.Config.Hash()) - } - // making program list and their hashes part of the config - // so it will get regenerated with every change - config[programsKey] = programList - config[monitoringChecksumKey] = fmt.Sprintf("%x", cfgHash.Sum(nil)) - - monitoringProgram.Config, err = transpiler.NewAST(config) - if err != nil { - return programsToRun, err - } - - return append(programsToRun, monitoringProgram), nil -} - -func getMonitoringRule(outputName string, t string) *transpiler.RuleList { - monitoringOutputSelector := fmt.Sprintf(monitoringOutputFormatKey, outputName) - return transpiler.NewRuleList( - transpiler.Copy(monitoringOutputSelector, outputKey), - transpiler.Rename(fmt.Sprintf("%s.%s", outputsKey, outputName), t), - transpiler.Filter(monitoringKey, programsKey, outputKey), - ) -} diff --git a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator_test.go b/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator_test.go deleted file mode 100644 index 735e27cd725..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator_test.go +++ /dev/null @@ -1,686 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package modifiers - -import ( - "fmt" - "testing" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/testutils" -) - -func TestMonitoringInjection(t *testing.T) { - tests := []struct { - name string - inputConfig map[string]interface{} - uname string - }{ - { - name: "testMonitoringInjection", - inputConfig: inputConfigMap, - uname: "monitoring-uname", - }, - { - name: "testMonitoringInjectionDefaults", - inputConfig: inputConfigMapDefaults, - uname: "xxx", - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - testMonitoringInjection(t, tc.inputConfig, tc.uname) - }) - } -} - -func testMonitoringInjection(t *testing.T, inputConfig map[string]interface{}, testUname string) { - testutils.InitStorage(t) - - agentInfo, err := info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - ast, err := transpiler.NewAST(inputConfig) - if err != nil { - t.Fatal(err) - } - - programsToRun, err := program.Programs(agentInfo, ast) - if err != nil { - t.Fatal(err) - } - - if len(programsToRun) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - -GROUPLOOP: - for group, ptr := range programsToRun { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, ast, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOP - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - cm, err := p.Config.Map() - if err != nil { - t.Error(err) - 
continue GROUPLOOP - } - - outputCfg, found := cm[outputKey] - if !found { - t.Errorf("output not found for '%s'", group) - continue GROUPLOOP - } - - outputMap, ok := outputCfg.(map[string]interface{}) - if !ok { - t.Errorf("output is not a map for '%s'", group) - continue GROUPLOOP - } - - esCfg, found := outputMap["elasticsearch"] - if !found { - t.Errorf("elasticsearch output not found for '%s'", group) - continue GROUPLOOP - } - - esMap, ok := esCfg.(map[string]interface{}) - if !ok { - t.Errorf("output.elasticsearch is not a map for '%s'", group) - continue GROUPLOOP - } - - if uname, found := esMap["username"]; !found { - t.Errorf("output.elasticsearch.username output not found for '%s'", group) - continue GROUPLOOP - } else if uname != testUname { - t.Errorf("output.elasticsearch.username has incorrect value expected '%s', got '%s for %s", "monitoring-uname", uname, group) - continue GROUPLOOP - } - } - } -} - -func TestMonitoringToLogstashInjection(t *testing.T) { - testutils.InitStorage(t) - - agentInfo, err := info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - ast, err := transpiler.NewAST(inputConfigLS) - if err != nil { - t.Fatal(err) - } - - programsToRun, err := program.Programs(agentInfo, ast) - if err != nil { - t.Fatal(err) - } - - if len(programsToRun) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - -GROUPLOOP: - for group, ptr := range programsToRun { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, ast, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOP - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - cm, err := p.Config.Map() - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - outputCfg, found := cm[outputKey] - if !found { - t.Errorf("output not 
found for '%s'", group) - continue GROUPLOOP - } - - outputMap, ok := outputCfg.(map[string]interface{}) - if !ok { - t.Errorf("output is not a map for '%s'", group) - continue GROUPLOOP - } - - esCfg, found := outputMap["logstash"] - if !found { - t.Errorf("logstash output not found for '%s' %v", group, outputMap) - continue GROUPLOOP - } - - esMap, ok := esCfg.(map[string]interface{}) - if !ok { - t.Errorf("output.logstash is not a map for '%s'", group) - continue GROUPLOOP - } - - if uname, found := esMap["hosts"]; !found { - t.Errorf("output.logstash.hosts output not found for '%s'", group) - continue GROUPLOOP - } else if uname != "192.168.1.2" { - t.Errorf("output.logstash.hosts has incorrect value expected '%s', got '%s for %s", "monitoring-uname", uname, group) - continue GROUPLOOP - } - } - } -} - -func TestMonitoringInjectionDisabled(t *testing.T) { - testutils.InitStorage(t) - - agentInfo, err := info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - ast, err := transpiler.NewAST(inputConfigMapDisabled) - if err != nil { - t.Fatal(err) - } - - programsToRun, err := program.Programs(agentInfo, ast) - if err != nil { - t.Fatal(err) - } - - if len(programsToRun) != 2 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 2)) - } - -GROUPLOOP: - for group, ptr := range programsToRun { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, ast, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOP - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - cm, err := p.Config.Map() - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - // is enabled set - agentObj, found := cm["agent"] - if !found { - t.Errorf("settings not found for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - agentMap, 
ok := agentObj.(map[string]interface{}) - if !ok { - t.Errorf("settings not a map for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - monitoringObj, found := agentMap["monitoring"] - if !found { - t.Errorf("agent.monitoring not found for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - monitoringMap, ok := monitoringObj.(map[string]interface{}) - if !ok { - t.Errorf("agent.monitoring not a map for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - enabledVal, found := monitoringMap["enabled"] - if !found { - t.Errorf("monitoring.enabled not found for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - monitoringEnabled, ok := enabledVal.(bool) - if !ok { - t.Errorf("agent.monitoring.enabled is not a bool for '%s'", group) - continue GROUPLOOP - } - - if monitoringEnabled { - t.Errorf("agent.monitoring.enabled is enabled, should be disabled for '%s'", group) - continue GROUPLOOP - } - } - } -} - -func TestChangeInMonitoringWithChangeInInput(t *testing.T) { - testutils.InitStorage(t) - - agentInfo, err := info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - - astBefore, err := transpiler.NewAST(inputChange1) - if err != nil { - t.Fatal(err) - } - - programsToRunBefore, err := program.Programs(agentInfo, astBefore) - if err != nil { - t.Fatal(err) - } - - if len(programsToRunBefore) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - - astAfter, err := transpiler.NewAST(inputChange2) - if err != nil { - t.Fatal(err) - } - - programsToRunAfter, err := program.Programs(agentInfo, astAfter) - if err != nil { - t.Fatal(err) - } - - if len(programsToRunAfter) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - - // inject to both - var hashConfigBefore, hashConfigAfter string -GROUPLOOPBEFORE: - for group, ptr := range programsToRunBefore { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, astBefore, 
ptr) - if err != nil { - t.Error(err) - continue GROUPLOOPBEFORE - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOPBEFORE - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - hashConfigBefore = p.Config.HashStr() - } - } - -GROUPLOOPAFTER: - for group, ptr := range programsToRunAfter { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, astAfter, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOPAFTER - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOPAFTER - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - hashConfigAfter = p.Config.HashStr() - } - } - - if hashConfigAfter == "" || hashConfigBefore == "" { - t.Fatal("hash configs uninitialized") - } - - if hashConfigAfter == hashConfigBefore { - t.Fatal("hash config equal, expected to be different") - } -} - -var inputConfigMap = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "infosec1": map[string]interface{}{ - "pass": "xxx", - "spool": map[string]interface{}{ - "file": "${path.data}/spool.dat", - }, - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, 
- }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - { - "type": "system/metrics", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - { - "id": "system/metrics-system.core", - "enabled": true, - "dataset": "system.core", - "period": "10s", - "metrics": []string{"percentages"}, - }, - }, - }, - }, -} - -var inputConfigMapDefaults = map[string]interface{}{ - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "infosec1": map[string]interface{}{ - "pass": "xxx", - "spool": map[string]interface{}{ - "file": "${path.data}/spool.dat", - }, - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - - "inputs": []map[string]interface{}{ - { - "type": "log", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - { - "type": "system/metrics", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - { - "id": "system/metrics-system.core", - "enabled": true, - "dataset": "system.core", - "period": "10s", - "metrics": []string{"percentages"}, - }, - }, - }, - }, -} - -var inputConfigMapDisabled = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": false, - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "infosec1": map[string]interface{}{ - "pass": "xxx", - "spool": map[string]interface{}{ - 
"file": "${path.data}/spool.dat", - }, - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - { - "type": "system/metrics", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - { - "id": "system/metrics-system.core", - "enabled": true, - "dataset": "system.core", - "period": "10s", - "metrics": []string{"percentages"}, - }, - }, - }, - }, -} - -var inputChange1 = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - }, -} - -var inputChange2 = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - 
"monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - {"paths": "/yyyy"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - }, -} - -var inputConfigLS = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "logstash", - "hosts": "192.168.1.2", - "ssl.certificate_authorities": []string{"/etc/pki.key"}, - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - }, -} diff --git a/internal/pkg/agent/application/pipeline/pipeline.go b/internal/pkg/agent/application/pipeline/pipeline.go deleted file mode 100644 index 764d920cff9..00000000000 --- a/internal/pkg/agent/application/pipeline/pipeline.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package pipeline - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// ConfigHandler is capable of handling configrequest. -type ConfigHandler interface { - HandleConfig(context.Context, configrequest.Request) error - Close() error - Shutdown() -} - -// DefaultRK default routing keys until we implement the routing key / config matrix. -var DefaultRK = "default" - -// RoutingKey is used for routing as pipeline id. -type RoutingKey = string - -// Router is an interface routing programs to the corresponding stream. -type Router interface { - Routes() *sorted.Set - Route(ctx context.Context, id string, grpProg map[RoutingKey][]program.Program) error - Shutdown() -} - -// StreamFunc creates a stream out of routing key. -type StreamFunc func(*logger.Logger, RoutingKey) (Stream, error) - -// Stream is capable of executing configrequest change. -type Stream interface { - Execute(context.Context, configrequest.Request) error - Close() error - Shutdown() -} - -// EmitterFunc emits configuration for processing. -type EmitterFunc func(context.Context, *config.Config) error - -// DecoratorFunc is a func for decorating a retrieved configuration before processing. -type DecoratorFunc = func(*info.AgentInfo, string, *transpiler.AST, []program.Program) ([]program.Program, error) - -// FilterFunc is a func for filtering a retrieved configuration before processing. 
-type FilterFunc = func(*logger.Logger, *transpiler.AST) error - -// ConfigModifiers is a collections of filters and decorators applied while processing configuration. -type ConfigModifiers struct { - Filters []FilterFunc - Decorators []DecoratorFunc -} - -// Dispatcher processes actions coming from fleet api. -type Dispatcher interface { - Dispatch(context.Context, store.FleetAcker, ...fleetapi.Action) error -} diff --git a/internal/pkg/agent/application/pipeline/router/router.go b/internal/pkg/agent/application/pipeline/router/router.go deleted file mode 100644 index e1f1d63c8b5..00000000000 --- a/internal/pkg/agent/application/pipeline/router/router.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package router - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type router struct { - log *logger.Logger - routes *sorted.Set - streamFactory pipeline.StreamFunc -} - -// New creates a new router. 
-func New(log *logger.Logger, factory pipeline.StreamFunc) (pipeline.Router, error) { - var err error - if log == nil { - log, err = logger.New("router", false) - if err != nil { - return nil, err - } - } - return &router{log: log, streamFactory: factory, routes: sorted.NewSet()}, nil -} - -func (r *router) Routes() *sorted.Set { - return r.routes -} - -func (r *router) Route(ctx context.Context, id string, grpProg map[pipeline.RoutingKey][]program.Program) error { - s := sorted.NewSet() - - // Make sure that starting and updating is always done in the same order. - for rk, programs := range grpProg { - s.Add(rk, programs) - } - - active := make(map[string]bool, len(grpProg)) - for _, rk := range s.Keys() { - active[rk] = true - - // Are we already runnings this streams? - // When it doesn't exist we just create it, if it already exist we forward the configuration. - p, ok := r.routes.Get(rk) - var err error - if !ok { - r.log.Debugf("Creating stream: %s", rk) - p, err = r.streamFactory(r.log, rk) - if err != nil { - return err - } - r.routes.Add(rk, p) - } - - programs, ok := s.Get(rk) - if !ok { - return fmt.Errorf("could not find programs for routing key %s", rk) - } - - req := configrequest.New(id, time.Now(), programs.([]program.Program)) - - r.log.Debugf( - "Streams %s need to run config with ID %s and programs: %s", - rk, - req.ShortID(), - strings.Join(req.ProgramNames(), ", "), - ) - - err = p.(pipeline.Stream).Execute(ctx, req) - if err != nil { - return err - } - } - - // cleanup inactive streams. - // streams are shutdown down in alphabetical order. - keys := r.routes.Keys() - for _, k := range keys { - _, ok := active[k] - if ok { - continue - } - - p, ok := r.routes.Get(k) - if !ok { - continue - } - - r.log.Debugf("Removing routing key %s", k) - - p.(pipeline.Stream).Close() - r.routes.Remove(k) - } - - return nil -} - -// Shutdown shutdowns the router because Agent is stopping. 
-func (r *router) Shutdown() { - keys := r.routes.Keys() - for _, k := range keys { - p, ok := r.routes.Get(k) - if !ok { - continue - } - p.(pipeline.Stream).Shutdown() - r.routes.Remove(k) - } -} diff --git a/internal/pkg/agent/application/pipeline/router/router_test.go b/internal/pkg/agent/application/pipeline/router/router_test.go deleted file mode 100644 index 75f33231b1b..00000000000 --- a/internal/pkg/agent/application/pipeline/router/router_test.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package router - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type rOp int - -const ( - createOp rOp = iota + 1 - executeOp - closeOp -) - -func (r *rOp) String() string { - m := map[rOp]string{ - 1: "create", - 2: "execute", - 3: "close", - } - v, ok := m[*r] - if !ok { - return "unknown operation" - } - return v -} - -type event struct { - rk pipeline.RoutingKey - op rOp -} - -type notifyFunc func(pipeline.RoutingKey, rOp, ...interface{}) - -func TestRouter(t *testing.T) { - programs := []program.Program{{Spec: getRandomSpec()}} - ctx := context.Background() - - t.Run("create new and destroy unused stream", func(t *testing.T) { - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - }) - - assertOps(t, []event{ - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - 
recorder.reset() - - nk := "NEW_KEY" - _ = r.Route(ctx, "hello-2", map[pipeline.RoutingKey][]program.Program{ - nk: programs, - }) - - assertOps(t, []event{ - e(nk, createOp), - e(nk, executeOp), - e(pipeline.DefaultRK, closeOp), - }, recorder.events) - }) - - t.Run("multiples create new and destroy unused stream", func(t *testing.T) { - k1 := "KEY_1" - k2 := "KEY_2" - - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - k1: programs, - k2: programs, - }) - - assertOps(t, []event{ - e(k1, createOp), - e(k1, executeOp), - - e(k2, createOp), - e(k2, executeOp), - - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - recorder.reset() - - nk := "SECOND_DISPATCH" - _ = r.Route(ctx, "hello-2", map[pipeline.RoutingKey][]program.Program{ - nk: programs, - }) - - assertOps(t, []event{ - e(nk, createOp), - e(nk, executeOp), - - e(k1, closeOp), - e(k2, closeOp), - e(pipeline.DefaultRK, closeOp), - }, recorder.events) - }) - - t.Run("create new and delegate program to existing stream", func(t *testing.T) { - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - }) - - assertOps(t, []event{ - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - recorder.reset() - - _ = r.Route(ctx, "hello-2", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - }) - - assertOps(t, []event{ - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - }) - - t.Run("when no stream are detected we shutdown all the running streams", func(t *testing.T) { - k1 := "KEY_1" - k2 := "KEY_2" - - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", 
map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - k1: programs, - k2: programs, - }) - - assertOps(t, []event{ - e(k1, createOp), - e(k1, executeOp), - e(k2, createOp), - e(k2, executeOp), - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - recorder.reset() - - _ = r.Route(ctx, "hello-2", map[pipeline.RoutingKey][]program.Program{}) - - assertOps(t, []event{ - e(k1, closeOp), - e(k2, closeOp), - e(pipeline.DefaultRK, closeOp), - }, recorder.events) - }) -} - -type recorder struct { - events []event -} - -func (r *recorder) factory(_ *logger.Logger, rk pipeline.RoutingKey) (pipeline.Stream, error) { - return newMockStream(rk, r.notify), nil -} - -func (r *recorder) notify(rk pipeline.RoutingKey, op rOp, args ...interface{}) { - r.events = append(r.events, e(rk, op)) -} - -func (r *recorder) reset() { - r.events = nil -} - -type mockStream struct { - rk pipeline.RoutingKey - notify notifyFunc -} - -func newMockStream(rk pipeline.RoutingKey, notify notifyFunc) *mockStream { - notify(rk, createOp) - return &mockStream{ - rk: rk, - notify: notify, - } -} - -func (m *mockStream) Execute(_ context.Context, req configrequest.Request) error { - m.event(executeOp, req) - return nil -} - -func (m *mockStream) Close() error { - m.event(closeOp) - return nil -} - -func (m *mockStream) Shutdown() {} - -func (m *mockStream) event(op rOp, args ...interface{}) { - m.notify(m.rk, op, args...) 
-} - -func assertOps(t *testing.T, expected []event, received []event) { - require.Equal(t, len(expected), len(received), "Received number of operation doesn't match") - require.Equal(t, expected, received) -} - -func e(rk pipeline.RoutingKey, op rOp) event { - return event{rk: rk, op: op} -} - -func getRandomSpec() program.Spec { - return program.Supported[1] -} diff --git a/internal/pkg/agent/application/pipeline/stream/factory.go b/internal/pkg/agent/application/pipeline/stream/factory.go deleted file mode 100644 index b7701e70e99..00000000000 --- a/internal/pkg/agent/application/pipeline/stream/factory.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package stream - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" - "github.com/elastic/elastic-agent/internal/pkg/agent/stateresolver" - downloader "github.com/elastic/elastic-agent/internal/pkg/artifact/download/localremote" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install" - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/release" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -// Factory creates a new stream factory. 
-func Factory(ctx context.Context, agentInfo *info.AgentInfo, cfg *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor, statusController status.Controller) func(*logger.Logger, pipeline.RoutingKey) (pipeline.Stream, error) { - return func(log *logger.Logger, id pipeline.RoutingKey) (pipeline.Stream, error) { - // new operator per stream to isolate processes without using tags - operator, err := newOperator(ctx, log, agentInfo, id, cfg, srv, r, m, statusController) - if err != nil { - return nil, err - } - - return &operatorStream{ - log: log, - configHandler: operator, - }, nil - } -} - -func newOperator( - ctx context.Context, - log *logger.Logger, - agentInfo *info.AgentInfo, - id pipeline.RoutingKey, - config *configuration.SettingsConfig, - srv *server.Server, - r state.Reporter, - m monitoring.Monitor, - statusController status.Controller, -) (*operation.Operator, error) { - fetcher, err := downloader.NewDownloader(log, config.DownloadConfig) - if err != nil { - return nil, err - } - - allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp) - if err != nil { - return nil, errors.New(err, "initiating verifier") - } - - installer, err := install.NewInstaller(config.DownloadConfig) - if err != nil { - return nil, errors.New(err, "initiating installer") - } - - uninstaller, err := uninstall.NewUninstaller() - if err != nil { - return nil, errors.New(err, "initiating uninstaller") - } - - stateResolver, err := stateresolver.NewStateResolver(log) - if err != nil { - return nil, err - } - - return operation.NewOperator( - ctx, - log, - agentInfo, - id, - config, - fetcher, - verifier, - installer, - uninstaller, - stateResolver, - srv, - r, - m, - statusController, - ) -} diff --git a/internal/pkg/agent/application/pipeline/stream/operator_stream.go b/internal/pkg/agent/application/pipeline/stream/operator_stream.go deleted file mode 100644 index 
ee4ee44079e..00000000000 --- a/internal/pkg/agent/application/pipeline/stream/operator_stream.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package stream - -import ( - "context" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type operatorStream struct { - configHandler pipeline.ConfigHandler - log *logger.Logger -} - -type stater interface { - State() map[string]state.State -} - -type specer interface { - Specs() map[string]program.Spec -} - -func (b *operatorStream) Close() error { - return b.configHandler.Close() -} - -func (b *operatorStream) State() map[string]state.State { - if s, ok := b.configHandler.(stater); ok { - return s.State() - } - - return nil -} - -func (b *operatorStream) Specs() map[string]program.Spec { - if s, ok := b.configHandler.(specer); ok { - return s.Specs() - } - return nil -} - -func (b *operatorStream) Execute(ctx context.Context, cfg configrequest.Request) (err error) { - span, ctx := apm.StartSpan(ctx, "route", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - return b.configHandler.HandleConfig(ctx, cfg) -} - -func (b *operatorStream) Shutdown() { - b.configHandler.Shutdown() -} diff --git a/internal/pkg/agent/application/upgrade/error_checker.go b/internal/pkg/agent/application/upgrade/error_checker.go index 099526b990b..8e308c4e080 100644 --- a/internal/pkg/agent/application/upgrade/error_checker.go +++ b/internal/pkg/agent/application/upgrade/error_checker.go 
@@ -64,7 +64,7 @@ func (ch *ErrorChecker) Run(ctx context.Context) { continue } - status, err := ch.agentClient.Status(ctx) + state, err := ch.agentClient.State(ctx) ch.agentClient.Disconnect() if err != nil { ch.log.Error("failed retrieving agent status", err) @@ -78,14 +78,14 @@ func (ch *ErrorChecker) Run(ctx context.Context) { // call was successful, reset counter ch.failuresCounter = 0 - if status.Status == client.Failed { + if state.State == client.Failed { ch.log.Error("error checker notifying failure of agent") ch.notifyChan <- ErrAgentStatusFailed } - for _, app := range status.Applications { - if app.Status == client.Failed { - err = multierror.Append(err, errors.New(fmt.Sprintf("application %s[%v] failed: %s", app.Name, app.ID, app.Message))) + for _, comp := range state.Components { + if comp.State == client.Failed { + err = multierror.Append(err, errors.New(fmt.Sprintf("component %s[%v] failed: %s", comp.Name, comp.ID, comp.Message))) } } diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index e176e4c5b96..51b0adbb184 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -39,7 +39,7 @@ type UpdateMarker struct { } // markUpgrade marks update happened so we can handle grace period -func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) error { +func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi.ActionUpgrade) error { prevVersion := release.Version() prevHash := release.Commit() if len(prevHash) > hashLen { @@ -51,7 +51,7 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) er UpdatedOn: time.Now(), PrevVersion: prevVersion, PrevHash: prevHash, - Action: action.FleetAction(), + Action: action, } markerBytes, err := yaml.Marshal(marker) diff --git a/internal/pkg/agent/application/upgrade/upgrade.go 
b/internal/pkg/agent/application/upgrade/upgrade.go index e2fe530ff77..cb6f827d8e2 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -14,6 +14,8 @@ import ( "runtime" "strings" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/otiai10/copy" "go.elastic.co/apm" @@ -25,8 +27,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -47,40 +47,16 @@ var ( } ) +var ( + // ErrSameVersion error is returned when the upgrade results in the same installed version. + ErrSameVersion = errors.New("upgrade did not occur because its the same version") +) + // Upgrader performs an upgrade type Upgrader struct { - agentInfo *info.AgentInfo - settings *artifact.Config log *logger.Logger - closers []context.CancelFunc - reexec reexecManager - acker acker - reporter stateReporter + settings *artifact.Config upgradeable bool - caps capabilities.Capability -} - -// Action is the upgrade action state. -type Action interface { - // Version to upgrade to. - Version() string - // SourceURI for download. - SourceURI() string - // FleetAction is the action from fleet that started the action (optional). 
- FleetAction() *fleetapi.ActionUpgrade -} - -type reexecManager interface { - ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string) -} - -type acker interface { - Ack(ctx context.Context, action fleetapi.Action) error - Commit(ctx context.Context) error -} - -type stateReporter interface { - OnStateChange(id string, name string, s state.State) } // IsUpgradeable when agent is installed and running as a service or flag was provided. @@ -91,17 +67,11 @@ func IsUpgradeable() bool { } // NewUpgrader creates an upgrader which is capable of performing upgrade operation -func NewUpgrader(agentInfo *info.AgentInfo, settings *artifact.Config, log *logger.Logger, closers []context.CancelFunc, reexec reexecManager, a acker, r stateReporter, caps capabilities.Capability) *Upgrader { +func NewUpgrader(log *logger.Logger, settings *artifact.Config) *Upgrader { return &Upgrader{ - agentInfo: agentInfo, - settings: settings, log: log, - closers: closers, - reexec: reexec, - acker: a, - reporter: r, + settings: settings, upgradeable: IsUpgradeable(), - caps: caps, } } @@ -112,40 +82,17 @@ func (u *Upgrader) Upgradeable() bool { // Upgrade upgrades running agent, function returns shutdown callback if some needs to be executed for cases when // reexec is called by caller. 
-func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ reexec.ShutdownCallbackFn, err error) { +func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) { span, ctx := apm.StartSpan(ctx, "upgrade", "app.internal") defer span.End() - // report failed - defer func() { - if err != nil { - if action := a.FleetAction(); action != nil { - u.reportFailure(ctx, action, err) - } - apm.CaptureError(ctx, err).Send() - } - }() - - if !u.upgradeable { - return nil, fmt.Errorf( - "cannot be upgraded; must be installed with install sub-command and " + - "running under control of the systems supervisor") - } - if u.caps != nil { - if _, err := u.caps.Apply(a); errors.Is(err, capabilities.ErrBlocked) { - return nil, nil - } - } - - u.reportUpdating(a.Version()) - - sourceURI := u.sourceURI(a.SourceURI()) - archivePath, err := u.downloadArtifact(ctx, a.Version(), sourceURI) + sourceURI = u.sourceURI(sourceURI) + archivePath, err := u.downloadArtifact(ctx, version, sourceURI) if err != nil { return nil, err } - newHash, err := u.unpack(ctx, a.Version(), archivePath) + newHash, err := u.unpack(ctx, version, archivePath) if err != nil { return nil, err } @@ -155,13 +102,7 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree } if strings.HasPrefix(release.Commit(), newHash) { - // not an error - if action := a.FleetAction(); action != nil { - //nolint:errcheck // keeping the same behavior, and making linter happy - u.ackAction(ctx, action) - } - u.log.Warn("upgrading to same version") - return nil, nil + return nil, ErrSameVersion } // Copy vault directory for linux/windows only @@ -182,7 +123,7 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree return nil, err } - if err := u.markUpgrade(ctx, newHash, a); err != nil { + if err := u.markUpgrade(ctx, newHash, action); err != nil { rollbackInstall(ctx, 
newHash) return nil, err } @@ -192,17 +133,12 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree return nil, errors.New("failed to invoke rollback watcher", err) } - cb := shutdownCallback(u.log, paths.Home(), release.Version(), a.Version(), release.TrimCommit(newHash)) - if reexecNow { - u.reexec.ReExec(cb) - return nil, nil - } - + cb := shutdownCallback(u.log, paths.Home(), release.Version(), version, release.TrimCommit(newHash)) return cb, nil } // Ack acks last upgrade action -func (u *Upgrader) Ack(ctx context.Context) error { +func (u *Upgrader) Ack(ctx context.Context, acker acker.Acker) error { // get upgrade action marker, err := LoadMarker() if err != nil { @@ -216,7 +152,11 @@ func (u *Upgrader) Ack(ctx context.Context) error { return nil } - if err := u.ackAction(ctx, marker.Action); err != nil { + if err := acker.Ack(ctx, marker.Action); err != nil { + return err + } + + if err := acker.Commit(ctx); err != nil { return err } @@ -231,50 +171,6 @@ func (u *Upgrader) sourceURI(retrievedURI string) string { return u.settings.SourceURI } -// ackAction is used for successful updates, it was either updated successfully or to the same version -// so we need to remove updating state and get prevent from receiving same update action again. -func (u *Upgrader) ackAction(ctx context.Context, action fleetapi.Action) error { - if err := u.acker.Ack(ctx, action); err != nil { - return err - } - - if err := u.acker.Commit(ctx); err != nil { - return err - } - - u.reporter.OnStateChange( - "", - agentName, - state.State{Status: state.Healthy}, - ) - - return nil -} - -// report failure is used when update process fails. 
action is acked so it won't be received again -// and state is changed to FAILED -func (u *Upgrader) reportFailure(ctx context.Context, action fleetapi.Action, err error) { - // ack action - _ = u.acker.Ack(ctx, action) - - // report failure - u.reporter.OnStateChange( - "", - agentName, - state.State{Status: state.Failed, Message: err.Error()}, - ) -} - -// reportUpdating sets state of agent to updating. -func (u *Upgrader) reportUpdating(version string) { - // report failure - u.reporter.OnStateChange( - "", - agentName, - state.State{Status: state.Updating, Message: fmt.Sprintf("Update to version '%s' started", version)}, - ) -} - func rollbackInstall(ctx context.Context, hash string) { os.RemoveAll(filepath.Join(paths.Data(), fmt.Sprintf("%s-%s", agentName, hash))) _ = ChangeSymlink(ctx, release.ShortCommit()) diff --git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go index ad3a77e1a4f..718e1c4596f 100644 --- a/internal/pkg/agent/cmd/diagnostics.go +++ b/internal/pkg/agent/cmd/diagnostics.go @@ -428,29 +428,31 @@ func gatherConfig() (AgentConfig, error) { } cfg.ConfigRendered = mapCFG - // Gather vars to render process config - isStandalone, err := isStandalone(renderedCFG) - if err != nil { - return AgentConfig{}, err - } + /* + // Gather vars to render process config + isStandalone, err := isStandalone(renderedCFG) + if err != nil { + return AgentConfig{}, err + } - log, err := newErrorLogger() - if err != nil { - return AgentConfig{}, err - } + log, err := newErrorLogger() + if err != nil { + return AgentConfig{}, err + } - // Get process config - uses same approach as inspect output command. - // Does not contact server process to request configs. 
- pMap, err := getProgramsFromConfig(log, agentInfo, renderedCFG, isStandalone) - if err != nil { - return AgentConfig{}, err - } - cfg.AppConfig = make(map[string]interface{}, 0) - for rk, programs := range pMap { - for _, p := range programs { - cfg.AppConfig[p.Identifier()+"_"+rk] = p.Configuration() + // Get process config - uses same approach as inspect output command. + // Does not contact server process to request configs. + pMap, err := getProgramsFromConfig(log, agentInfo, renderedCFG, isStandalone) + if err != nil { + return AgentConfig{}, err } - } + cfg.AppConfig = make(map[string]interface{}, 0) + for rk, programs := range pMap { + for _, p := range programs { + cfg.AppConfig[p.Identifier()+"_"+rk] = p.Configuration() + } + } + */ return cfg, nil } diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index a886ca5bafb..77519772fe7 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -28,7 +28,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/install" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" @@ -303,7 +302,7 @@ func (c *enrollCmd) writeDelayEnroll(streams *cli.IOStreams) error { func (c *enrollCmd) fleetServerBootstrap(ctx context.Context, persistentConfig map[string]interface{}) (string, error) { c.log.Debug("verifying communication with running Elastic Agent daemon") agentRunning := true - _, err := getDaemonStatus(ctx) + _, err := getDaemonState(ctx) if err != nil { if !c.options.FleetServer.SpawnAgent { // wait longer to try and communicate with the Elastic Agent @@ -641,7 +640,7 @@ func delay(ctx 
context.Context, d time.Duration) { } } -func getDaemonStatus(ctx context.Context) (*client.AgentStatus, error) { +func getDaemonState(ctx context.Context) (*client.AgentState, error) { ctx, cancel := context.WithTimeout(ctx, daemonTimeout) defer cancel() daemon := client.New() @@ -650,7 +649,7 @@ func getDaemonStatus(ctx context.Context) (*client.AgentStatus, error) { return nil, err } defer daemon.Disconnect() - return daemon.Status(ctx) + return daemon.State(ctx) } type waitResult struct { @@ -680,7 +679,7 @@ func waitForAgent(ctx context.Context, timeout time.Duration) error { backOff := expBackoffWithContext(innerCtx, 1*time.Second, maxBackoff) for { backOff.Wait() - _, err := getDaemonStatus(innerCtx) + _, err := getDaemonState(innerCtx) if errors.Is(err, context.Canceled) { resChan <- waitResult{err: err} return @@ -730,7 +729,7 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat backExp := expBackoffWithContext(innerCtx, 1*time.Second, maxBackoff) for { backExp.Wait() - status, err := getDaemonStatus(innerCtx) + state, err := getDaemonState(innerCtx) if errors.Is(err, context.Canceled) { resChan <- waitResult{err: err} return @@ -750,8 +749,8 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat } continue } - app := getAppFromStatus(status, "fleet-server") - if app == nil { + unit := getCompUnitFromStatus(state, "fleet-server") + if unit == nil { err = errors.New("no fleet-server application running") log.Debugf("%s: %s", waitingForFleetServer, err) if msg != waitingForFleetServer { @@ -767,16 +766,16 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat } continue } - log.Debugf("%s: %s - %s", waitingForFleetServer, app.Status, app.Message) - if app.Status == cproto.Status_DEGRADED || app.Status == cproto.Status_HEALTHY { + log.Debugf("%s: %s - %s", waitingForFleetServer, unit.State, unit.Message) + if unit.State == client.Degraded || unit.State == client.Healthy { 
// app has started and is running - if app.Message != "" { - log.Infof("Fleet Server - %s", app.Message) + if unit.Message != "" { + log.Infof("Fleet Server - %s", unit.Message) } // extract the enrollment token from the status payload token := "" - if app.Payload != nil { - if enrollToken, ok := app.Payload["enrollment_token"]; ok { + if unit.Payload != nil { + if enrollToken, ok := unit.Payload["enrollment_token"]; ok { if tokenStr, ok := enrollToken.(string); ok { token = tokenStr } @@ -785,8 +784,8 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat resChan <- waitResult{enrollmentToken: token} break } - if app.Message != "" { - appMsg := fmt.Sprintf("Fleet Server - %s", app.Message) + if unit.Message != "" { + appMsg := fmt.Sprintf("Fleet Server - %s", unit.Message) if msg != appMsg { msg = appMsg msgCount = 0 @@ -827,10 +826,14 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat return res.enrollmentToken, nil } -func getAppFromStatus(status *client.AgentStatus, name string) *client.ApplicationStatus { - for _, app := range status.Applications { - if app.Name == name { - return app +func getCompUnitFromStatus(state *client.AgentState, name string) *client.ComponentUnitState { + for _, comp := range state.Components { + if comp.Name == name { + for _, unit := range comp.Units { + if unit.UnitType == client.UnitTypeInput { + return &unit + } + } } } return nil diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index d7832f48772..03ea093cab6 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -5,34 +5,11 @@ package cmd import ( - "context" - "fmt" - "os" - "github.com/spf13/cobra" - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - 
"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/cli" - "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/config/operations" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/go-sysinfo" ) func newInspectCommandWithArgs(s []string, streams *cli.IOStreams) *cobra.Command { @@ -42,10 +19,12 @@ func newInspectCommandWithArgs(s []string, streams *cli.IOStreams) *cobra.Comman Long: "Shows current configuration of the agent", Args: cobra.ExactArgs(0), Run: func(c *cobra.Command, args []string) { - if err := inspectConfig(paths.ConfigFile()); err != nil { - fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) - os.Exit(1) - } + /* + if err := inspectConfig(paths.ConfigFile()); err != nil { + fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) + os.Exit(1) + } + */ }, } @@ -61,19 +40,22 @@ func newInspectOutputCommandWithArgs(_ []string) *cobra.Command { Long: "Displays configuration generated for output.\nIf no output is specified list of output is displayed", Args: 
cobra.MaximumNArgs(2), RunE: func(c *cobra.Command, args []string) error { - outName, _ := c.Flags().GetString("output") - program, _ := c.Flags().GetString("program") - cfgPath := paths.ConfigFile() - agentInfo, err := info.NewAgentInfo(false) - if err != nil { - return err - } - - if outName == "" { - return inspectOutputs(cfgPath, agentInfo) - } - - return inspectOutput(cfgPath, outName, program, agentInfo) + /* + outName, _ := c.Flags().GetString("output") + program, _ := c.Flags().GetString("program") + cfgPath := paths.ConfigFile() + agentInfo, err := info.NewAgentInfo(false) + if err != nil { + return err + } + + if outName == "" { + return inspectOutputs(cfgPath, agentInfo) + } + + return inspectOutput(cfgPath, outName, program, agentInfo) + */ + return nil }, } @@ -83,6 +65,7 @@ func newInspectOutputCommandWithArgs(_ []string) *cobra.Command { return cmd } +/* func inspectConfig(cfgPath string) error { err := tryContainerLoadPaths() if err != nil { @@ -102,7 +85,7 @@ func printMapStringConfig(mapStr map[string]interface{}) error { if err != nil { return err } - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l, status.NewController(l)) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l) if err != nil { return err } @@ -279,7 +262,7 @@ func getProgramsFromConfig(log *logger.Logger, agentInfo *info.AgentInfo, cfg *c configModifiers.Filters = append(configModifiers.Filters, modifiers.InjectFleet(cfg, sysInfo.Info(), agentInfo)) } - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, status.NewController(log)) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log) if err != nil { return nil, err } @@ -374,17 +357,23 @@ func newWaitForCompose(wrapped composable.Controller) *waitForCompose { } } -func (w *waitForCompose) Run(ctx context.Context, cb composable.VarsCallback) error { - err := w.controller.Run(ctx, func(vars []*transpiler.Vars) { - cb(vars) - w.done <- true - }) +func (w *waitForCompose) 
Run(ctx context.Context) error { + err := w.controller.Run(ctx) return err } +func (w *waitForCompose) Errors() <-chan error { + return nil +} + +func (w *waitForCompose) Watch() <-chan []*transpiler.Vars { + return nil +} + func (w *waitForCompose) Wait() { <-w.done } +*/ func isStandalone(cfg *config.Config) (bool, error) { c, err := configuration.NewFromConfig(cfg) diff --git a/internal/pkg/agent/cmd/inspect_test.go b/internal/pkg/agent/cmd/inspect_test.go index 361f77d5904..3a5ffb35380 100644 --- a/internal/pkg/agent/cmd/inspect_test.go +++ b/internal/pkg/agent/cmd/inspect_test.go @@ -4,6 +4,7 @@ package cmd +/* import ( "testing" ) @@ -49,3 +50,4 @@ func TestGetFleetInput(t *testing.T) { }) } } +*/ diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index b584baf2f09..e2d3fc0e751 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -19,11 +19,7 @@ import ( apmtransport "go.elastic.co/apm/transport" "gopkg.in/yaml.v2" - "github.com/elastic/elastic-agent-libs/api" - "github.com/elastic/elastic-agent-libs/monitoring" "github.com/elastic/elastic-agent-libs/service" - "github.com/elastic/elastic-agent-system-metrics/report" - "github.com/elastic/elastic-agent/internal/pkg/agent/application" "github.com/elastic/elastic-agent/internal/pkg/agent/application/filelock" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" @@ -37,13 +33,9 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - monitoringServer "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/server" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/release" 
"github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/version" ) const ( @@ -65,7 +57,7 @@ func newRunCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command { } } -func run(override cfgOverrider) error { +func run(override cfgOverrider, modifiers ...application.PlatformModifier) error { // Windows: Mark service as stopped. // After this is run, the service is considered by the OS to be stopped. // This must be the first deferred cleanup task (last to execute). @@ -123,7 +115,7 @@ func run(override cfgOverrider) error { // that writes the agentID into fleet.enc (encrypted fleet.yml) before even loading the configuration. err = secret.CreateAgentSecret() if err != nil { - return err + return fmt.Errorf("failed to read/write secrets: %w", err) } agentInfo, err := info.NewAgentInfoWithLog(defaultLogLevel(cfg), createAgentID) @@ -151,8 +143,6 @@ func run(override cfgOverrider) error { rexLogger := logger.Named("reexec") rex := reexec.NewManager(rexLogger, execPath) - statusCtrl := status.NewController(logger) - tracer, err := initTracer(agentName, release.Version(), cfg.Settings.MonitoringConfig) if err != nil { return fmt.Errorf("could not initiate APM tracer: %w", err) @@ -167,32 +157,37 @@ func run(override cfgOverrider) error { logger.Info("APM instrumentation disabled") } - control := server.New(logger.Named("control"), rex, statusCtrl, nil, tracer) - // start the control listener - if err := control.Start(); err != nil { - return err - } - defer control.Stop() - - app, err := application.New(logger, rex, statusCtrl, control, agentInfo, tracer) + app, err := application.New(logger, agentInfo, rex, tracer, modifiers...) 
if err != nil { return err } - control.SetRouteFn(app.Routes) - control.SetMonitoringCfg(cfg.Settings.MonitoringConfig) - - serverStopFn, err := setupMetrics(agentInfo, logger, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, app, tracer) - if err != nil { + control := server.New(logger.Named("control"), cfg.Settings.MonitoringConfig, app, tracer) + // start the control listener + if err := control.Start(); err != nil { return err } - defer func() { - _ = serverStopFn() - }() + defer control.Stop() - if err := app.Start(); err != nil { - return err - } + /* + serverStopFn, err := setupMetrics(agentInfo, logger, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, app, tracer) + if err != nil { + return err + } + defer func() { + _ = serverStopFn() + }() + */ + + appDone := make(chan bool) + appErrCh := make(chan error) + ctx, cancel = context.WithCancel(context.Background()) + defer cancel() + go func() { + err := app.Run(ctx) + close(appDone) + appErrCh <- err + }() // listen for signals signals := make(chan os.Signal, 1) @@ -203,6 +198,8 @@ func run(override cfgOverrider) error { select { case <-stop: breakout = true + case <-appDone: + breakout = true case <-rex.ShutdownChan(): reexecing = true breakout = true @@ -222,7 +219,9 @@ func run(override cfgOverrider) error { } } - err = app.Stop() + cancel() + err = <-appErrCh + if !reexecing { logger.Info("Shutting down completed.") return err @@ -330,6 +329,7 @@ func defaultLogLevel(cfg *configuration.Configuration) string { return defaultLogLevel } +/* func setupMetrics( _ *info.AgentInfo, logger *logger.Logger, @@ -366,6 +366,7 @@ func setupMetrics( func isProcessStatsEnabled(cfg *monitoringCfg.MonitoringHTTPConfig) bool { return cfg != nil && cfg.Enabled } +*/ func tryDelayEnroll(ctx context.Context, logger *logger.Logger, cfg *configuration.Configuration, override cfgOverrider) (*configuration.Configuration, error) { enrollPath := paths.AgentEnrollFile() diff --git 
a/internal/pkg/agent/cmd/status.go b/internal/pkg/agent/cmd/status.go index f3649bafdc9..2f748e6dc89 100644 --- a/internal/pkg/agent/cmd/status.go +++ b/internal/pkg/agent/cmd/status.go @@ -25,7 +25,7 @@ import ( type outputter func(io.Writer, interface{}) error var statusOutputs = map[string]outputter{ - "human": humanStatusOutput, + "human": humanStateOutput, "json": jsonOutput, "yaml": yamlOutput, } @@ -64,7 +64,7 @@ func statusCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error innerCtx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - status, err := getDaemonStatus(innerCtx) + state, err := getDaemonState(innerCtx) if errors.Is(err, context.DeadlineExceeded) { return errors.New("timed out after 30 seconds trying to connect to Elastic Agent daemon") } else if errors.Is(err, context.Canceled) { @@ -73,12 +73,12 @@ func statusCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error return fmt.Errorf("failed to communicate with Elastic Agent daemon: %w", err) } - err = outputFunc(streams.Out, status) + err = outputFunc(streams.Out, state) if err != nil { return err } // exit 0 only if the Elastic Agent daemon is healthy - if status.Status == client.Healthy { + if state.State == client.Healthy { os.Exit(0) } else { os.Exit(1) @@ -86,32 +86,32 @@ func statusCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error return nil } -func humanStatusOutput(w io.Writer, obj interface{}) error { - status, ok := obj.(*client.AgentStatus) +func humanStateOutput(w io.Writer, obj interface{}) error { + status, ok := obj.(*client.AgentState) if !ok { return fmt.Errorf("unable to cast %T as *client.AgentStatus", obj) } - return outputStatus(w, status) + return outputState(w, status) } -func outputStatus(w io.Writer, status *client.AgentStatus) error { - fmt.Fprintf(w, "Status: %s\n", status.Status) - if status.Message == "" { +func outputState(w io.Writer, state *client.AgentState) error { + fmt.Fprintf(w, "State: %s\n", 
state.State) + if state.Message == "" { fmt.Fprint(w, "Message: (no message)\n") } else { - fmt.Fprintf(w, "Message: %s\n", status.Message) + fmt.Fprintf(w, "Message: %s\n", state.Message) } - if len(status.Applications) == 0 { - fmt.Fprint(w, "Applications: (none)\n") + if len(state.Components) == 0 { + fmt.Fprint(w, "Components: (none)\n") } else { - fmt.Fprint(w, "Applications:\n") + fmt.Fprint(w, "Components:\n") tw := tabwriter.NewWriter(w, 4, 1, 2, ' ', 0) - for _, app := range status.Applications { - fmt.Fprintf(tw, " * %s\t(%s)\n", app.Name, app.Status) - if app.Message == "" { + for _, comp := range state.Components { + fmt.Fprintf(tw, " * %s\t(%s)\n", comp.Name, comp.State) + if comp.Message == "" { fmt.Fprint(tw, "\t(no message)\n") } else { - fmt.Fprintf(tw, "\t%s\n", app.Message) + fmt.Fprintf(tw, "\t%s\n", comp.Message) } } tw.Flush() diff --git a/internal/pkg/agent/configuration/grpc.go b/internal/pkg/agent/configuration/grpc.go new file mode 100644 index 00000000000..6624e6a0c08 --- /dev/null +++ b/internal/pkg/agent/configuration/grpc.go @@ -0,0 +1,26 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package configuration + +import "fmt" + +// GRPCConfig is a configuration of GRPC server. +type GRPCConfig struct { + Address string `config:"address"` + Port uint16 `config:"port"` +} + +// DefaultGRPCConfig creates a default server configuration. +func DefaultGRPCConfig() *GRPCConfig { + return &GRPCConfig{ + Address: "localhost", + Port: 6789, + } +} + +// String returns the composed listen address for the GRPC. 
+func (cfg *GRPCConfig) String() string { + return fmt.Sprintf("%s:%d", cfg.Address, cfg.Port) +} diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go index 7445f02a462..7c2c422a65b 100644 --- a/internal/pkg/agent/configuration/settings.go +++ b/internal/pkg/agent/configuration/settings.go @@ -12,7 +12,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" ) // ExternalInputsPattern is a glob that matches the paths of external configuration files. @@ -22,7 +21,7 @@ var ExternalInputsPattern = filepath.Join("inputs.d", "*.yml") type SettingsConfig struct { DownloadConfig *artifact.Config `yaml:"download" config:"download" json:"download"` ProcessConfig *process.Config `yaml:"process" config:"process" json:"process"` - GRPC *server.Config `yaml:"grpc" config:"grpc" json:"grpc"` + GRPC *GRPCConfig `yaml:"grpc" config:"grpc" json:"grpc"` RetryConfig *retry.Config `yaml:"retry" config:"retry" json:"retry"` MonitoringConfig *monitoringCfg.MonitoringConfig `yaml:"monitoring" config:"monitoring" json:"monitoring"` LoggingConfig *logger.Config `yaml:"logging,omitempty" config:"logging,omitempty" json:"logging,omitempty"` @@ -40,7 +39,7 @@ func DefaultSettingsConfig() *SettingsConfig { DownloadConfig: artifact.DefaultConfig(), LoggingConfig: logger.DefaultLoggingConfig(), MonitoringConfig: monitoringCfg.DefaultConfig(), - GRPC: server.DefaultGRPCConfig(), + GRPC: DefaultGRPCConfig(), Reload: DefaultReloadConfig(), } } diff --git a/internal/pkg/agent/control/client/client.go b/internal/pkg/agent/control/client/client.go index 728e830b462..634cc25a5af 100644 --- a/internal/pkg/agent/control/client/client.go +++ b/internal/pkg/agent/control/client/client.go @@ -15,24 +15,38 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" ) 
-// Status is the status of the Elastic Agent -type Status = cproto.Status +// UnitType is the type of the unit +type UnitType = cproto.UnitType + +// State is the state codes +type State = cproto.State + +const ( + // UnitTypeInput is an input unit. + UnitTypeInput UnitType = cproto.UnitType_INPUT + // UnitTypeOutput is an output unit. + UnitTypeOutput UnitType = cproto.UnitType_OUTPUT +) const ( // Starting is when the it is still starting. - Starting Status = cproto.Status_STARTING + Starting State = cproto.State_STARTING // Configuring is when it is configuring. - Configuring Status = cproto.Status_CONFIGURING + Configuring State = cproto.State_CONFIGURING // Healthy is when it is healthy. - Healthy Status = cproto.Status_HEALTHY + Healthy State = cproto.State_HEALTHY // Degraded is when it is degraded. - Degraded Status = cproto.Status_DEGRADED + Degraded State = cproto.State_DEGRADED // Failed is when it is failed. - Failed Status = cproto.Status_FAILED + Failed State = cproto.State_FAILED // Stopping is when it is stopping. - Stopping Status = cproto.Status_STOPPING + Stopping State = cproto.State_STOPPING + // Stopped is when it is stopped. + Stopped State = cproto.State_STOPPED // Upgrading is when it is upgrading. - Upgrading Status = cproto.Status_UPGRADING + Upgrading State = cproto.State_UPGRADING + // Rollback is when the upgrade is rolling back. + Rollback State = cproto.State_ROLLBACK ) // Version is the current running version of the daemon. @@ -43,14 +57,29 @@ type Version struct { Snapshot bool } -// ApplicationStatus is a status of an application managed by the Elastic Agent. -// TODO(Anderson): Implement sort.Interface and sort it. -type ApplicationStatus struct { - ID string - Name string - Status Status - Message string - Payload map[string]interface{} +// ComponentUnitState is a state of a unit running inside a component. 
+type ComponentUnitState struct { + UnitID string `json:"unit_id" yaml:"unit_id"` + UnitType UnitType `json:"unit_type" yaml:"unit_type"` + State State `json:"state" yaml:"state"` + Message string `json:"message" yaml:"message"` + Payload map[string]interface{} `json:"payload,omitempty" yaml:"payload,omitempty"` +} + +// ComponentState is a state of a component managed by the Elastic Agent. +type ComponentState struct { + ID string `json:"id" yaml:"id"` + Name string `json:"name" yaml:"name"` + State State `json:"state" yaml:"state"` + Message string `json:"message" yaml:"message"` + Units []ComponentUnitState `json:"units" yaml:"units"` +} + +// AgentState is the current state of the Elastic Agent. +type AgentState struct { + State State `json:"state" yaml:"state"` + Message string `json:"message" yaml:"message"` + Components []ComponentState `json:"components" yaml:"components"` } // ProcMeta is the running version and ID information for a running process. @@ -80,13 +109,6 @@ type ProcPProf struct { Error string } -// AgentStatus is the current status of the Elastic Agent. -type AgentStatus struct { - Status Status - Message string - Applications []*ApplicationStatus -} - // Client communicates to Elastic Agent through the control protocol. type Client interface { // Connect connects to the running Elastic Agent. @@ -95,8 +117,8 @@ type Client interface { Disconnect() // Version returns the current version of the running agent. Version(ctx context.Context) (Version, error) - // Status returns the current status of the running agent. - Status(ctx context.Context) (*AgentStatus, error) + // State returns the current state of the running agent. + State(ctx context.Context) (*AgentState, error) // Restart triggers restarting the current running daemon. Restart(ctx context.Context) error // Upgrade triggers upgrade of the current running daemon. 
@@ -161,32 +183,42 @@ func (c *client) Version(ctx context.Context) (Version, error) { }, nil } -// Status returns the current status of the running agent. -func (c *client) Status(ctx context.Context) (*AgentStatus, error) { - res, err := c.client.Status(ctx, &cproto.Empty{}) +// State returns the current state of the running agent. +func (c *client) State(ctx context.Context) (*AgentState, error) { + res, err := c.client.State(ctx, &cproto.Empty{}) if err != nil { return nil, err } - s := &AgentStatus{ - Status: res.Status, - Message: res.Message, - Applications: make([]*ApplicationStatus, len(res.Applications)), + s := &AgentState{ + State: res.State, + Message: res.Message, + Components: make([]ComponentState, 0, len(res.Components)), } - for i, appRes := range res.Applications { - var payload map[string]interface{} - if appRes.Payload != "" { - err := json.Unmarshal([]byte(appRes.Payload), &payload) - if err != nil { - return nil, err + for _, comp := range res.Components { + units := make([]ComponentUnitState, 0, len(comp.Units)) + for _, unit := range comp.Units { + var payload map[string]interface{} + if unit.Payload != "" { + err := json.Unmarshal([]byte(unit.Payload), &payload) + if err != nil { + return nil, err + } } + units = append(units, ComponentUnitState{ + UnitID: unit.UnitId, + UnitType: unit.UnitType, + State: unit.State, + Message: unit.Message, + Payload: payload, + }) } - s.Applications[i] = &ApplicationStatus{ - ID: appRes.Id, - Name: appRes.Name, - Status: appRes.Status, - Message: appRes.Message, - Payload: payload, - } + s.Components = append(s.Components, ComponentState{ + ID: comp.Id, + Name: comp.Name, + State: comp.State, + Message: comp.Message, + Units: units, + }) } return s, nil } diff --git a/internal/pkg/agent/control/cproto/control.pb.go b/internal/pkg/agent/control/cproto/control.pb.go index 43609b68f0a..7ada35a4fe0 100644 --- a/internal/pkg/agent/control/cproto/control.pb.go +++ 
b/internal/pkg/agent/control/cproto/control.pb.go @@ -25,71 +25,121 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Status codes for the current state. -type Status int32 +// State codes for the current state. +type State int32 const ( - Status_STARTING Status = 0 - Status_CONFIGURING Status = 1 - Status_HEALTHY Status = 2 - Status_DEGRADED Status = 3 - Status_FAILED Status = 4 - Status_STOPPING Status = 5 - Status_UPGRADING Status = 6 - Status_ROLLBACK Status = 7 + State_STARTING State = 0 + State_CONFIGURING State = 1 + State_HEALTHY State = 2 + State_DEGRADED State = 3 + State_FAILED State = 4 + State_STOPPING State = 5 + State_STOPPED State = 6 + State_UPGRADING State = 7 + State_ROLLBACK State = 8 ) -// Enum value maps for Status. +// Enum value maps for State. var ( - Status_name = map[int32]string{ + State_name = map[int32]string{ 0: "STARTING", 1: "CONFIGURING", 2: "HEALTHY", 3: "DEGRADED", 4: "FAILED", 5: "STOPPING", - 6: "UPGRADING", - 7: "ROLLBACK", + 6: "STOPPED", + 7: "UPGRADING", + 8: "ROLLBACK", } - Status_value = map[string]int32{ + State_value = map[string]int32{ "STARTING": 0, "CONFIGURING": 1, "HEALTHY": 2, "DEGRADED": 3, "FAILED": 4, "STOPPING": 5, - "UPGRADING": 6, - "ROLLBACK": 7, + "STOPPED": 6, + "UPGRADING": 7, + "ROLLBACK": 8, } ) -func (x Status) Enum() *Status { - p := new(Status) +func (x State) Enum() *State { + p := new(State) *p = x return p } -func (x Status) String() string { +func (x State) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (Status) Descriptor() protoreflect.EnumDescriptor { +func (State) Descriptor() protoreflect.EnumDescriptor { return file_control_proto_enumTypes[0].Descriptor() } -func (Status) Type() protoreflect.EnumType { +func (State) Type() protoreflect.EnumType { return &file_control_proto_enumTypes[0] } -func (x Status) Number() protoreflect.EnumNumber { +func (x State) Number() protoreflect.EnumNumber { return 
protoreflect.EnumNumber(x) } -// Deprecated: Use Status.Descriptor instead. -func (Status) EnumDescriptor() ([]byte, []int) { +// Deprecated: Use State.Descriptor instead. +func (State) EnumDescriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{0} } +// Unit Type running inside a component. +type UnitType int32 + +const ( + UnitType_INPUT UnitType = 0 + UnitType_OUTPUT UnitType = 1 +) + +// Enum value maps for UnitType. +var ( + UnitType_name = map[int32]string{ + 0: "INPUT", + 1: "OUTPUT", + } + UnitType_value = map[string]int32{ + "INPUT": 0, + "OUTPUT": 1, + } +) + +func (x UnitType) Enum() *UnitType { + p := new(UnitType) + *p = x + return p +} + +func (x UnitType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UnitType) Descriptor() protoreflect.EnumDescriptor { + return file_control_proto_enumTypes[1].Descriptor() +} + +func (UnitType) Type() protoreflect.EnumType { + return &file_control_proto_enumTypes[1] +} + +func (x UnitType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UnitType.Descriptor instead. +func (UnitType) EnumDescriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{1} +} + // Action status codes for restart and upgrade response. type ActionStatus int32 @@ -123,11 +173,11 @@ func (x ActionStatus) String() string { } func (ActionStatus) Descriptor() protoreflect.EnumDescriptor { - return file_control_proto_enumTypes[1].Descriptor() + return file_control_proto_enumTypes[2].Descriptor() } func (ActionStatus) Type() protoreflect.EnumType { - return &file_control_proto_enumTypes[1] + return &file_control_proto_enumTypes[2] } func (x ActionStatus) Number() protoreflect.EnumNumber { @@ -136,7 +186,7 @@ func (x ActionStatus) Number() protoreflect.EnumNumber { // Deprecated: Use ActionStatus.Descriptor instead. 
func (ActionStatus) EnumDescriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{1} + return file_control_proto_rawDescGZIP(), []int{2} } // pprof endpoint that can be requested. @@ -191,11 +241,11 @@ func (x PprofOption) String() string { } func (PprofOption) Descriptor() protoreflect.EnumDescriptor { - return file_control_proto_enumTypes[2].Descriptor() + return file_control_proto_enumTypes[3].Descriptor() } func (PprofOption) Type() protoreflect.EnumType { - return &file_control_proto_enumTypes[2] + return &file_control_proto_enumTypes[3] } func (x PprofOption) Number() protoreflect.EnumNumber { @@ -204,7 +254,7 @@ func (x PprofOption) Number() protoreflect.EnumNumber { // Deprecated: Use PprofOption.Descriptor instead. func (PprofOption) EnumDescriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{2} + return file_control_proto_rawDescGZIP(), []int{3} } // Empty message. @@ -511,26 +561,25 @@ func (x *UpgradeResponse) GetError() string { return "" } -// Current status of the application in Elastic Agent. -type ApplicationStatus struct { +type ComponentUnitState struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Unique application ID. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Application name. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Current status. - Status Status `protobuf:"varint,3,opt,name=status,proto3,enum=cproto.Status" json:"status,omitempty"` - // Current status message. + // Type of unit in the component. + UnitType UnitType `protobuf:"varint,1,opt,name=unit_type,json=unitType,proto3,enum=cproto.UnitType" json:"unit_type,omitempty"` + // ID of the unit in the component. + UnitId string `protobuf:"bytes,2,opt,name=unit_id,json=unitId,proto3" json:"unit_id,omitempty"` + // Current state. 
+ State State `protobuf:"varint,3,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"` + // Current state message. Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` - // Current status payload. + // Current state payload. Payload string `protobuf:"bytes,5,opt,name=payload,proto3" json:"payload,omitempty"` } -func (x *ApplicationStatus) Reset() { - *x = ApplicationStatus{} +func (x *ComponentUnitState) Reset() { + *x = ComponentUnitState{} if protoimpl.UnsafeEnabled { mi := &file_control_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -538,13 +587,13 @@ func (x *ApplicationStatus) Reset() { } } -func (x *ApplicationStatus) String() string { +func (x *ComponentUnitState) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplicationStatus) ProtoMessage() {} +func (*ComponentUnitState) ProtoMessage() {} -func (x *ApplicationStatus) ProtoReflect() protoreflect.Message { +func (x *ComponentUnitState) ProtoReflect() protoreflect.Message { mi := &file_control_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -556,44 +605,205 @@ func (x *ApplicationStatus) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplicationStatus.ProtoReflect.Descriptor instead. -func (*ApplicationStatus) Descriptor() ([]byte, []int) { +// Deprecated: Use ComponentUnitState.ProtoReflect.Descriptor instead. 
+func (*ComponentUnitState) Descriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{5} } -func (x *ApplicationStatus) GetId() string { +func (x *ComponentUnitState) GetUnitType() UnitType { + if x != nil { + return x.UnitType + } + return UnitType_INPUT +} + +func (x *ComponentUnitState) GetUnitId() string { + if x != nil { + return x.UnitId + } + return "" +} + +func (x *ComponentUnitState) GetState() State { + if x != nil { + return x.State + } + return State_STARTING +} + +func (x *ComponentUnitState) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *ComponentUnitState) GetPayload() string { + if x != nil { + return x.Payload + } + return "" +} + +// Version information reported by the component to Elastic Agent. +type ComponentVersionInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the component. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Version of the component. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // Extra meta information about the version. 
+ Meta map[string]string `protobuf:"bytes,3,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ComponentVersionInfo) Reset() { + *x = ComponentVersionInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_control_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComponentVersionInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComponentVersionInfo) ProtoMessage() {} + +func (x *ComponentVersionInfo) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComponentVersionInfo.ProtoReflect.Descriptor instead. +func (*ComponentVersionInfo) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{6} +} + +func (x *ComponentVersionInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ComponentVersionInfo) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ComponentVersionInfo) GetMeta() map[string]string { + if x != nil { + return x.Meta + } + return nil +} + +// Current state of a running component by Elastic Agent. +type ComponentState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique component ID. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Component name. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Current state. + State State `protobuf:"varint,3,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"` + // Current state message. 
+ Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` + // Current units running in the component. + Units []*ComponentUnitState `protobuf:"bytes,5,rep,name=units,proto3" json:"units,omitempty"` + // Current version information for the running component. + VersionInfo *ComponentVersionInfo `protobuf:"bytes,6,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` +} + +func (x *ComponentState) Reset() { + *x = ComponentState{} + if protoimpl.UnsafeEnabled { + mi := &file_control_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComponentState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComponentState) ProtoMessage() {} + +func (x *ComponentState) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComponentState.ProtoReflect.Descriptor instead. 
+func (*ComponentState) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{7} +} + +func (x *ComponentState) GetId() string { if x != nil { return x.Id } return "" } -func (x *ApplicationStatus) GetName() string { +func (x *ComponentState) GetName() string { if x != nil { return x.Name } return "" } -func (x *ApplicationStatus) GetStatus() Status { +func (x *ComponentState) GetState() State { if x != nil { - return x.Status + return x.State } - return Status_STARTING + return State_STARTING } -func (x *ApplicationStatus) GetMessage() string { +func (x *ComponentState) GetMessage() string { if x != nil { return x.Message } return "" } -func (x *ApplicationStatus) GetPayload() string { +func (x *ComponentState) GetUnits() []*ComponentUnitState { if x != nil { - return x.Payload + return x.Units } - return "" + return nil +} + +func (x *ComponentState) GetVersionInfo() *ComponentVersionInfo { + if x != nil { + return x.VersionInfo + } + return nil } // Current metadata for a running process. @@ -622,7 +832,7 @@ type ProcMeta struct { func (x *ProcMeta) Reset() { *x = ProcMeta{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[6] + mi := &file_control_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -635,7 +845,7 @@ func (x *ProcMeta) String() string { func (*ProcMeta) ProtoMessage() {} func (x *ProcMeta) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[6] + mi := &file_control_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -648,7 +858,7 @@ func (x *ProcMeta) ProtoReflect() protoreflect.Message { // Deprecated: Use ProcMeta.ProtoReflect.Descriptor instead. 
func (*ProcMeta) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{6} + return file_control_proto_rawDescGZIP(), []int{8} } func (x *ProcMeta) GetProcess() string { @@ -756,37 +966,37 @@ func (x *ProcMeta) GetError() string { return "" } -// Status is the current status of Elastic Agent. -type StatusResponse struct { +// StateResponse is the current state of Elastic Agent. +type StateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Overall status of Elastic Agent. - Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=cproto.Status" json:"status,omitempty"` + // Overall state of Elastic Agent. + State State `protobuf:"varint,1,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"` // Overall status message of Elastic Agent. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // Status of each application in Elastic Agent. - Applications []*ApplicationStatus `protobuf:"bytes,3,rep,name=applications,proto3" json:"applications,omitempty"` + // Status of each component in Elastic Agent. 
+ Components []*ComponentState `protobuf:"bytes,3,rep,name=components,proto3" json:"components,omitempty"` } -func (x *StatusResponse) Reset() { - *x = StatusResponse{} +func (x *StateResponse) Reset() { + *x = StateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[7] + mi := &file_control_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *StatusResponse) String() string { +func (x *StateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StatusResponse) ProtoMessage() {} +func (*StateResponse) ProtoMessage() {} -func (x *StatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[7] +func (x *StateResponse) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -797,28 +1007,28 @@ func (x *StatusResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. -func (*StatusResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{7} +// Deprecated: Use StateResponse.ProtoReflect.Descriptor instead. 
+func (*StateResponse) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{9} } -func (x *StatusResponse) GetStatus() Status { +func (x *StateResponse) GetState() State { if x != nil { - return x.Status + return x.State } - return Status_STARTING + return State_STARTING } -func (x *StatusResponse) GetMessage() string { +func (x *StateResponse) GetMessage() string { if x != nil { return x.Message } return "" } -func (x *StatusResponse) GetApplications() []*ApplicationStatus { +func (x *StateResponse) GetComponents() []*ComponentState { if x != nil { - return x.Applications + return x.Components } return nil } @@ -835,7 +1045,7 @@ type ProcMetaResponse struct { func (x *ProcMetaResponse) Reset() { *x = ProcMetaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[8] + mi := &file_control_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -848,7 +1058,7 @@ func (x *ProcMetaResponse) String() string { func (*ProcMetaResponse) ProtoMessage() {} func (x *ProcMetaResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[8] + mi := &file_control_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -861,7 +1071,7 @@ func (x *ProcMetaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ProcMetaResponse.ProtoReflect.Descriptor instead. 
func (*ProcMetaResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{8} + return file_control_proto_rawDescGZIP(), []int{10} } func (x *ProcMetaResponse) GetProcs() []*ProcMeta { @@ -890,7 +1100,7 @@ type PprofRequest struct { func (x *PprofRequest) Reset() { *x = PprofRequest{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[9] + mi := &file_control_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -903,7 +1113,7 @@ func (x *PprofRequest) String() string { func (*PprofRequest) ProtoMessage() {} func (x *PprofRequest) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[9] + mi := &file_control_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -916,7 +1126,7 @@ func (x *PprofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PprofRequest.ProtoReflect.Descriptor instead. 
func (*PprofRequest) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{9} + return file_control_proto_rawDescGZIP(), []int{11} } func (x *PprofRequest) GetPprofType() []PprofOption { @@ -963,7 +1173,7 @@ type PprofResult struct { func (x *PprofResult) Reset() { *x = PprofResult{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[10] + mi := &file_control_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -976,7 +1186,7 @@ func (x *PprofResult) String() string { func (*PprofResult) ProtoMessage() {} func (x *PprofResult) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[10] + mi := &file_control_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -989,7 +1199,7 @@ func (x *PprofResult) ProtoReflect() protoreflect.Message { // Deprecated: Use PprofResult.ProtoReflect.Descriptor instead. 
func (*PprofResult) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{10} + return file_control_proto_rawDescGZIP(), []int{12} } func (x *PprofResult) GetAppName() string { @@ -1039,7 +1249,7 @@ type PprofResponse struct { func (x *PprofResponse) Reset() { *x = PprofResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[11] + mi := &file_control_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1052,7 +1262,7 @@ func (x *PprofResponse) String() string { func (*PprofResponse) ProtoMessage() {} func (x *PprofResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[11] + mi := &file_control_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1065,7 +1275,7 @@ func (x *PprofResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PprofResponse.ProtoReflect.Descriptor instead. 
func (*PprofResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{11} + return file_control_proto_rawDescGZIP(), []int{13} } func (x *PprofResponse) GetResults() []*PprofResult { @@ -1090,7 +1300,7 @@ type MetricsResponse struct { func (x *MetricsResponse) Reset() { *x = MetricsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[12] + mi := &file_control_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1103,7 +1313,7 @@ func (x *MetricsResponse) String() string { func (*MetricsResponse) ProtoMessage() {} func (x *MetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[12] + mi := &file_control_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1116,7 +1326,7 @@ func (x *MetricsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricsResponse.ProtoReflect.Descriptor instead. 
func (*MetricsResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{12} + return file_control_proto_rawDescGZIP(), []int{14} } func (x *MetricsResponse) GetAppName() string { @@ -1159,7 +1369,7 @@ type ProcMetricsResponse struct { func (x *ProcMetricsResponse) Reset() { *x = ProcMetricsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[13] + mi := &file_control_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1172,7 +1382,7 @@ func (x *ProcMetricsResponse) String() string { func (*ProcMetricsResponse) ProtoMessage() {} func (x *ProcMetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[13] + mi := &file_control_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1185,7 +1395,7 @@ func (x *ProcMetricsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ProcMetricsResponse.ProtoReflect.Descriptor instead. 
func (*ProcMetricsResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{13} + return file_control_proto_rawDescGZIP(), []int{15} } func (x *ProcMetricsResponse) GetResult() []*MetricsResponse { @@ -1225,139 +1435,170 @@ var file_control_proto_rawDesc = []byte{ 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x22, 0x93, 0x01, 0x0a, 0x11, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb5, 0x03, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, - 0x4d, 0x65, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 
0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, - 0x0a, 0x0c, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x49, - 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1d, - 0x0a, 0x0a, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x67, 0x69, 0x64, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, 0x72, 0x47, 0x69, 0x64, 0x12, 0x22, 0x0a, - 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0d, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, - 0x0a, 0x10, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, - 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x65, 0x6c, 
0x61, 0x73, 0x74, 0x69, - 0x63, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, - 0x91, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x3a, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x22, - 0x9d, 0x01, 0x0a, 0x0c, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, - 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 
0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, - 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, - 0xa4, 0x01, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, - 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x3e, 0x0a, 0x0d, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x75, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, - 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, - 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x46, 0x0a, - 0x13, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2a, 0x79, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, - 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, - 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, - 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, - 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, - 0x47, 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, - 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x07, - 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 
0x0b, 0x0a, - 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, - 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, - 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, - 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, - 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, - 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, - 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, - 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x32, 0x8e, 0x03, 0x0a, 0x13, - 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0d, - 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, - 0x67, 0x72, 0x61, 0x64, 
0x65, 0x12, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, - 0x74, 0x61, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x18, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, - 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x50, - 0x70, 0x72, 0x6f, 0x66, 0x12, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x1b, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x26, 0x5a, 0x21, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x22, 0xb5, 0x01, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x55, + 0x6e, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 
0x08, 0x75, + 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, + 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb9, 0x01, 0x0a, 0x14, 0x43, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, + 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x53, 
0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x75, + 0x6e, 0x69, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x69, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x3f, 0x0a, + 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xb5, + 0x03, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x65, 
0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x70, 0x68, + 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, + 0x72, 0x5f, 0x67, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, + 0x72, 0x47, 0x69, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, + 0x74, 0x75, 0x72, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x72, 0x63, 0x68, + 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, + 0x5f, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, + 
0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0x3a, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, + 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x0c, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x09, + 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, + 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, 0xa4, 0x01, 0x0a, 0x0b, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, + 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0x3e, 0x0a, 0x0d, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x22, 0x75, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 
0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x46, 0x0a, 0x13, 0x50, 0x72, 0x6f, + 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x2a, 0x85, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, + 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, + 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, + 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, + 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, + 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, + 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, + 0x53, 0x55, 
0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, + 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, + 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, + 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, + 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, + 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, + 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, + 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x32, 0x8c, 0x03, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, + 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, + 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 
0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, + 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x33, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x0d, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x12, 0x14, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x50, + 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x26, 0x5a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1372,57 +1613,66 @@ func file_control_proto_rawDescGZIP() []byte { return file_control_proto_rawDescData } -var file_control_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_control_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var 
file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_control_proto_goTypes = []interface{}{ - (Status)(0), // 0: cproto.Status - (ActionStatus)(0), // 1: cproto.ActionStatus - (PprofOption)(0), // 2: cproto.PprofOption - (*Empty)(nil), // 3: cproto.Empty - (*VersionResponse)(nil), // 4: cproto.VersionResponse - (*RestartResponse)(nil), // 5: cproto.RestartResponse - (*UpgradeRequest)(nil), // 6: cproto.UpgradeRequest - (*UpgradeResponse)(nil), // 7: cproto.UpgradeResponse - (*ApplicationStatus)(nil), // 8: cproto.ApplicationStatus - (*ProcMeta)(nil), // 9: cproto.ProcMeta - (*StatusResponse)(nil), // 10: cproto.StatusResponse - (*ProcMetaResponse)(nil), // 11: cproto.ProcMetaResponse - (*PprofRequest)(nil), // 12: cproto.PprofRequest - (*PprofResult)(nil), // 13: cproto.PprofResult - (*PprofResponse)(nil), // 14: cproto.PprofResponse - (*MetricsResponse)(nil), // 15: cproto.MetricsResponse - (*ProcMetricsResponse)(nil), // 16: cproto.ProcMetricsResponse + (State)(0), // 0: cproto.State + (UnitType)(0), // 1: cproto.UnitType + (ActionStatus)(0), // 2: cproto.ActionStatus + (PprofOption)(0), // 3: cproto.PprofOption + (*Empty)(nil), // 4: cproto.Empty + (*VersionResponse)(nil), // 5: cproto.VersionResponse + (*RestartResponse)(nil), // 6: cproto.RestartResponse + (*UpgradeRequest)(nil), // 7: cproto.UpgradeRequest + (*UpgradeResponse)(nil), // 8: cproto.UpgradeResponse + (*ComponentUnitState)(nil), // 9: cproto.ComponentUnitState + (*ComponentVersionInfo)(nil), // 10: cproto.ComponentVersionInfo + (*ComponentState)(nil), // 11: cproto.ComponentState + (*ProcMeta)(nil), // 12: cproto.ProcMeta + (*StateResponse)(nil), // 13: cproto.StateResponse + (*ProcMetaResponse)(nil), // 14: cproto.ProcMetaResponse + (*PprofRequest)(nil), // 15: cproto.PprofRequest + (*PprofResult)(nil), // 16: cproto.PprofResult + (*PprofResponse)(nil), // 17: cproto.PprofResponse + (*MetricsResponse)(nil), // 18: cproto.MetricsResponse + (*ProcMetricsResponse)(nil), // 19: 
cproto.ProcMetricsResponse + nil, // 20: cproto.ComponentVersionInfo.MetaEntry } var file_control_proto_depIdxs = []int32{ - 1, // 0: cproto.RestartResponse.status:type_name -> cproto.ActionStatus - 1, // 1: cproto.UpgradeResponse.status:type_name -> cproto.ActionStatus - 0, // 2: cproto.ApplicationStatus.status:type_name -> cproto.Status - 0, // 3: cproto.StatusResponse.status:type_name -> cproto.Status - 8, // 4: cproto.StatusResponse.applications:type_name -> cproto.ApplicationStatus - 9, // 5: cproto.ProcMetaResponse.procs:type_name -> cproto.ProcMeta - 2, // 6: cproto.PprofRequest.pprofType:type_name -> cproto.PprofOption - 2, // 7: cproto.PprofResult.pprofType:type_name -> cproto.PprofOption - 13, // 8: cproto.PprofResponse.results:type_name -> cproto.PprofResult - 15, // 9: cproto.ProcMetricsResponse.result:type_name -> cproto.MetricsResponse - 3, // 10: cproto.ElasticAgentControl.Version:input_type -> cproto.Empty - 3, // 11: cproto.ElasticAgentControl.Status:input_type -> cproto.Empty - 3, // 12: cproto.ElasticAgentControl.Restart:input_type -> cproto.Empty - 6, // 13: cproto.ElasticAgentControl.Upgrade:input_type -> cproto.UpgradeRequest - 3, // 14: cproto.ElasticAgentControl.ProcMeta:input_type -> cproto.Empty - 12, // 15: cproto.ElasticAgentControl.Pprof:input_type -> cproto.PprofRequest - 3, // 16: cproto.ElasticAgentControl.ProcMetrics:input_type -> cproto.Empty - 4, // 17: cproto.ElasticAgentControl.Version:output_type -> cproto.VersionResponse - 10, // 18: cproto.ElasticAgentControl.Status:output_type -> cproto.StatusResponse - 5, // 19: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse - 7, // 20: cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse - 11, // 21: cproto.ElasticAgentControl.ProcMeta:output_type -> cproto.ProcMetaResponse - 14, // 22: cproto.ElasticAgentControl.Pprof:output_type -> cproto.PprofResponse - 16, // 23: cproto.ElasticAgentControl.ProcMetrics:output_type -> 
cproto.ProcMetricsResponse - 17, // [17:24] is the sub-list for method output_type - 10, // [10:17] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name + 2, // 0: cproto.RestartResponse.status:type_name -> cproto.ActionStatus + 2, // 1: cproto.UpgradeResponse.status:type_name -> cproto.ActionStatus + 1, // 2: cproto.ComponentUnitState.unit_type:type_name -> cproto.UnitType + 0, // 3: cproto.ComponentUnitState.state:type_name -> cproto.State + 20, // 4: cproto.ComponentVersionInfo.meta:type_name -> cproto.ComponentVersionInfo.MetaEntry + 0, // 5: cproto.ComponentState.state:type_name -> cproto.State + 9, // 6: cproto.ComponentState.units:type_name -> cproto.ComponentUnitState + 10, // 7: cproto.ComponentState.version_info:type_name -> cproto.ComponentVersionInfo + 0, // 8: cproto.StateResponse.state:type_name -> cproto.State + 11, // 9: cproto.StateResponse.components:type_name -> cproto.ComponentState + 12, // 10: cproto.ProcMetaResponse.procs:type_name -> cproto.ProcMeta + 3, // 11: cproto.PprofRequest.pprofType:type_name -> cproto.PprofOption + 3, // 12: cproto.PprofResult.pprofType:type_name -> cproto.PprofOption + 16, // 13: cproto.PprofResponse.results:type_name -> cproto.PprofResult + 18, // 14: cproto.ProcMetricsResponse.result:type_name -> cproto.MetricsResponse + 4, // 15: cproto.ElasticAgentControl.Version:input_type -> cproto.Empty + 4, // 16: cproto.ElasticAgentControl.State:input_type -> cproto.Empty + 4, // 17: cproto.ElasticAgentControl.Restart:input_type -> cproto.Empty + 7, // 18: cproto.ElasticAgentControl.Upgrade:input_type -> cproto.UpgradeRequest + 4, // 19: cproto.ElasticAgentControl.ProcMeta:input_type -> cproto.Empty + 15, // 20: cproto.ElasticAgentControl.Pprof:input_type -> cproto.PprofRequest + 4, // 21: cproto.ElasticAgentControl.ProcMetrics:input_type -> cproto.Empty + 5, // 22: 
cproto.ElasticAgentControl.Version:output_type -> cproto.VersionResponse + 13, // 23: cproto.ElasticAgentControl.State:output_type -> cproto.StateResponse + 6, // 24: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse + 8, // 25: cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse + 14, // 26: cproto.ElasticAgentControl.ProcMeta:output_type -> cproto.ProcMetaResponse + 17, // 27: cproto.ElasticAgentControl.Pprof:output_type -> cproto.PprofResponse + 19, // 28: cproto.ElasticAgentControl.ProcMetrics:output_type -> cproto.ProcMetricsResponse + 22, // [22:29] is the sub-list for method output_type + 15, // [15:22] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_control_proto_init() } @@ -1492,7 +1742,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplicationStatus); i { + switch v := v.(*ComponentUnitState); i { case 0: return &v.state case 1: @@ -1504,7 +1754,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcMeta); i { + switch v := v.(*ComponentVersionInfo); i { case 0: return &v.state case 1: @@ -1516,7 +1766,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StatusResponse); i { + switch v := v.(*ComponentState); i { case 0: return &v.state case 1: @@ -1528,7 +1778,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcMetaResponse); i { + switch v := v.(*ProcMeta); i { case 0: return &v.state case 1: @@ -1540,7 +1790,7 @@ func file_control_proto_init() { } } 
file_control_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofRequest); i { + switch v := v.(*StateResponse); i { case 0: return &v.state case 1: @@ -1552,7 +1802,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofResult); i { + switch v := v.(*ProcMetaResponse); i { case 0: return &v.state case 1: @@ -1564,7 +1814,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofResponse); i { + switch v := v.(*PprofRequest); i { case 0: return &v.state case 1: @@ -1576,7 +1826,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetricsResponse); i { + switch v := v.(*PprofResult); i { case 0: return &v.state case 1: @@ -1588,6 +1838,30 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PprofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProcMetricsResponse); i { case 0: return &v.state @@ -1605,8 +1879,8 @@ func file_control_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_control_proto_rawDesc, - NumEnums: 3, - NumMessages: 14, + NumEnums: 4, + NumMessages: 17, NumExtensions: 0, NumServices: 1, }, diff --git 
a/internal/pkg/agent/control/cproto/control_grpc.pb.go b/internal/pkg/agent/control/cproto/control_grpc.pb.go index 3365f1a6496..c9e97f7047a 100644 --- a/internal/pkg/agent/control/cproto/control_grpc.pb.go +++ b/internal/pkg/agent/control/cproto/control_grpc.pb.go @@ -29,8 +29,8 @@ const _ = grpc.SupportPackageIsVersion7 type ElasticAgentControlClient interface { // Fetches the currently running version of the Elastic Agent. Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionResponse, error) - // Fetches the currently status of the Elastic Agent. - Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) + // Fetches the currently states of the Elastic Agent. + State(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StateResponse, error) // Restart restarts the current running Elastic Agent. Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error) // Upgrade starts the upgrade process of Elastic Agent. @@ -60,9 +60,9 @@ func (c *elasticAgentControlClient) Version(ctx context.Context, in *Empty, opts return out, nil } -func (c *elasticAgentControlClient) Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Status", in, out, opts...) +func (c *elasticAgentControlClient) State(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StateResponse, error) { + out := new(StateResponse) + err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/State", in, out, opts...) if err != nil { return nil, err } @@ -120,8 +120,8 @@ func (c *elasticAgentControlClient) ProcMetrics(ctx context.Context, in *Empty, type ElasticAgentControlServer interface { // Fetches the currently running version of the Elastic Agent. Version(context.Context, *Empty) (*VersionResponse, error) - // Fetches the currently status of the Elastic Agent. 
- Status(context.Context, *Empty) (*StatusResponse, error) + // Fetches the currently states of the Elastic Agent. + State(context.Context, *Empty) (*StateResponse, error) // Restart restarts the current running Elastic Agent. Restart(context.Context, *Empty) (*RestartResponse, error) // Upgrade starts the upgrade process of Elastic Agent. @@ -142,8 +142,8 @@ type UnimplementedElasticAgentControlServer struct { func (UnimplementedElasticAgentControlServer) Version(context.Context, *Empty) (*VersionResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") } -func (UnimplementedElasticAgentControlServer) Status(context.Context, *Empty) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +func (UnimplementedElasticAgentControlServer) State(context.Context, *Empty) (*StateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method State not implemented") } func (UnimplementedElasticAgentControlServer) Restart(context.Context, *Empty) (*RestartResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Restart not implemented") @@ -191,20 +191,20 @@ func _ElasticAgentControl_Version_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } -func _ElasticAgentControl_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _ElasticAgentControl_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ElasticAgentControlServer).Status(ctx, in) + return srv.(ElasticAgentControlServer).State(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/cproto.ElasticAgentControl/Status", + FullMethod: 
"/cproto.ElasticAgentControl/State", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).Status(ctx, req.(*Empty)) + return srv.(ElasticAgentControlServer).State(ctx, req.(*Empty)) } return interceptor(ctx, in, info, handler) } @@ -311,8 +311,8 @@ var ElasticAgentControl_ServiceDesc = grpc.ServiceDesc{ Handler: _ElasticAgentControl_Version_Handler, }, { - MethodName: "Status", - Handler: _ElasticAgentControl_Status_Handler, + MethodName: "State", + Handler: _ElasticAgentControl_State_Handler, }, { MethodName: "Restart", diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 6d3e5181729..620a5b7b024 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -8,31 +8,20 @@ import ( "context" "encoding/json" "fmt" - "io" "net" - "net/http" - "runtime" - "strings" "sync" - "time" "go.elastic.co/apm" "go.elastic.co/apm/module/apmgrpc" "google.golang.org/grpc" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/control" "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - monitoring "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/socket" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/release" - "github.com/elastic/elastic-agent/internal/pkg/sorted" 
"github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -43,11 +32,8 @@ type Server struct { cproto.UnimplementedElasticAgentControlServer logger *logger.Logger - rex reexec.ExecManager - statusCtrl status.Controller - up *upgrade.Upgrader - routeFn func() *sorted.Set monitoringCfg *monitoringCfg.MonitoringConfig + coord *coordinator.Coordinator listener net.Listener server *grpc.Server tracer *apm.Tracer @@ -65,38 +51,15 @@ type specInfo struct { } // New creates a new control protocol server. -func New(log *logger.Logger, rex reexec.ExecManager, statusCtrl status.Controller, up *upgrade.Upgrader, tracer *apm.Tracer) *Server { +func New(log *logger.Logger, cfg *monitoringCfg.MonitoringConfig, coord *coordinator.Coordinator, tracer *apm.Tracer) *Server { return &Server{ - logger: log, - rex: rex, - statusCtrl: statusCtrl, - tracer: tracer, - up: up, + logger: log, + monitoringCfg: cfg, + coord: coord, + tracer: tracer, } } -// SetUpgrader changes the upgrader. -func (s *Server) SetUpgrader(up *upgrade.Upgrader) { - s.lock.Lock() - defer s.lock.Unlock() - s.up = up -} - -// SetRouteFn changes the route retrieval function. -func (s *Server) SetRouteFn(routesFetchFn func() *sorted.Set) { - s.lock.Lock() - defer s.lock.Unlock() - s.routeFn = routesFetchFn -} - -// SetMonitoringCfg sets a reference to the monitoring config used by the running agent. -// the controller references this config to find out if pprof is enabled for the agent or not -func (s *Server) SetMonitoringCfg(cfg *monitoringCfg.MonitoringConfig) { - s.lock.Lock() - defer s.lock.Unlock() - s.monitoringCfg = cfg -} - // Start starts the GRPC endpoint and accepts new connections. func (s *Server) Start() error { if s.server != nil { @@ -149,19 +112,53 @@ func (s *Server) Version(_ context.Context, _ *cproto.Empty) (*cproto.VersionRes }, nil } -// Status returns the overall status of the agent. 
-func (s *Server) Status(_ context.Context, _ *cproto.Empty) (*cproto.StatusResponse, error) { - status := s.statusCtrl.Status() - return &cproto.StatusResponse{ - Status: agentStatusToProto(status.Status), - Message: status.Message, - Applications: agentAppStatusToProto(status.Applications), +// State returns the overall state of the agent. +func (s *Server) State(_ context.Context, _ *cproto.Empty) (*cproto.StateResponse, error) { + var err error + + state := s.coord.State() + components := make([]*cproto.ComponentState, 0, len(state.Components)) + for _, comp := range state.Components { + units := make([]*cproto.ComponentUnitState, 0, len(comp.State.Units)) + for key, unit := range comp.State.Units { + payload := []byte("") + if unit.Payload != nil { + payload, err = json.Marshal(unit.Payload) + if err != nil { + return nil, fmt.Errorf("failed to marshal component %s unit %s payload: %w", comp.Component.ID, key.UnitID, err) + } + } + units = append(units, &cproto.ComponentUnitState{ + UnitType: cproto.UnitType(key.UnitType), + UnitId: key.UnitID, + State: cproto.State(unit.State), + Message: unit.Message, + Payload: string(payload), + }) + } + components = append(components, &cproto.ComponentState{ + Id: comp.Component.ID, + Name: comp.Component.Spec.BinaryName, + State: cproto.State(comp.State.State), + Message: comp.State.Message, + Units: units, + VersionInfo: &cproto.ComponentVersionInfo{ + Name: comp.State.VersionInfo.Name, + Version: comp.State.VersionInfo.Version, + Meta: comp.State.VersionInfo.Meta, + }, + }) + } + return &cproto.StateResponse{ + State: state.State, + Message: state.Message, + Components: components, + }, nil } // Restart performs re-exec.
func (s *Server) Restart(_ context.Context, _ *cproto.Empty) (*cproto.RestartResponse, error) { - s.rex.ReExec(nil) + s.coord.ReExec(nil) return &cproto.RestartResponse{ Status: cproto.ActionStatus_SUCCESS, }, nil @@ -169,29 +166,13 @@ func (s *Server) Restart(_ context.Context, _ *cproto.Empty) (*cproto.RestartRes // Upgrade performs the upgrade operation. func (s *Server) Upgrade(ctx context.Context, request *cproto.UpgradeRequest) (*cproto.UpgradeResponse, error) { - s.lock.RLock() - u := s.up - s.lock.RUnlock() - if u == nil { - // not running with upgrader (must be controlled by Fleet) - return &cproto.UpgradeResponse{ - Status: cproto.ActionStatus_FAILURE, - Error: "cannot be upgraded; perform upgrading using Fleet", - }, nil - } - cb, err := u.Upgrade(ctx, &upgradeRequest{request}, false) + err := s.coord.Upgrade(ctx, request.Version, request.SourceURI, nil) if err != nil { return &cproto.UpgradeResponse{ //nolint:nilerr // returns err as response Status: cproto.ActionStatus_FAILURE, Error: err.Error(), }, nil } - // perform the re-exec after a 1 second delay - // this ensures that the upgrade response over GRPC is returned - go func() { - <-time.After(time.Second) - s.rex.ReExec(cb) - }() return &cproto.UpgradeResponse{ Status: cproto.ActionStatus_SUCCESS, Version: request.Version, @@ -217,25 +198,28 @@ type BeatInfo struct { // ProcMeta returns version and beat inforation for all running processes. 
func (s *Server) ProcMeta(ctx context.Context, _ *cproto.Empty) (*cproto.ProcMetaResponse, error) { - if s.routeFn == nil { - return nil, errors.New("route function is nil") - } + /* + if s.routeFn == nil { + return nil, errors.New("route function is nil") + } - resp := &cproto.ProcMetaResponse{ - Procs: []*cproto.ProcMeta{}, - } + resp := &cproto.ProcMetaResponse{ + Procs: []*cproto.ProcMeta{}, + } - // gather spec data for all rk/apps running - specs := s.getSpecInfo("", "") - for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) - client := newSocketRequester(si.app, si.rk, endpoint) + // gather spec data for all rk/apps running + specs := s.getSpecInfo("", "") + for _, si := range specs { + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + client := newSocketRequester(si.app, si.rk, endpoint) - procMeta := client.procMeta(ctx) - resp.Procs = append(resp.Procs, procMeta) - } + procMeta := client.procMeta(ctx) + resp.Procs = append(resp.Procs, procMeta) + } - return resp, nil + return resp, nil + */ + return nil, nil } // Pprof returns /debug/pprof data for the requested applicaiont-route_key or all running applications. 
@@ -244,66 +228,69 @@ func (s *Server) Pprof(ctx context.Context, req *cproto.PprofRequest) (*cproto.P return nil, fmt.Errorf("agent.monitoring.pprof disabled") } - if s.routeFn == nil { - return nil, errors.New("route function is nil") - } - - dur, err := time.ParseDuration(req.TraceDuration) - if err != nil { - return nil, fmt.Errorf("unable to parse trace duration: %w", err) - } + /* + if s.routeFn == nil { + return nil, errors.New("route function is nil") + } - resp := &cproto.PprofResponse{ - Results: []*cproto.PprofResult{}, - } + dur, err := time.ParseDuration(req.TraceDuration) + if err != nil { + return nil, fmt.Errorf("unable to parse trace duration: %w", err) + } - var wg sync.WaitGroup - ch := make(chan *cproto.PprofResult, 1) + resp := &cproto.PprofResponse{ + Results: []*cproto.PprofResult{}, + } - // retrieve elastic-agent pprof data if requested or application is unspecified. - if req.AppName == "" || req.AppName == agentName { - endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) - c := newSocketRequester(agentName, "", endpoint) - for _, opt := range req.PprofType { - wg.Add(1) - go func(opt cproto.PprofOption) { - res := c.getPprof(ctx, opt, dur) - ch <- res - wg.Done() - }(opt) + var wg sync.WaitGroup + ch := make(chan *cproto.PprofResult, 1) + + // retrieve elastic-agent pprof data if requested or application is unspecified. 
+ if req.AppName == "" || req.AppName == agentName { + endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) + c := newSocketRequester(agentName, "", endpoint) + for _, opt := range req.PprofType { + wg.Add(1) + go func(opt cproto.PprofOption) { + res := c.getPprof(ctx, opt, dur) + ch <- res + wg.Done() + }(opt) + } } - } - // get requested rk/appname spec or all specs - var specs []specInfo - if req.AppName != agentName { - specs = s.getSpecInfo(req.RouteKey, req.AppName) - } - for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) - c := newSocketRequester(si.app, si.rk, endpoint) - // Launch a concurrent goroutine to gather all pprof endpoints from a socket. - for _, opt := range req.PprofType { - wg.Add(1) - go func(opt cproto.PprofOption) { - res := c.getPprof(ctx, opt, dur) - ch <- res - wg.Done() - }(opt) + // get requested rk/appname spec or all specs + var specs []specInfo + if req.AppName != agentName { + specs = s.getSpecInfo(req.RouteKey, req.AppName) + } + for _, si := range specs { + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + c := newSocketRequester(si.app, si.rk, endpoint) + // Launch a concurrent goroutine to gather all pprof endpoints from a socket. + for _, opt := range req.PprofType { + wg.Add(1) + go func(opt cproto.PprofOption) { + res := c.getPprof(ctx, opt, dur) + ch <- res + wg.Done() + }(opt) + } } - } - // wait for the waitgroup to be done and close the channel - go func() { - wg.Wait() - close(ch) - }() + // wait for the waitgroup to be done and close the channel + go func() { + wg.Wait() + close(ch) + }() - // gather all results from channel until closed. - for res := range ch { - resp.Results = append(resp.Results, res) - } - return resp, nil + // gather all results from channel until closed. 
+ for res := range ch { + resp.Results = append(resp.Results, res) + } + return resp, nil + */ + return nil, nil } // ProcMetrics returns all buffered metrics data for the agent and running processes. @@ -313,32 +300,36 @@ func (s *Server) ProcMetrics(ctx context.Context, _ *cproto.Empty) (*cproto.Proc return &cproto.ProcMetricsResponse{}, nil } - if s.routeFn == nil { - return nil, errors.New("route function is nil") - } + /* + if s.routeFn == nil { + return nil, errors.New("route function is nil") + } - // gather metrics buffer data from the elastic-agent - endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) - c := newSocketRequester(agentName, "", endpoint) - metrics := c.procMetrics(ctx) + // gather metrics buffer data from the elastic-agent + endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) + c := newSocketRequester(agentName, "", endpoint) + metrics := c.procMetrics(ctx) - resp := &cproto.ProcMetricsResponse{ - Result: []*cproto.MetricsResponse{metrics}, - } + resp := &cproto.ProcMetricsResponse{ + Result: []*cproto.MetricsResponse{metrics}, + } - // gather metrics buffer data from all other processes - specs := s.getSpecInfo("", "") - for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) - client := newSocketRequester(si.app, si.rk, endpoint) + // gather metrics buffer data from all other processes + specs := s.getSpecInfo("", "") + for _, si := range specs { + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + client := newSocketRequester(si.app, si.rk, endpoint) - s.logger.Infof("gather metrics from %s", endpoint) - metrics := client.procMetrics(ctx) - resp.Result = append(resp.Result, metrics) - } - return resp, nil + s.logger.Infof("gather metrics from %s", endpoint) + metrics := client.procMetrics(ctx) + resp.Result = append(resp.Result, metrics) + } + return resp, nil + */ + return nil, nil } +/* // getSpecs will return 
the specs for the program associated with the specified route key/app name, or all programs if no key(s) are specified. // if matchRK or matchApp are empty all results will be returned. func (s *Server) getSpecInfo(matchRK, matchApp string) []specInfo { @@ -554,48 +545,4 @@ func (r *socketRequester) procMetrics(ctx context.Context) *cproto.MetricsRespon res.Result = p return res } - -type upgradeRequest struct { - *cproto.UpgradeRequest -} - -func (r *upgradeRequest) Version() string { - return r.GetVersion() -} - -func (r *upgradeRequest) SourceURI() string { - return r.GetSourceURI() -} - -func (r *upgradeRequest) FleetAction() *fleetapi.ActionUpgrade { - // upgrade request not from Fleet - return nil -} - -func agentStatusToProto(code status.AgentStatusCode) cproto.Status { - if code == status.Degraded { - return cproto.Status_DEGRADED - } - if code == status.Failed { - return cproto.Status_FAILED - } - return cproto.Status_HEALTHY -} - -func agentAppStatusToProto(apps []status.AgentApplicationStatus) []*cproto.ApplicationStatus { - s := make([]*cproto.ApplicationStatus, len(apps)) - for i, a := range apps { - var payload []byte - if a.Payload != nil { - payload, _ = json.Marshal(a.Payload) - } - s[i] = &cproto.ApplicationStatus{ - Id: a.ID, - Name: a.Name, - Status: cproto.Status(a.Status.ToProto()), - Message: a.Message, - Payload: string(payload), - } - } - return s -} +*/ diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index 6b4e717fa73..b43db32892d 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -12,7 +12,6 @@ import ( "path/filepath" "runtime" "strings" - "sync" "github.com/kardianos/service" @@ -29,7 +28,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/config/operations" "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/status" 
"github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -233,19 +231,12 @@ func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) inputs, ok := transpiler.Lookup(ast, "inputs") if ok { varsArray := make([]*transpiler.Vars, 0) - var wg sync.WaitGroup - wg.Add(1) - varsCallback := func(vv []*transpiler.Vars) { - varsArray = vv - wg.Done() - } ctrl, err := composable.New(log, cfg) if err != nil { return nil, err } - _ = ctrl.Run(ctx, varsCallback) - wg.Wait() + _ = ctrl.Run(ctx) renderedInputs, err := transpiler.RenderInputs(inputs, varsArray) if err != nil { @@ -258,7 +249,7 @@ func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) } // apply caps - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, status.NewController(log)) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log) if err != nil { return nil, err } diff --git a/internal/pkg/agent/storage/store/state_store.go b/internal/pkg/agent/storage/store/state_store.go index 3316b34960b..8a6d3fc5e8d 100644 --- a/internal/pkg/agent/storage/store/state_store.go +++ b/internal/pkg/agent/storage/store/state_store.go @@ -16,23 +16,18 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) type dispatcher interface { - Dispatch(context.Context, FleetAcker, ...action) error + Dispatch(context.Context, acker.Acker, ...action) error } type store interface { Save(io.Reader) error } -// FleetAcker is an acker of actions to fleet. 
-type FleetAcker interface { - Ack(ctx context.Context, action fleetapi.Action) error - Commit(ctx context.Context) error -} - type storeLoad interface { store Load() (io.ReadCloser, error) @@ -93,7 +88,7 @@ func NewStateStoreWithMigration(log *logger.Logger, actionStorePath, stateStoreP } // NewStateStoreActionAcker creates a new state store backed action acker. -func NewStateStoreActionAcker(acker FleetAcker, store *StateStore) *StateStoreActionAcker { +func NewStateStoreActionAcker(acker acker.Acker, store *StateStore) *StateStoreActionAcker { return &StateStoreActionAcker{acker: acker, store: store} } @@ -326,7 +321,7 @@ func (s *StateStore) AckToken() string { // its up to the action store to decide if we need to persist the event for future replay or just // discard the event. type StateStoreActionAcker struct { - acker FleetAcker + acker acker.Acker store *StateStore } @@ -350,7 +345,7 @@ func ReplayActions( ctx context.Context, log *logger.Logger, dispatcher dispatcher, - acker FleetAcker, + acker acker.Acker, actions ...action, ) error { log.Info("restoring current policy from disk") diff --git a/internal/pkg/capabilities/capabilities.go b/internal/pkg/capabilities/capabilities.go index fa360a53794..3d03fab9296 100644 --- a/internal/pkg/capabilities/capabilities.go +++ b/internal/pkg/capabilities/capabilities.go @@ -8,11 +8,8 @@ import ( "errors" "os" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "gopkg.in/yaml.v2" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -30,14 +27,13 @@ var ( ) type capabilitiesManager struct { - caps []Capability - reporter status.Reporter + caps []Capability } -type capabilityFactory func(*logger.Logger, *ruleDefinitions, status.Reporter) (Capability, error) +type capabilityFactory func(*logger.Logger, *ruleDefinitions) (Capability, error) // Load loads capabilities files and prepares manager. 
-func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability, error) { +func Load(capsFile string, log *logger.Logger) (Capability, error) { handlers := []capabilityFactory{ newInputsCapability, newOutputsCapability, @@ -45,8 +41,7 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability } cm := &capabilitiesManager{ - caps: make([]Capability, 0), - reporter: sc.RegisterComponentWithPersistance("capabilities", true), + caps: make([]Capability, 0), } // load capabilities from file @@ -56,7 +51,7 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability } if os.IsNotExist(err) { - log.Infof("capabilities file not found in %s", capsFile) + log.Infof("Capabilities file not found in %s", capsFile) return cm, nil } defer fd.Close() @@ -69,7 +64,7 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability // make list of handlers out of capabilities definition for _, h := range handlers { - cap, err := h(log, definitions, cm.reporter) + cap, err := h(log, definitions) if err != nil { return nil, err } @@ -86,8 +81,6 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability func (mgr *capabilitiesManager) Apply(in interface{}) (interface{}, error) { var err error - // reset health on start, child caps will update to fail if needed - mgr.reporter.Update(state.Healthy, "", nil) for _, cap := range mgr.caps { in, err = cap.Apply(in) if err != nil { diff --git a/internal/pkg/capabilities/input.go b/internal/pkg/capabilities/input.go index 7ebc4b4fb15..2428c49f064 100644 --- a/internal/pkg/capabilities/input.go +++ b/internal/pkg/capabilities/input.go @@ -7,11 +7,8 @@ package capabilities import ( "fmt" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/core/status" 
"github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -19,7 +16,7 @@ const ( inputsKey = "inputs" ) -func newInputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter status.Reporter) (Capability, error) { +func newInputsCapability(log *logger.Logger, rd *ruleDefinitions) (Capability, error) { if rd == nil { return &multiInputsCapability{log: log, caps: []*inputCapability{}}, nil } @@ -27,7 +24,7 @@ func newInputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter statu caps := make([]*inputCapability, 0, len(rd.Capabilities)) for _, r := range rd.Capabilities { - c, err := newInputCapability(log, r, reporter) + c, err := newInputCapability(log, r) if err != nil { return nil, err } @@ -40,23 +37,21 @@ func newInputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter statu return &multiInputsCapability{log: log, caps: caps}, nil } -func newInputCapability(log *logger.Logger, r ruler, reporter status.Reporter) (*inputCapability, error) { +func newInputCapability(log *logger.Logger, r ruler) (*inputCapability, error) { cap, ok := r.(*inputCapability) if !ok { return nil, nil } cap.log = log - cap.reporter = reporter return cap, nil } type inputCapability struct { - log *logger.Logger - reporter status.Reporter - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"rule" yaml:"rule"` - Input string `json:"input" yaml:"input"` + log *logger.Logger + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Type string `json:"rule" yaml:"rule"` + Input string `json:"input" yaml:"input"` } func (c *inputCapability) Apply(cfgMap map[string]interface{}) (map[string]interface{}, error) { @@ -166,7 +161,6 @@ func (c *inputCapability) renderInputs(inputs []map[string]interface{}) ([]map[s if !isSupported { msg := fmt.Sprintf("input '%s' is not run due to capability restriction '%s'", inputType, c.name()) c.log.Infof(msg) - c.reporter.Update(state.Degraded, msg, nil) } newInputs = append(newInputs, input) diff --git 
a/internal/pkg/capabilities/output.go b/internal/pkg/capabilities/output.go index de11c3ce3b9..804ca64faa2 100644 --- a/internal/pkg/capabilities/output.go +++ b/internal/pkg/capabilities/output.go @@ -7,10 +7,7 @@ package capabilities import ( "fmt" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -19,7 +16,7 @@ const ( typeKey = "type" ) -func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter status.Reporter) (Capability, error) { +func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions) (Capability, error) { if rd == nil { return &multiOutputsCapability{log: log, caps: []*outputCapability{}}, nil } @@ -27,7 +24,7 @@ func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter stat caps := make([]*outputCapability, 0, len(rd.Capabilities)) for _, r := range rd.Capabilities { - c, err := newOutputCapability(log, r, reporter) + c, err := newOutputCapability(log, r) if err != nil { return nil, err } @@ -40,23 +37,21 @@ func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter stat return &multiOutputsCapability{log: log, caps: caps}, nil } -func newOutputCapability(log *logger.Logger, r ruler, reporter status.Reporter) (*outputCapability, error) { +func newOutputCapability(log *logger.Logger, r ruler) (*outputCapability, error) { cap, ok := r.(*outputCapability) if !ok { return nil, nil } cap.log = log - cap.reporter = reporter return cap, nil } type outputCapability struct { - log *logger.Logger - reporter status.Reporter - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"rule" yaml:"rule"` - Output string `json:"output" yaml:"output"` + log *logger.Logger + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Type string `json:"rule" yaml:"rule"` + Output string 
`json:"output" yaml:"output"` } func (c *outputCapability) Apply(cfgMap map[string]interface{}) (map[string]interface{}, error) { @@ -133,7 +128,6 @@ func (c *outputCapability) renderOutputs(outputs map[string]interface{}) (map[st if !isSupported { msg := fmt.Sprintf("output '%s' is left out due to capability restriction '%s'", outputName, c.name()) c.log.Errorf(msg) - c.reporter.Update(state.Degraded, msg, nil) } } diff --git a/internal/pkg/capabilities/upgrade.go b/internal/pkg/capabilities/upgrade.go index e39c963e222..2773f6c9709 100644 --- a/internal/pkg/capabilities/upgrade.go +++ b/internal/pkg/capabilities/upgrade.go @@ -8,12 +8,8 @@ import ( "fmt" "strings" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/eql" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -26,7 +22,7 @@ const ( // Available variables: // - version // - source_uri -func newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions, reporter status.Reporter) (Capability, error) { +func newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions) (Capability, error) { if rd == nil { return &multiUpgradeCapability{caps: []*upgradeCapability{}}, nil } @@ -34,7 +30,7 @@ func newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions, reporter sta caps := make([]*upgradeCapability, 0, len(rd.Capabilities)) for _, r := range rd.Capabilities { - c, err := newUpgradeCapability(log, r, reporter) + c, err := newUpgradeCapability(log, r) if err != nil { return nil, err } @@ -47,7 +43,7 @@ func newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions, reporter sta return &multiUpgradeCapability{log: log, caps: caps}, nil } -func newUpgradeCapability(log *logger.Logger, r ruler, reporter status.Reporter) (*upgradeCapability, error) { +func 
newUpgradeCapability(log *logger.Logger, r ruler) (*upgradeCapability, error) { cap, ok := r.(*upgradeCapability) if !ok { return nil, nil @@ -70,15 +66,13 @@ func newUpgradeCapability(log *logger.Logger, r ruler, reporter status.Reporter) cap.upgradeEql = eqlExp cap.log = log - cap.reporter = reporter return cap, nil } type upgradeCapability struct { - log *logger.Logger - reporter status.Reporter - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"rule" yaml:"rule"` + log *logger.Logger + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Type string `json:"rule" yaml:"rule"` // UpgradeEql is eql expression defining upgrade UpgradeEqlDefinition string `json:"upgrade" yaml:"upgrade"` @@ -129,7 +123,6 @@ func (c *upgradeCapability) Apply(upgradeMap map[string]interface{}) (map[string isSupported = !isSupported msg := fmt.Sprintf("upgrade is blocked out due to capability restriction '%s'", c.name()) c.log.Errorf(msg) - c.reporter.Update(state.Degraded, msg, nil) } if !isSupported { @@ -163,31 +156,8 @@ func (c *multiUpgradeCapability) Apply(in interface{}) (interface{}, error) { } func upgradeObject(a interface{}) map[string]interface{} { - resultMap := make(map[string]interface{}) - if ua, ok := a.(upgradeAction); ok { - resultMap[versionKey] = ua.Version() - resultMap[sourceURIKey] = ua.SourceURI() - return resultMap + if m, ok := a.(map[string]interface{}); ok { + return m } - - if ua, ok := a.(*fleetapi.ActionUpgrade); ok { - resultMap[versionKey] = ua.Version - resultMap[sourceURIKey] = ua.SourceURI - return resultMap - } - - if ua, ok := a.(fleetapi.ActionUpgrade); ok { - resultMap[versionKey] = ua.Version - resultMap[sourceURIKey] = ua.SourceURI - return resultMap - } - return nil } - -type upgradeAction interface { - // Version to upgrade to. - Version() string - // SourceURI for download. 
- SourceURI() string -} diff --git a/internal/pkg/composable/context.go b/internal/pkg/composable/context.go index 1dcb50cf956..97767f4a5d5 100644 --- a/internal/pkg/composable/context.go +++ b/internal/pkg/composable/context.go @@ -16,6 +16,14 @@ import ( // ContextProviderBuilder creates a new context provider based on the given config and returns it. type ContextProviderBuilder func(log *logger.Logger, config *config.Config) (corecomp.ContextProvider, error) +// MustAddContextProvider adds a new ContextProviderBuilder and panics if it AddContextProvider returns an error. +func (r *providerRegistry) MustAddContextProvider(name string, builder ContextProviderBuilder) { + err := r.AddContextProvider(name, builder) + if err != nil { + panic(err) + } +} + // AddContextProvider adds a new ContextProviderBuilder func (r *providerRegistry) AddContextProvider(name string, builder ContextProviderBuilder) error { r.lock.Lock() diff --git a/internal/pkg/composable/controller.go b/internal/pkg/composable/controller.go index a14e111194f..116424ae8e4 100644 --- a/internal/pkg/composable/controller.go +++ b/internal/pkg/composable/controller.go @@ -22,19 +22,25 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) -// VarsCallback is callback called when the current vars state changes. -type VarsCallback func([]*transpiler.Vars) - // Controller manages the state of the providers current context. type Controller interface { // Run runs the controller. // // Cancelling the context stops the controller. - Run(ctx context.Context, cb VarsCallback) error + Run(ctx context.Context) error + + // Errors returns the channel to watch for reported errors. + Errors() <-chan error + + // Watch returns the channel to watch for variable changes. + Watch() <-chan []*transpiler.Vars } // controller manages the state of the providers current context. 
type controller struct { + logger *logger.Logger + ch chan []*transpiler.Vars + errCh chan error contextProviders map[string]*contextProviderState dynamicProviders map[string]*dynamicProviderState } @@ -87,28 +93,40 @@ func New(log *logger.Logger, c *config.Config) (Controller, error) { } return &controller{ + logger: l, + ch: make(chan []*transpiler.Vars), + errCh: make(chan error), contextProviders: contextProviders, dynamicProviders: dynamicProviders, }, nil } // Run runs the controller. -func (c *controller) Run(ctx context.Context, cb VarsCallback) error { - // large number not to block performing Run on the provided providers - notify := make(chan bool, 5000) +func (c *controller) Run(ctx context.Context) error { + c.logger.Debugf("Starting controller for composable inputs") + defer c.logger.Debugf("Stopped controller for composable inputs") + + notify := make(chan bool) localCtx, cancel := context.WithCancel(ctx) + defer cancel() fetchContextProviders := mapstr.M{} + var wg sync.WaitGroup + wg.Add(len(c.contextProviders) + len(c.dynamicProviders)) + // run all the enabled context providers for name, state := range c.contextProviders { state.Context = localCtx state.signal = notify - err := state.provider.Run(state) - if err != nil { - cancel() - return errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) - } + go func(name string, state *contextProviderState) { + defer wg.Done() + err := state.provider.Run(state) + if err != nil && !errors.Is(err, context.Canceled) { + err = errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) + c.logger.Errorf("%s", err) + } + }(name, state) if p, ok := state.provider.(corecomp.FetchContextProvider); ok { _, _ = fetchContextProviders.Put(name, p) } @@ -118,65 +136,73 @@ func (c *controller) Run(ctx context.Context, cb VarsCallback) error { for name, state := range c.dynamicProviders { state.Context = 
localCtx state.signal = notify - err := state.provider.Run(state) - if err != nil { - cancel() - return errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) - } + go func(name string, state *dynamicProviderState) { + defer wg.Done() + err := state.provider.Run(state) + if err != nil && !errors.Is(err, context.Canceled) { + err = errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) + c.logger.Errorf("%s", err) + } + }(name, state) } - go func() { + c.logger.Debugf("Started controller for composable inputs") + + // performs debounce of notifies; accumulates them into 100 millisecond chunks + t := time.NewTimer(100 * time.Millisecond) + for { + DEBOUNCE: for { - // performs debounce of notifies; accumulates them into 100 millisecond chunks - changed := false - t := time.NewTimer(100 * time.Millisecond) - for { - exitloop := false - select { - case <-ctx.Done(): - cancel() - return - case <-notify: - changed = true - case <-t.C: - exitloop = true - } - if exitloop { - break - } + select { + case <-ctx.Done(): + c.logger.Debugf("Stopping controller for composable inputs") + t.Stop() + cancel() + wg.Wait() + return ctx.Err() + case <-notify: + t.Reset(100 * time.Millisecond) + c.logger.Debugf("Variable state changed for composable inputs; debounce started") + drainChan(notify) + case <-t.C: + break DEBOUNCE } + } - t.Stop() - if !changed { - continue - } + c.logger.Debugf("Computing new variable state for composable inputs") - // build the vars list of mappings - vars := make([]*transpiler.Vars, 1) - mapping := map[string]interface{}{} - for name, state := range c.contextProviders { - mapping[name] = state.Current() - } - // this is ensured not to error, by how the mappings states are verified - vars[0], _ = transpiler.NewVars(mapping, fetchContextProviders) - - // add to the vars list for each dynamic providers mappings - for name, state := range 
c.dynamicProviders { - for _, mappings := range state.Mappings() { - local, _ := cloneMap(mapping) // will not fail; already been successfully cloned once - local[name] = mappings.mapping - // this is ensured not to error, by how the mappings states are verified - v, _ := transpiler.NewVarsWithProcessors(local, name, mappings.processors, fetchContextProviders) - vars = append(vars, v) - } + // build the vars list of mappings + vars := make([]*transpiler.Vars, 1) + mapping := map[string]interface{}{} + for name, state := range c.contextProviders { + mapping[name] = state.Current() + } + // this is ensured not to error, by how the mappings states are verified + vars[0], _ = transpiler.NewVars(mapping, fetchContextProviders) + + // add to the vars list for each dynamic providers mappings + for name, state := range c.dynamicProviders { + for _, mappings := range state.Mappings() { + local, _ := cloneMap(mapping) // will not fail; already been successfully cloned once + local[name] = mappings.mapping + // this is ensured not to error, by how the mappings states are verified + v, _ := transpiler.NewVarsWithProcessors(local, name, mappings.processors, fetchContextProviders) + vars = append(vars, v) } - - // execute the callback - cb(vars) } - }() - return nil + c.ch <- vars + } +} + +// Errors returns the channel to watch for reported errors. +func (c *controller) Errors() <-chan error { + return c.errCh +} + +// Watch returns the channel for variable changes. 
+func (c *controller) Watch() <-chan []*transpiler.Vars { + return c.ch } type contextProviderState struct { @@ -351,3 +377,13 @@ func addToSet(set []int, i int) []int { } return append(set, i) } + +func drainChan(ch chan bool) { + for { + select { + case <-ch: + default: + return + } + } +} diff --git a/internal/pkg/composable/controller_test.go b/internal/pkg/composable/controller_test.go index 09780767928..a8c3ec7df93 100644 --- a/internal/pkg/composable/controller_test.go +++ b/internal/pkg/composable/controller_test.go @@ -6,8 +6,9 @@ package composable_test import ( "context" - "sync" + "errors" "testing" + "time" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -80,17 +81,34 @@ func TestController(t *testing.T) { c, err := composable.New(log, cfg) require.NoError(t, err) - var wg sync.WaitGroup ctx, cancel := context.WithCancel(context.Background()) defer cancel() - wg.Add(1) + + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 1*time.Second) + defer timeoutCancel() + var setVars []*transpiler.Vars - err = c.Run(ctx, func(vars []*transpiler.Vars) { - setVars = vars - wg.Done() - }) + go func() { + defer cancel() + for { + select { + case <-timeoutCtx.Done(): + return + case vars := <-c.Watch(): + setVars = vars + } + } + }() + + errCh := make(chan error) + go func() { + errCh <- c.Run(ctx) + }() + err = <-errCh + if errors.Is(err, context.Canceled) { + err = nil + } require.NoError(t, err) - wg.Wait() assert.Len(t, setVars, 3) @@ -99,14 +117,17 @@ func TestController(t *testing.T) { _, envExists := setVars[0].Lookup("env") assert.False(t, envExists) local, _ := setVars[0].Lookup("local") - localMap := local.(map[string]interface{}) + localMap, ok := local.(map[string]interface{}) + require.True(t, ok) assert.Equal(t, "value1", localMap["key1"]) local, _ = setVars[1].Lookup("local_dynamic") - localMap = local.(map[string]interface{}) + localMap, ok = local.(map[string]interface{}) + require.True(t, ok) assert.Equal(t, "value1", localMap["key1"]) 
local, _ = setVars[2].Lookup("local_dynamic") - localMap = local.(map[string]interface{}) + localMap, ok = local.(map[string]interface{}) + require.True(t, ok) assert.Equal(t, "value2", localMap["key1"]) } diff --git a/internal/pkg/composable/dynamic.go b/internal/pkg/composable/dynamic.go index a0de3543a1c..c83c2ccc2e2 100644 --- a/internal/pkg/composable/dynamic.go +++ b/internal/pkg/composable/dynamic.go @@ -36,6 +36,14 @@ type DynamicProvider interface { // DynamicProviderBuilder creates a new dynamic provider based on the given config and returns it. type DynamicProviderBuilder func(log *logger.Logger, config *config.Config) (DynamicProvider, error) +// MustAddDynamicProvider adds a new DynamicProviderBuilder and panics if it AddDynamicProvider returns an error. +func (r *providerRegistry) MustAddDynamicProvider(name string, builder DynamicProviderBuilder) { + err := r.AddDynamicProvider(name, builder) + if err != nil { + panic(err) + } +} + // AddDynamicProvider adds a new DynamicProviderBuilder func (r *providerRegistry) AddDynamicProvider(name string, builder DynamicProviderBuilder) error { r.lock.Lock() diff --git a/internal/pkg/composable/providers/agent/agent.go b/internal/pkg/composable/providers/agent/agent.go index 2b9d0ff3deb..ed8eb956afe 100644 --- a/internal/pkg/composable/providers/agent/agent.go +++ b/internal/pkg/composable/providers/agent/agent.go @@ -15,7 +15,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("agent", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("agent", ContextProviderBuilder) } type contextProvider struct{} diff --git a/internal/pkg/composable/providers/docker/docker.go b/internal/pkg/composable/providers/docker/docker.go index 4bdc6d11cfe..8647677e6e8 100644 --- a/internal/pkg/composable/providers/docker/docker.go +++ b/internal/pkg/composable/providers/docker/docker.go @@ -23,7 +23,7 @@ import ( const ContainerPriority = 0 func init() { - _ = 
composable.Providers.AddDynamicProvider("docker", DynamicProviderBuilder) + composable.Providers.MustAddDynamicProvider("docker", DynamicProviderBuilder) } type dockerContainerData struct { @@ -54,54 +54,51 @@ func (c *dynamicProvider) Run(comm composable.DynamicProviderComm) error { c.logger.Infof("Docker provider skipped, unable to connect: %s", err) return nil } + defer watcher.Stop() - go func() { - for { - select { - case <-comm.Done(): - startListener.Stop() - stopListener.Stop() + for { + select { + case <-comm.Done(): + startListener.Stop() + stopListener.Stop() - // Stop all timers before closing the channel - for _, stopper := range stoppers { - stopper.Stop() - } - close(stopTrigger) - return - case event := <-startListener.Events(): - data, err := generateData(event) - if err != nil { - c.logger.Errorf("%s", err) - continue - } - if stopper, ok := stoppers[data.container.ID]; ok { - c.logger.Debugf("container %s is restarting, aborting pending stop", data.container.ID) - stopper.Stop() - delete(stoppers, data.container.ID) - return - } - err = comm.AddOrUpdate(data.container.ID, ContainerPriority, data.mapping, data.processors) - if err != nil { - c.logger.Errorf("%s", err) - } - case event := <-stopListener.Events(): - data, err := generateData(event) - if err != nil { - c.logger.Errorf("%s", err) - continue - } - stopper := time.AfterFunc(c.config.CleanupTimeout, func() { - stopTrigger <- data - }) - stoppers[data.container.ID] = stopper - case data := <-stopTrigger: + // Stop all timers before closing the channel + for _, stopper := range stoppers { + stopper.Stop() + } + close(stopTrigger) + return comm.Err() + case event := <-startListener.Events(): + data, err := generateData(event) + if err != nil { + c.logger.Errorf("%s", err) + continue + } + if stopper, ok := stoppers[data.container.ID]; ok { + c.logger.Debugf("container %s is restarting, aborting pending stop", data.container.ID) + stopper.Stop() delete(stoppers, data.container.ID) - 
comm.Remove(data.container.ID) + continue + } + err = comm.AddOrUpdate(data.container.ID, ContainerPriority, data.mapping, data.processors) + if err != nil { + c.logger.Errorf("%s", err) } + case event := <-stopListener.Events(): + data, err := generateData(event) + if err != nil { + c.logger.Errorf("%s", err) + continue + } + stopper := time.AfterFunc(c.config.CleanupTimeout, func() { + stopTrigger <- data + }) + stoppers[data.container.ID] = stopper + case data := <-stopTrigger: + delete(stoppers, data.container.ID) + comm.Remove(data.container.ID) } - }() - - return nil + } } // DynamicProviderBuilder builds the dynamic provider. diff --git a/internal/pkg/composable/providers/env/env.go b/internal/pkg/composable/providers/env/env.go index 4c6b5911f47..6f65120de48 100644 --- a/internal/pkg/composable/providers/env/env.go +++ b/internal/pkg/composable/providers/env/env.go @@ -16,7 +16,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("env", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("env", ContextProviderBuilder) } type contextProvider struct{} diff --git a/internal/pkg/composable/providers/host/host.go b/internal/pkg/composable/providers/host/host.go index 25d53430a2f..cc98021e77b 100644 --- a/internal/pkg/composable/providers/host/host.go +++ b/internal/pkg/composable/providers/host/host.go @@ -24,7 +24,7 @@ import ( const DefaultCheckInterval = 5 * time.Minute func init() { - composable.Providers.AddContextProvider("host", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("host", ContextProviderBuilder) } type infoFetcher func() (map[string]interface{}, error) @@ -50,34 +50,30 @@ func (c *contextProvider) Run(comm corecomp.ContextProviderComm) error { } // Update context when any host information changes. 
- go func() { - for { - t := time.NewTimer(c.CheckInterval) - select { - case <-comm.Done(): - t.Stop() - return - case <-t.C: - } - - updated, err := c.fetcher() - if err != nil { - c.logger.Warnf("Failed fetching latest host information: %s", err) - continue - } - if reflect.DeepEqual(current, updated) { - // nothing to do - continue - } - current = updated - err = comm.Set(updated) - if err != nil { - c.logger.Errorf("Failed updating mapping to latest host information: %s", err) - } + for { + t := time.NewTimer(c.CheckInterval) + select { + case <-comm.Done(): + t.Stop() + return comm.Err() + case <-t.C: } - }() - return nil + updated, err := c.fetcher() + if err != nil { + c.logger.Warnf("Failed fetching latest host information: %s", err) + continue + } + if reflect.DeepEqual(current, updated) { + // nothing to do + continue + } + current = updated + err = comm.Set(updated) + if err != nil { + c.logger.Errorf("Failed updating mapping to latest host information: %s", err) + } + } } // ContextProviderBuilder builds the context provider. 
diff --git a/internal/pkg/composable/providers/host/host_test.go b/internal/pkg/composable/providers/host/host_test.go index 8e117fcbeb4..869f6a82050 100644 --- a/internal/pkg/composable/providers/host/host_test.go +++ b/internal/pkg/composable/providers/host/host_test.go @@ -41,15 +41,28 @@ func TestContextProvider(t *testing.T) { require.Equal(t, 100*time.Millisecond, hostProvider.CheckInterval) ctx, cancel := context.WithCancel(context.Background()) + defer cancel() comm := ctesting.NewContextComm(ctx) - err = provider.Run(comm) + + go func() { + err = provider.Run(comm) + }() + + // wait for it to be called once + var wg sync.WaitGroup + wg.Add(1) + comm.CallOnSet(func() { + wg.Done() + }) + wg.Wait() + comm.CallOnSet(nil) + require.NoError(t, err) starting, err = ctesting.CloneMap(starting) require.NoError(t, err) require.Equal(t, starting, comm.Current()) // wait for it to be called again - var wg sync.WaitGroup wg.Add(1) comm.CallOnSet(func() { wg.Done() diff --git a/internal/pkg/composable/providers/kubernetes/kubernetes.go b/internal/pkg/composable/providers/kubernetes/kubernetes.go index 91367c5252f..9f43522f2da 100644 --- a/internal/pkg/composable/providers/kubernetes/kubernetes.go +++ b/internal/pkg/composable/providers/kubernetes/kubernetes.go @@ -30,7 +30,7 @@ const ( const nodeScope = "node" func init() { - _ = composable.Providers.AddDynamicProvider("kubernetes", DynamicProviderBuilder) + composable.Providers.MustAddDynamicProvider("kubernetes", DynamicProviderBuilder) } type dynamicProvider struct { @@ -54,37 +54,51 @@ func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable // Run runs the kubernetes context provider. 
func (p *dynamicProvider) Run(comm composable.DynamicProviderComm) error { + eventers := make([]Eventer, 0, 3) if p.config.Resources.Pod.Enabled { - err := p.watchResource(comm, "pod") + eventer, err := p.watchResource(comm, "pod") if err != nil { return err } + if eventer != nil { + eventers = append(eventers, eventer) + } } if p.config.Resources.Node.Enabled { - err := p.watchResource(comm, nodeScope) + eventer, err := p.watchResource(comm, nodeScope) if err != nil { return err } + if eventer != nil { + eventers = append(eventers, eventer) + } } if p.config.Resources.Service.Enabled { - err := p.watchResource(comm, "service") + eventer, err := p.watchResource(comm, "service") if err != nil { return err } + if eventer != nil { + eventers = append(eventers, eventer) + } + } + <-comm.Done() + for _, eventer := range eventers { + eventer.Stop() } - return nil + return comm.Err() } // watchResource initializes the proper watcher according to the given resource (pod, node, service) // and starts watching for such resource's events. func (p *dynamicProvider) watchResource( comm composable.DynamicProviderComm, - resourceType string) error { + resourceType string) (Eventer, error) { client, err := kubernetes.GetKubernetesClient(p.config.KubeConfig, p.config.KubeClientOptions) if err != nil { // info only; return nil (do nothing) p.logger.Debugf("Kubernetes provider for resource %s skipped, unable to connect: %s", resourceType, err) - return nil + return nil, nil } // Ensure that node is set correctly whenever the scope is set to "node". 
Make sure that node is empty @@ -105,7 +119,7 @@ func (p *dynamicProvider) watchResource( p.config.Node, err = kubernetes.DiscoverKubernetesNode(p.logger, nd) if err != nil { p.logger.Debugf("Kubernetes provider skipped, unable to discover node: %w", err) - return nil + return nil, nil } } else { @@ -114,15 +128,15 @@ func (p *dynamicProvider) watchResource( eventer, err := p.newEventer(resourceType, comm, client) if err != nil { - return errors.New(err, "couldn't create kubernetes watcher for resource %s", resourceType) + return nil, errors.New(err, "couldn't create kubernetes watcher for resource %s", resourceType) } err = eventer.Start() if err != nil { - return errors.New(err, "couldn't start kubernetes eventer for resource %s", resourceType) + return nil, errors.New(err, "couldn't start kubernetes eventer for resource %s", resourceType) } - return nil + return eventer, nil } // Eventer allows defining ways in which kubernetes resource events are observed and processed diff --git a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go index 410e13ec77d..d0d773d1663 100644 --- a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go +++ b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go @@ -23,15 +23,13 @@ import ( ) func init() { - _ = composable.Providers.AddContextProvider("kubernetes_leaderelection", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("kubernetes_leaderelection", ContextProviderBuilder) } type contextProvider struct { - logger *logger.Logger - config *Config - comm corecomp.ContextProviderComm - leaderElection *leaderelection.LeaderElectionConfig - cancelLeaderElection context.CancelFunc + logger *logger.Logger + config *Config + leaderElection *leaderelection.LeaderElectionConfig } // ContextProviderBuilder builds the provider. 
@@ -44,7 +42,7 @@ func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.C if err != nil { return nil, errors.New(err, "failed to unpack configuration") } - return &contextProvider{logger, &cfg, nil, nil, nil}, nil + return &contextProvider{logger, &cfg, nil}, nil } // Run runs the leaderelection provider. @@ -91,57 +89,43 @@ func (p *contextProvider) Run(comm corecomp.ContextProviderComm) error { Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { p.logger.Debugf("leader election lock GAINED, id %v", id) - p.startLeading() + p.startLeading(comm) }, OnStoppedLeading: func() { p.logger.Debugf("leader election lock LOST, id %v", id) - p.stopLeading() + p.stopLeading(comm) }, }, } - ctx, cancel := context.WithCancel(context.TODO()) - p.cancelLeaderElection = cancel - p.comm = comm - p.startLeaderElector(ctx) - return nil -} - -// startLeaderElector starts a Leader Elector in the background with the provided config -func (p *contextProvider) startLeaderElector(ctx context.Context) { le, err := leaderelection.NewLeaderElector(*p.leaderElection) if err != nil { p.logger.Errorf("error while creating Leader Elector: %v", err) } p.logger.Debugf("Starting Leader Elector") - go le.Run(ctx) + le.Run(comm) + p.logger.Debugf("Stopped Leader Elector") + return comm.Err() } -func (p *contextProvider) startLeading() { +func (p *contextProvider) startLeading(comm corecomp.ContextProviderComm) { mapping := map[string]interface{}{ "leader": true, } - err := p.comm.Set(mapping) + err := comm.Set(mapping) if err != nil { p.logger.Errorf("Failed updating leaderelection status to leader TRUE: %s", err) } } -func (p *contextProvider) stopLeading() { +func (p *contextProvider) stopLeading(comm corecomp.ContextProviderComm) { mapping := map[string]interface{}{ "leader": false, } - err := p.comm.Set(mapping) + err := comm.Set(mapping) if err != nil { p.logger.Errorf("Failed updating leaderelection status to leader FALSE: %s", err) } 
} - -// Stop signals the stop channel to force the leader election loop routine to stop. -func (p *contextProvider) Stop() { - if p.cancelLeaderElection != nil { - p.cancelLeaderElection() - } -} diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go index 0bc560295ed..d6e8190c13a 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go @@ -7,6 +7,7 @@ package kubernetessecrets import ( "context" "strings" + "sync" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sclient "k8s.io/client-go/kubernetes" @@ -23,14 +24,15 @@ var _ corecomp.FetchContextProvider = (*contextProviderK8sSecrets)(nil) var getK8sClientFunc = getK8sClient func init() { - _ = composable.Providers.AddContextProvider("kubernetes_secrets", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("kubernetes_secrets", ContextProviderBuilder) } type contextProviderK8sSecrets struct { logger *logger.Logger config *Config - client k8sclient.Interface + clientMx sync.Mutex + client k8sclient.Interface } // ContextProviderBuilder builds the context provider. 
@@ -43,12 +45,18 @@ func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.C if err != nil { return nil, errors.New(err, "failed to unpack configuration") } - return &contextProviderK8sSecrets{logger, &cfg, nil}, nil + return &contextProviderK8sSecrets{ + logger: logger, + config: &cfg, + }, nil } func (p *contextProviderK8sSecrets) Fetch(key string) (string, bool) { // key = "kubernetes_secrets.somenamespace.somesecret.value" - if p.client == nil { + p.clientMx.Lock() + client := p.client + p.clientMx.Unlock() + if client == nil { return "", false } tokens := strings.Split(key, ".") @@ -67,7 +75,7 @@ func (p *contextProviderK8sSecrets) Fetch(key string) (string, bool) { secretName := tokens[2] secretVar := tokens[3] - secretIntefrace := p.client.CoreV1().Secrets(ns) + secretIntefrace := client.CoreV1().Secrets(ns) ctx := context.TODO() secret, err := secretIntefrace.Get(ctx, secretName, metav1.GetOptions{}) if err != nil { @@ -89,8 +97,14 @@ func (p *contextProviderK8sSecrets) Run(comm corecomp.ContextProviderComm) error p.logger.Debugf("Kubernetes_secrets provider skipped, unable to connect: %s", err) return nil } + p.clientMx.Lock() p.client = client - return nil + p.clientMx.Unlock() + <-comm.Done() + p.clientMx.Lock() + p.client = nil + p.clientMx.Unlock() + return comm.Err() } func getK8sClient(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) { diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go index 4c80800a59b..388f33074bb 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go @@ -7,6 +7,9 @@ package kubernetessecrets import ( "context" "testing" + "time" + + ctesting "github.com/elastic/elastic-agent/internal/pkg/composable/testing" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -19,7 +22,6 @@ import ( "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/config" - corecomp "github.com/elastic/elastic-agent/internal/pkg/core/composable" ) const ( @@ -52,13 +54,31 @@ func Test_K8sSecretsProvider_Fetch(t *testing.T) { p, err := ContextProviderBuilder(logger, cfg) require.NoError(t, err) - fp, _ := p.(corecomp.FetchContextProvider) + fp, _ := p.(*contextProviderK8sSecrets) getK8sClientFunc = func(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) { return client, nil } require.NoError(t, err) - _ = fp.Run(nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + comm := ctesting.NewContextComm(ctx) + + go func() { + _ = fp.Run(comm) + }() + + for { + fp.clientMx.Lock() + client := fp.client + fp.clientMx.Unlock() + if client != nil { + break + } + <-time.After(10 * time.Millisecond) + } + val, found := fp.Fetch("kubernetes_secrets.test_namespace.testing_secret.secret_value") assert.True(t, found) assert.Equal(t, val, pass) @@ -89,13 +109,31 @@ func Test_K8sSecretsProvider_FetchWrongSecret(t *testing.T) { p, err := ContextProviderBuilder(logger, cfg) require.NoError(t, err) - fp, _ := p.(corecomp.FetchContextProvider) + fp, _ := p.(*contextProviderK8sSecrets) getK8sClientFunc = func(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) { return client, nil } require.NoError(t, err) - _ = fp.Run(nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + comm := ctesting.NewContextComm(ctx) + + go func() { + _ = fp.Run(comm) + }() + + for { + fp.clientMx.Lock() + client := fp.client + fp.clientMx.Unlock() + if client != nil { + break + } + <-time.After(10 * time.Millisecond) + } + val, found := 
fp.Fetch("kubernetes_secrets.test_namespace.testing_secretHACK.secret_value") assert.False(t, found) assert.EqualValues(t, val, "") diff --git a/internal/pkg/composable/providers/local/local.go b/internal/pkg/composable/providers/local/local.go index 9c611ecbd13..b44affc78df 100644 --- a/internal/pkg/composable/providers/local/local.go +++ b/internal/pkg/composable/providers/local/local.go @@ -15,7 +15,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("local", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("local", ContextProviderBuilder) } type contextProvider struct { diff --git a/internal/pkg/composable/providers/localdynamic/localdynamic.go b/internal/pkg/composable/providers/localdynamic/localdynamic.go index f4f99ca4030..0fd81738976 100644 --- a/internal/pkg/composable/providers/localdynamic/localdynamic.go +++ b/internal/pkg/composable/providers/localdynamic/localdynamic.go @@ -18,7 +18,7 @@ import ( const ItemPriority = 0 func init() { - composable.Providers.AddDynamicProvider("local_dynamic", DynamicProviderBuilder) + composable.Providers.MustAddDynamicProvider("local_dynamic", DynamicProviderBuilder) } type dynamicItem struct { diff --git a/internal/pkg/composable/providers/path/path.go b/internal/pkg/composable/providers/path/path.go index 455f46d2b28..05af5bcd0b0 100644 --- a/internal/pkg/composable/providers/path/path.go +++ b/internal/pkg/composable/providers/path/path.go @@ -14,7 +14,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("path", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("path", ContextProviderBuilder) } type contextProvider struct{} diff --git a/internal/pkg/core/composable/providers.go b/internal/pkg/core/composable/providers.go index d87437e2dae..235e17d83fa 100644 --- a/internal/pkg/core/composable/providers.go +++ b/internal/pkg/core/composable/providers.go @@ -6,11 +6,12 @@ package composable import "context" -// FetchContextProvider is the 
interface that a context provider uses so as to be able to be called -// explicitly on demand by vars framework in order to fetch specific target values like a k8s secret. +// FetchContextProvider is the interface that a context provider uses allow variable values to be determined when the +// configuration is rendered versus it being known in advanced. type FetchContextProvider interface { ContextProvider - // Run runs the inventory provider. + + // Fetch tries to fetch a value for a variable. Fetch(string) (string, bool) } diff --git a/internal/pkg/fleetapi/acker/noop/noop_acker.go b/internal/pkg/fleetapi/acker/noop/noop_acker.go index 3e2716193f0..7c410d73bc0 100644 --- a/internal/pkg/fleetapi/acker/noop/noop_acker.go +++ b/internal/pkg/fleetapi/acker/noop/noop_acker.go @@ -2,27 +2,29 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package fleet +package noop import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi" ) -// Acker is a noop acker. +// noopAcker is a noop acker. // Methods of these acker do nothing. -type Acker struct{} +type noopAcker struct{} -// NewAcker creates a new noop acker. -func NewAcker() *Acker { - return &Acker{} +// New creates a new noop acker. +func New() acker.Acker { + return &noopAcker{} } // Ack acknowledges action. -func (f *Acker) Ack(ctx context.Context, action fleetapi.Action) error { +func (f *noopAcker) Ack(ctx context.Context, action fleetapi.Action) error { return nil } // Commit commits ack actions. 
-func (*Acker) Commit(ctx context.Context) error { return nil } +func (*noopAcker) Commit(ctx context.Context) error { return nil } diff --git a/pkg/component/load.go b/pkg/component/load.go index 38e934b836f..62a983f1f9d 100644 --- a/pkg/component/load.go +++ b/pkg/component/load.go @@ -146,6 +146,15 @@ func LoadRuntimeSpecs(dir string, platform PlatformDetail, opts ...LoadRuntimeOp }, nil } +// Inputs returns the list of supported inputs for this platform. +func (r *RuntimeSpecs) Inputs() []string { + inputs := make([]string, 0, len(r.inputSpecs)) + for inputType := range r.inputSpecs { + inputs = append(inputs, inputType) + } + return inputs +} + // GetInput returns the input runtime specification for this input on this platform. func (r *RuntimeSpecs) GetInput(inputType string) (InputRuntimeSpec, error) { runtime, ok := r.inputSpecs[inputType] diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go index e7125e82f68..8fbeeb73ff7 100644 --- a/pkg/component/runtime/manager.go +++ b/pkg/component/runtime/manager.go @@ -48,6 +48,12 @@ var ( ErrNoUnit = errors.New("no unit under control of this manager") ) +// ComponentComponentState provides a structure to map a component to current component state. +type ComponentComponentState struct { + Component component.Component + State ComponentState +} + // Manager for the entire runtime of operating components. 
type Manager struct { proto.UnimplementedElasticAgentServer @@ -67,8 +73,13 @@ type Manager struct { mx sync.RWMutex current map[string]*componentRuntimeState - subMx sync.RWMutex - subscriptions map[string][]*Subscription + subMx sync.RWMutex + subscriptions map[string][]*Subscription + subAllMx sync.RWMutex + subscribeAll []*SubscriptionAll + subscribeAllInit chan *SubscriptionAll + + errCh chan error shuttingDown atomic.Bool } @@ -87,6 +98,7 @@ func NewManager(logger *logger.Logger, listenAddr string, tracer *apm.Tracer) (* waitReady: make(map[string]waitForReady), current: make(map[string]*componentRuntimeState), subscriptions: make(map[string][]*Subscription), + errCh: make(chan error), } return m, nil } @@ -215,6 +227,11 @@ func (m *Manager) WaitForReady(ctx context.Context) error { } } +// Errors returns channel that errors are reported on. +func (m *Manager) Errors() <-chan error { + return m.errCh +} + // Update updates the currComp state of the running components. // // This returns as soon as possible, work is performed in the background to @@ -229,6 +246,22 @@ func (m *Manager) Update(components []component.Component) error { return m.update(components, true) } +// State returns the current component states. +func (m *Manager) State() []ComponentComponentState { + m.mx.RLock() + defer m.mx.RUnlock() + states := make([]ComponentComponentState, 0, len(m.current)) + for _, crs := range m.current { + crs.latestMx.RLock() + states = append(states, ComponentComponentState{ + Component: crs.currComp, + State: crs.latestState.Copy(), + }) + crs.latestMx.RUnlock() + } + return states +} + // PerformAction executes an action on a unit. func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { id, err := uuid.NewV4() @@ -290,11 +323,11 @@ func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name s // Subscribe to changes in a component. 
// // Allows a component without that ID to exists. Once a component starts matching that ID then changes will start to -// be provided over the channel. +// be provided over the channel. Cancelling the context results in the subscription being unsubscribed. // // Note: Not reading from a subscription channel will cause the Manager to block. -func (m *Manager) Subscribe(componentID string) *Subscription { - sub := newSubscription(m) +func (m *Manager) Subscribe(ctx context.Context, componentID string) *Subscription { + sub := newSubscription(ctx, m) // add latestState to channel m.mx.RLock() @@ -302,14 +335,88 @@ func (m *Manager) Subscribe(componentID string) *Subscription { m.mx.RUnlock() if ok { comp.latestMx.RLock() - sub.ch <- comp.latestState + latestState := comp.latestState.Copy() comp.latestMx.RUnlock() + go func() { + select { + case <-ctx.Done(): + case sub.ch <- latestState: + } + }() } // add subscription for future changes m.subMx.Lock() m.subscriptions[componentID] = append(m.subscriptions[componentID], sub) - defer m.subMx.Unlock() + m.subMx.Unlock() + + go func() { + <-ctx.Done() + + // unsubscribe + m.subMx.Lock() + defer m.subMx.Unlock() + for key, subs := range m.subscriptions { + for i, s := range subs { + if sub == s { + m.subscriptions[key] = append(m.subscriptions[key][:i], m.subscriptions[key][i+1:]...) + return + } + } + } + }() + + return sub +} + +// SubscribeAll subscribes to all changes in all components. +// +// This provides the current state for existing components at the time of first subscription. Cancelling the context +// results in the subscription being unsubscribed. +// +// Note: Not reading from a subscription channel will cause the Manager to block. 
+func (m *Manager) SubscribeAll(ctx context.Context) *SubscriptionAll { + sub := newSubscriptionAll(ctx, m) + + // add latest states + m.mx.RLock() + latest := make([]ComponentComponentState, 0, len(m.current)) + for _, comp := range m.current { + comp.latestMx.RLock() + latest = append(latest, ComponentComponentState{Component: comp.currComp, State: comp.latestState.Copy()}) + comp.latestMx.RUnlock() + } + m.mx.RUnlock() + if len(latest) > 0 { + go func() { + for _, l := range latest { + select { + case <-ctx.Done(): + return + case sub.ch <- l: + } + } + }() + } + + // add subscription for future changes + m.subAllMx.Lock() + m.subscribeAll = append(m.subscribeAll, sub) + m.subAllMx.Unlock() + + go func() { + <-ctx.Done() + + // unsubscribe + m.subAllMx.Lock() + defer m.subAllMx.Unlock() + for i, s := range m.subscribeAll { + if sub == s { + m.subscribeAll = append(m.subscribeAll[:i], m.subscribeAll[i+1:]...) + return + } + } + }() return sub } @@ -470,11 +577,26 @@ func (m *Manager) shutdown() { } func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentState) { + m.subAllMx.RLock() + for _, sub := range m.subscribeAll { + select { + case <-sub.ctx.Done(): + case sub.ch <- ComponentComponentState{ + Component: state.currComp, + State: latest, + }: + } + } + m.subAllMx.RUnlock() + m.subMx.RLock() subs, ok := m.subscriptions[state.currComp.ID] if ok { for _, sub := range subs { - sub.ch <- latest + select { + case <-sub.ctx.Done(): + case sub.ch <- latest: + } } } m.subMx.RUnlock() @@ -490,19 +612,6 @@ func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentSta } } -func (m *Manager) unsubscribe(subscription *Subscription) { - m.subMx.Lock() - defer m.subMx.Unlock() - for key, subs := range m.subscriptions { - for i, sub := range subs { - if subscription == sub { - m.subscriptions[key] = append(m.subscriptions[key][:i], m.subscriptions[key][i+1:]...) 
- return - } - } - } -} - func (m *Manager) getCertificate(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { var cert *tls.Certificate diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index adeb2b1243a..41a62557ea8 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -70,8 +70,7 @@ func TestManager_SimpleComponentErr(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("error-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "error-default") for { select { case <-subCtx.Done(): @@ -179,8 +178,7 @@ func TestManager_FakeInput_StartStop(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -297,8 +295,7 @@ func TestManager_FakeInput_Configure(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -431,8 +428,7 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) { go func() { unit1Stopped := false - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -575,8 +571,7 @@ func TestManager_FakeInput_ActionState(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -700,8 +695,7 @@ func TestManager_FakeInput_Restarts(t *testing.T) { go func() { killed := false - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -849,8 +843,7 @@ func 
TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { go func() { wasDegraded := false - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -956,8 +949,7 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -1166,12 +1158,9 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { subErrCh1 := make(chan error) subErrCh2 := make(chan error) go func() { - sub0 := m.Subscribe("fake-0") - defer sub0.Unsubscribe() - sub1 := m.Subscribe("fake-1") - defer sub1.Unsubscribe() - sub2 := m.Subscribe("fake-2") - defer sub2.Unsubscribe() + sub0 := m.Subscribe(subCtx, "fake-0") + sub1 := m.Subscribe(subCtx, "fake-1") + sub2 := m.Subscribe(subCtx, "fake-2") for { select { case <-subCtx.Done(): diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index ee4800ce36b..b84e5b48202 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -35,12 +35,24 @@ type ComponentUnitKey struct { UnitID string } +// ComponentVersionInfo provides version information reported by the component. +type ComponentVersionInfo struct { + // Name of the binary. + Name string + // Version of the binary. + Version string + // Additional metadata about the binary. + Meta map[string]string +} + // ComponentState is the overall state of the component. 
type ComponentState struct { State client.UnitState Message string Units map[ComponentUnitKey]ComponentUnitState + + VersionInfo ComponentVersionInfo } func newComponentState(comp *component.Component, initState client.UnitState, initMessage string, initCfgIdx uint64) (s ComponentState) { @@ -157,6 +169,17 @@ func (s *ComponentState) syncCheckin(checkin *proto.CheckinObserved) bool { } } } + if checkin.VersionInfo != nil { + if checkin.VersionInfo.Name != "" { + s.VersionInfo.Name = checkin.VersionInfo.Name + } + if checkin.VersionInfo.Version != "" { + s.VersionInfo.Version = checkin.VersionInfo.Version + } + if checkin.VersionInfo.Meta != nil { + s.VersionInfo.Meta = checkin.VersionInfo.Meta + } + } return changed } @@ -280,7 +303,9 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component. delete(state.actions, ar.Id) } state.actionsMx.Unlock() - callback(ar) + if ok { + callback(ar) + } } } }() diff --git a/pkg/component/runtime/subscription.go b/pkg/component/runtime/subscription.go index 88f4106d21a..15cfeac4f7d 100644 --- a/pkg/component/runtime/subscription.go +++ b/pkg/component/runtime/subscription.go @@ -4,16 +4,22 @@ package runtime +import ( + "context" +) + // Subscription provides a channel for notifications on a component state. type Subscription struct { + ctx context.Context manager *Manager ch chan ComponentState } -func newSubscription(manager *Manager) *Subscription { +func newSubscription(ctx context.Context, manager *Manager) *Subscription { return &Subscription{ + ctx: ctx, manager: manager, - ch: make(chan ComponentState, 1), // buffer of 1 to allow initial latestState state + ch: make(chan ComponentState), } } @@ -22,7 +28,22 @@ func (s *Subscription) Ch() <-chan ComponentState { return s.ch } -// Unsubscribe removes the subscription. -func (s *Subscription) Unsubscribe() { - s.manager.unsubscribe(s) +// SubscriptionAll provides a channel for notifications on all component state changes. 
+type SubscriptionAll struct { + ctx context.Context + manager *Manager + ch chan ComponentComponentState +} + +func newSubscriptionAll(ctx context.Context, manager *Manager) *SubscriptionAll { + return &SubscriptionAll{ + ctx: ctx, + manager: manager, + ch: make(chan ComponentComponentState), + } +} + +// Ch provides the channel to get state changes. +func (s *SubscriptionAll) Ch() <-chan ComponentComponentState { + return s.ch } diff --git a/pkg/core/server/config.go b/pkg/core/server/config.go deleted file mode 100644 index 0d1dbd9d5e3..00000000000 --- a/pkg/core/server/config.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "fmt" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// Config is a configuration of GRPC server. -type Config struct { - Address string `config:"address"` - Port uint16 `config:"port"` -} - -// DefaultGRPCConfig creates a default server configuration. -func DefaultGRPCConfig() *Config { - return &Config{ - Address: "localhost", - Port: 6789, - } -} - -// NewFromConfig creates a new GRPC server for clients to connect to. -func NewFromConfig(logger *logger.Logger, cfg *Config, handler Handler, tracer *apm.Tracer) (*Server, error) { - return New(logger, fmt.Sprintf("%s:%d", cfg.Address, cfg.Port), handler, tracer) -} diff --git a/pkg/core/server/config_test.go b/pkg/core/server/config_test.go deleted file mode 100644 index 2c846d77892..00000000000 --- a/pkg/core/server/config_test.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package server - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewFromConfig(t *testing.T) { - l := newErrorLogger(t) - cfg := &Config{ - Address: "0.0.0.0", - Port: 9876, - } - srv, err := NewFromConfig(l, cfg, &StubHandler{}, nil) - require.NoError(t, err) - assert.Equal(t, "0.0.0.0:9876", srv.getListenAddr()) -} diff --git a/pkg/core/server/server.go b/pkg/core/server/server.go deleted file mode 100644 index 6d3a284cd79..00000000000 --- a/pkg/core/server/server.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "net" - "strings" - "sync" - "time" - - "go.elastic.co/apm" - "go.elastic.co/apm/module/apmgrpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/gofrs/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - protobuf "google.golang.org/protobuf/proto" - - "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/core/authority" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - // InitialCheckinTimeout is the maximum amount of wait time from initial check-in stream to - // getting the first check-in observed state. - InitialCheckinTimeout = 5 * time.Second - // CheckinMinimumTimeoutGracePeriod is additional time added to the client.CheckinMinimumTimeout - // to ensure the application is checking in correctly. 
- CheckinMinimumTimeoutGracePeriod = 30 * time.Second - // WatchdogCheckLoop is the amount of time that the watchdog will wait between checking for - // applications that have not checked in the correct amount of time. - WatchdogCheckLoop = 5 * time.Second -) - -var ( - // ErrApplicationAlreadyRegistered returned when trying to register an application more than once. - ErrApplicationAlreadyRegistered = errors.New("application already registered", errors.TypeApplication) - // ErrApplicationStopping returned when trying to update an application config but it is stopping. - ErrApplicationStopping = errors.New("application stopping", errors.TypeApplication) - // ErrApplicationStopTimedOut returned when calling Stop and the application timed out stopping. - ErrApplicationStopTimedOut = errors.New("application stopping timed out", errors.TypeApplication) - // ErrActionTimedOut returned on PerformAction when the action timed out. - ErrActionTimedOut = errors.New("application action timed out", errors.TypeApplication) - // ErrActionCancelled returned on PerformAction when an action is cancelled, normally due to the application - // being stopped or removed from the server. - ErrActionCancelled = errors.New("application action cancelled", errors.TypeApplication) -) - -// ApplicationState represents the applications state according to the server. 
-type ApplicationState struct { - srv *Server - app interface{} - - srvName string - token string - cert *authority.Pair - - pendingExpected chan *proto.StateExpected - expected proto.StateExpected_State - expectedConfigIdx uint64 - expectedConfig string - status proto.StateObserved_Status - statusMessage string - statusPayload map[string]interface{} - statusPayloadStr string - statusConfigIdx uint64 - statusTime time.Time - checkinConn bool - checkinDone chan bool - checkinLock sync.RWMutex - - pendingActions chan *pendingAction - sentActions map[string]*sentAction - actionsConn bool - actionsDone chan bool - actionsLock sync.RWMutex - - inputTypes map[string]struct{} -} - -// Handler is the used by the server to inform of status changes. -type Handler interface { - // OnStatusChange called when a registered application observed status is changed. - OnStatusChange(*ApplicationState, proto.StateObserved_Status, string, map[string]interface{}) -} - -// Server is the GRPC server that the launched applications connect back to. -type Server struct { - proto.UnimplementedElasticAgentServer - - logger *logger.Logger - ca *authority.CertificateAuthority - listenAddr string - handler Handler - tracer *apm.Tracer - - listener net.Listener - server *grpc.Server - watchdogDone chan bool - watchdogWG sync.WaitGroup - - apps sync.Map - - // overridden in tests - watchdogCheckInterval time.Duration - checkInMinTimeout time.Duration -} - -// New creates a new GRPC server for clients to connect to. 
-func New(logger *logger.Logger, listenAddr string, handler Handler, tracer *apm.Tracer) (*Server, error) { - ca, err := authority.NewCA() - if err != nil { - return nil, err - } - return &Server{ - logger: logger, - ca: ca, - listenAddr: listenAddr, - handler: handler, - watchdogCheckInterval: WatchdogCheckLoop, - checkInMinTimeout: client.CheckinMinimumTimeout + CheckinMinimumTimeoutGracePeriod, - tracer: tracer, - }, nil -} - -// Start starts the GRPC endpoint and accepts new connections. -func (s *Server) Start() error { - if s.server != nil { - // already started - return nil - } - - lis, err := net.Listen("tcp", s.listenAddr) - if err != nil { - return err - } - s.listener = lis - certPool := x509.NewCertPool() - if ok := certPool.AppendCertsFromPEM(s.ca.Crt()); !ok { - return errors.New("failed to append root CA", errors.TypeSecurity) - } - creds := credentials.NewTLS(&tls.Config{ - ClientAuth: tls.RequireAndVerifyClientCert, - ClientCAs: certPool, - GetCertificate: s.getCertificate, - MinVersion: tls.VersionTLS12, - }) - if s.tracer != nil { - apmInterceptor := apmgrpc.NewUnaryServerInterceptor(apmgrpc.WithRecovery(), apmgrpc.WithTracer(s.tracer)) - s.server = grpc.NewServer( - grpc.UnaryInterceptor(apmInterceptor), - grpc.Creds(creds), - ) - } else { - s.server = grpc.NewServer(grpc.Creds(creds)) - } - proto.RegisterElasticAgentServer(s.server, s) - - // start serving GRPC connections - go func() { - err := s.server.Serve(lis) - if err != nil { - s.logger.Errorf("error listening for GRPC: %s", err) - } - }() - - // start the watchdog - s.watchdogDone = make(chan bool) - s.watchdogWG.Add(1) - go s.watchdog() - - return nil -} - -// Stop stops the GRPC endpoint. -func (s *Server) Stop() { - if s.server != nil { - close(s.watchdogDone) - s.server.Stop() - s.server = nil - s.listener = nil - s.watchdogWG.Wait() - } -} - -// Get returns the application state from the server for the passed application. 
-func (s *Server) Get(app interface{}) (*ApplicationState, bool) { - var foundState *ApplicationState - s.apps.Range(func(_ interface{}, val interface{}) bool { - as, ok := val.(*ApplicationState) - if !ok { - return true - } - if as.app == app { - foundState = as - return false - } - return true - }) - return foundState, foundState != nil -} - -// FindByInputType application by input type -func (s *Server) FindByInputType(inputType string) (*ApplicationState, bool) { - var foundState *ApplicationState - s.apps.Range(func(_ interface{}, val interface{}) bool { - as, ok := val.(*ApplicationState) - if !ok { - return true - } - if as.inputTypes == nil { - return true - } - - if _, ok := as.inputTypes[inputType]; ok { - foundState = as - return false - } - return true - }) - return foundState, foundState != nil -} - -// Register registers a new application to connect to the server. -func (s *Server) Register(app interface{}, config string) (*ApplicationState, error) { - if _, ok := s.Get(app); ok { - return nil, ErrApplicationAlreadyRegistered - } - - id, err := uuid.NewV4() - if err != nil { - return nil, err - } - srvName, err := genServerName() - if err != nil { - return nil, err - } - pair, err := s.ca.GeneratePairWithName(srvName) - if err != nil { - return nil, err - } - appState := &ApplicationState{ - srv: s, - app: app, - srvName: srvName, - token: id.String(), - cert: pair, - pendingExpected: make(chan *proto.StateExpected), - expected: proto.StateExpected_RUNNING, - expectedConfigIdx: 1, - expectedConfig: config, - checkinConn: true, - status: proto.StateObserved_STARTING, - statusConfigIdx: client.InitialConfigIdx, - statusTime: time.Now().UTC(), - pendingActions: make(chan *pendingAction, 100), - sentActions: make(map[string]*sentAction), - actionsConn: true, - } - s.apps.Store(appState.token, appState) - return appState, nil -} - -// Checkin implements the GRPC bi-direction stream connection for check-ins. 
-func (s *Server) Checkin(server proto.ElasticAgent_CheckinServer) error { - firstCheckinChan := make(chan *proto.StateObserved) - go func() { - // go func will not be leaked, because when the main function - // returns it will close the connection. that will cause this - // function to return. - observed, err := server.Recv() - if err != nil { - close(firstCheckinChan) - return - } - firstCheckinChan <- observed - }() - - var ok bool - var observedConfigStateIdx uint64 - var firstCheckin *proto.StateObserved - select { - case firstCheckin, ok = <-firstCheckinChan: - if firstCheckin != nil { - observedConfigStateIdx = firstCheckin.ConfigStateIdx - } - break - case <-time.After(InitialCheckinTimeout): - // close connection - s.logger.Debug("check-in stream never sent initial observed message; closing connection") - return nil - } - if !ok { - // close connection - return nil - } - appState, ok := s.getByToken(firstCheckin.Token) - if !ok { - // no application with token; close connection - s.logger.Debug("check-in stream sent an invalid token; closing connection") - return status.Error(codes.PermissionDenied, "invalid token") - } - appState.checkinLock.Lock() - if appState.checkinDone != nil { - // application is already connected (cannot have multiple); close connection - appState.checkinLock.Unlock() - s.logger.Debug("check-in stream already exists for application; closing connection") - return status.Error(codes.AlreadyExists, "application already connected") - } - if !appState.checkinConn { - // application is being destroyed cannot reconnect; close connection - appState.checkinLock.Unlock() - s.logger.Debug("check-in stream cannot connect, application is being destroyed; closing connection") - return status.Error(codes.Unavailable, "application cannot connect being destroyed") - } - - // application is running as a service and counter is already counting - // force config reload - if observedConfigStateIdx > 0 { - appState.expectedConfigIdx = 
observedConfigStateIdx + 1 - } - - checkinDone := make(chan bool) - appState.checkinDone = checkinDone - appState.checkinLock.Unlock() - - defer func() { - appState.checkinLock.Lock() - appState.checkinDone = nil - appState.checkinLock.Unlock() - }() - - // send the config and expected state changes to the applications when - // pushed on the channel - recvDone := make(chan bool) - sendDone := make(chan bool) - go func() { - defer func() { - close(sendDone) - }() - for { - var expected *proto.StateExpected - select { - case <-checkinDone: - return - case <-recvDone: - return - case expected = <-appState.pendingExpected: - } - - err := server.Send(expected) - if err != nil { - if reportableErr(err) { - s.logger.Debugf("check-in stream failed to send expected state: %s", err) - } - return - } - } - }() - - // update status after the pendingExpected channel has a reader - appState.updateStatus(firstCheckin, true) - - // read incoming state observations from the application and act based on - // the servers expected state of the application - go func() { - for { - checkin, err := server.Recv() - if err != nil { - if reportableErr(err) { - s.logger.Debugf("check-in stream failed to receive data: %s", err) - } - close(recvDone) - return - } - appState.updateStatus(checkin, false) - } - }() - - <-sendDone - return nil -} - -// CheckinV2 implements the GRPC bi-direction stream connection for v2 check-ins. -func (s *Server) CheckinV2(server proto.ElasticAgent_CheckinV2Server) error { - return errors.New("not implemented") -} - -// Actions implements the GRPC bi-direction stream connection for actions. -func (s *Server) Actions(server proto.ElasticAgent_ActionsServer) error { - firstRespChan := make(chan *proto.ActionResponse) - go func() { - // go func will not be leaked, because when the main function - // returns it will close the connection. that will cause this - // function to return. 
- observed, err := server.Recv() - if err != nil { - close(firstRespChan) - return - } - firstRespChan <- observed - }() - - var ok bool - var firstResp *proto.ActionResponse - select { - case firstResp, ok = <-firstRespChan: - break - case <-time.After(InitialCheckinTimeout): - // close connection - s.logger.Debug("actions stream never sent initial response message; closing connection") - return nil - } - if !ok { - // close connection - return nil - } - if firstResp.Id != client.ActionResponseInitID { - // close connection - s.logger.Debug("actions stream first response message must be an init message; closing connection") - return status.Error(codes.InvalidArgument, "initial response must be an init message") - } - appState, ok := s.getByToken(firstResp.Token) - if !ok { - // no application with token; close connection - s.logger.Debug("actions stream sent an invalid token; closing connection") - return status.Error(codes.PermissionDenied, "invalid token") - } - appState.actionsLock.Lock() - if appState.actionsDone != nil { - // application is already connected (cannot have multiple); close connection - appState.actionsLock.Unlock() - s.logger.Debug("actions stream already exists for application; closing connection") - return status.Error(codes.AlreadyExists, "application already connected") - } - if !appState.actionsConn { - // application is being destroyed cannot reconnect; close connection - appState.actionsLock.Unlock() - s.logger.Debug("actions stream cannot connect, application is being destroyed; closing connection") - return status.Error(codes.Unavailable, "application cannot connect being destroyed") - } - actionsDone := make(chan bool) - appState.actionsDone = actionsDone - appState.actionsLock.Unlock() - - defer func() { - appState.actionsLock.Lock() - appState.actionsDone = nil - appState.actionsLock.Unlock() - }() - - // send the pending actions that need to be performed - recvDone := make(chan bool) - sendDone := make(chan bool) - go func() { - 
defer func() { close(sendDone) }() - for { - var pending *pendingAction - select { - case <-actionsDone: - return - case <-recvDone: - return - case pending = <-appState.pendingActions: - } - - if pending.expiresOn.Sub(time.Now().UTC()) <= 0 { - // to late action already expired - pending.callback(nil, ErrActionTimedOut) - continue - } - - appState.actionsLock.Lock() - err := server.Send(&proto.ActionRequest{ - Id: pending.id, - Name: pending.name, - Params: pending.params, - }) - if err != nil { - // failed to send action; add back to channel to retry on re-connect from the client - appState.actionsLock.Unlock() - appState.pendingActions <- pending - if reportableErr(err) { - s.logger.Debugf("failed to send pending action %s (will retry, after re-connect): %s", pending.id, err) - } - return - } - appState.sentActions[pending.id] = &sentAction{ - callback: pending.callback, - expiresOn: pending.expiresOn, - } - appState.actionsLock.Unlock() - } - }() - - // receive the finished actions - go func() { - for { - response, err := server.Recv() - if err != nil { - if reportableErr(err) { - s.logger.Debugf("actions stream failed to receive data: %s", err) - } - close(recvDone) - return - } - appState.actionsLock.Lock() - action, ok := appState.sentActions[response.Id] - if !ok { - // nothing to do, unknown action request - s.logger.Debugf("actions stream received an unknown action: %s", response.Id) - appState.actionsLock.Unlock() - continue - } - delete(appState.sentActions, response.Id) - appState.actionsLock.Unlock() - - var result map[string]interface{} - err = json.Unmarshal(response.Result, &result) - if err != nil { - action.callback(nil, err) - } else if response.Status == proto.ActionResponse_FAILED { - errStr, ok := result["error"] - if ok { - err = fmt.Errorf("%s", errStr) - } else { - err = fmt.Errorf("unknown error") - } - action.callback(nil, err) - } else { - action.callback(result, nil) - } - } - }() - - <-sendDone - return nil -} - -// WriteConnInfo 
writes the connection information for the application into the writer. -// -// Note: If the writer implements io.Closer the writer is also closed. -func (as *ApplicationState) WriteConnInfo(w io.Writer) error { - connInfo := &proto.ConnInfo{ - Addr: as.srv.getListenAddr(), - ServerName: as.srvName, - Token: as.token, - CaCert: as.srv.ca.Crt(), - PeerCert: as.cert.Crt, - PeerKey: as.cert.Key, - } - infoBytes, err := protobuf.Marshal(connInfo) - if err != nil { - return errors.New(err, "failed to marshal connection information", errors.TypeApplication) - } - _, err = w.Write(infoBytes) - if err != nil { - return errors.New(err, "failed to write connection information", errors.TypeApplication) - } - closer, ok := w.(io.Closer) - if ok { - _ = closer.Close() - } - return nil -} - -// Stop instructs the application to stop gracefully within the timeout. -// -// Once the application is stopped or the timeout is reached the application is destroyed. Even in the case -// the application times out during stop and ErrApplication -func (as *ApplicationState) Stop(timeout time.Duration) error { - as.checkinLock.Lock() - wasConn := as.checkinDone != nil - cfgIdx := as.statusConfigIdx - as.expected = proto.StateExpected_STOPPING - as.checkinLock.Unlock() - - // send it to the client if its connected, otherwise it will be sent once it connects. 
- as.sendExpectedState(&proto.StateExpected{ - State: proto.StateExpected_STOPPING, - ConfigStateIdx: cfgIdx, - Config: "", - }, false) - - started := time.Now().UTC() - for { - if time.Now().UTC().Sub(started) > timeout { - as.Destroy() - return ErrApplicationStopTimedOut - } - - as.checkinLock.RLock() - s := as.status - doneChan := as.checkinDone - as.checkinLock.RUnlock() - if (wasConn && doneChan == nil) || (!wasConn && s == proto.StateObserved_STOPPING && doneChan == nil) { - // either occurred: - // * client was connected then disconnected on stop - // * client was not connected; connected; received stopping; then disconnected - as.Destroy() - return nil - } - - <-time.After(500 * time.Millisecond) - } -} - -// Destroy completely removes the application from the server without sending any stop command to the application. -// -// The ApplicationState at this point cannot be used. -func (as *ApplicationState) Destroy() { - as.destroyActionsStream() - as.destroyCheckinStream() - as.srv.apps.Delete(as.token) -} - -// UpdateConfig pushes an updated configuration to the connected application. -func (as *ApplicationState) UpdateConfig(config string) error { - as.checkinLock.RLock() - expected := as.expected - currentCfg := as.expectedConfig - as.checkinLock.RUnlock() - if expected == proto.StateExpected_STOPPING { - return ErrApplicationStopping - } - if config == currentCfg { - // already at that expected config - return nil - } - - as.checkinLock.Lock() - idx := as.expectedConfigIdx + 1 - as.expectedConfigIdx = idx - as.expectedConfig = config - as.checkinLock.Unlock() - - // send it to the client if its connected, otherwise it will be sent once it connects. - as.sendExpectedState(&proto.StateExpected{ - State: expected, - ConfigStateIdx: idx, - Config: config, - }, false) - return nil -} - -// PerformAction synchronously performs an action on the application. 
-func (as *ApplicationState) PerformAction(name string, params map[string]interface{}, timeout time.Duration) (map[string]interface{}, error) { - paramBytes, err := json.Marshal(params) - if err != nil { - return nil, err - } - id, err := uuid.NewV4() - if err != nil { - return nil, err - } - if !as.actionsConn { - // actions stream destroyed, action cancelled - return nil, ErrActionCancelled - } - - resChan := make(chan actionResult) - as.pendingActions <- &pendingAction{ - id: id.String(), - name: name, - params: paramBytes, - callback: func(m map[string]interface{}, err error) { - resChan <- actionResult{ - result: m, - err: err, - } - }, - expiresOn: time.Now().UTC().Add(timeout), - } - res := <-resChan - return res.result, res.err -} - -// App returns the registered app for the state. -func (as *ApplicationState) App() interface{} { - return as.app -} - -// Expected returns the expected state of the process. -func (as *ApplicationState) Expected() proto.StateExpected_State { - as.checkinLock.RLock() - defer as.checkinLock.RUnlock() - return as.expected -} - -// Config returns the expected config of the process. -func (as *ApplicationState) Config() string { - as.checkinLock.RLock() - defer as.checkinLock.RUnlock() - return as.expectedConfig -} - -// Status returns the current observed status. -func (as *ApplicationState) Status() (proto.StateObserved_Status, string, map[string]interface{}) { - as.checkinLock.RLock() - defer as.checkinLock.RUnlock() - return as.status, as.statusMessage, as.statusPayload -} - -// SetStatus allows the status to be overwritten by the agent. -// -// This status will be overwritten by the client if it reconnects and updates it status. 
-func (as *ApplicationState) SetStatus(status proto.StateObserved_Status, msg string, payload map[string]interface{}) error { - payloadStr, err := json.Marshal(payload) - if err != nil { - return err - } - as.checkinLock.RLock() - as.status = status - as.statusMessage = msg - as.statusPayload = payload - as.statusPayloadStr = string(payloadStr) - as.checkinLock.RUnlock() - return nil -} - -// SetInputTypes sets the allowed action input types for this application -func (as *ApplicationState) SetInputTypes(inputTypes []string) { - as.checkinLock.Lock() - as.inputTypes = make(map[string]struct{}) - for _, inputType := range inputTypes { - as.inputTypes[inputType] = struct{}{} - } - as.checkinLock.Unlock() -} - -// updateStatus updates the current observed status from the application, sends the expected state back to the -// application if the server expects it to be different then its observed state, and alerts the handler on the -// server when the application status has changed. -func (as *ApplicationState) updateStatus(checkin *proto.StateObserved, waitForReader bool) { - // convert payload from string to JSON - var payload map[string]interface{} - if checkin.Payload != "" { - // ignore the error, if client is sending bad JSON, then payload will just be nil - _ = json.Unmarshal([]byte(checkin.Payload), &payload) - } - - as.checkinLock.Lock() - expectedStatus := as.expected - expectedConfigIdx := as.expectedConfigIdx - expectedConfig := as.expectedConfig - prevStatus := as.status - prevMessage := as.statusMessage - prevPayloadStr := as.statusPayloadStr - as.status = checkin.Status - as.statusMessage = checkin.Message - as.statusPayloadStr = checkin.Payload - as.statusPayload = payload - as.statusConfigIdx = checkin.ConfigStateIdx - as.statusTime = time.Now().UTC() - as.checkinLock.Unlock() - - var expected *proto.StateExpected - if expectedStatus == proto.StateExpected_STOPPING && checkin.Status != proto.StateObserved_STOPPING { - expected = &proto.StateExpected{ - 
State: expectedStatus, - ConfigStateIdx: checkin.ConfigStateIdx, // stopping always inform that the config it has is correct - Config: "", - } - } else if checkin.ConfigStateIdx != expectedConfigIdx { - expected = &proto.StateExpected{ - State: expectedStatus, - ConfigStateIdx: expectedConfigIdx, - Config: expectedConfig, - } - } - if expected != nil { - as.sendExpectedState(expected, waitForReader) - } - - // alert the service handler that status has changed for the application - if prevStatus != checkin.Status || prevMessage != checkin.Message || prevPayloadStr != checkin.Payload { - as.srv.handler.OnStatusChange(as, checkin.Status, checkin.Message, payload) - } -} - -// sendExpectedState sends the expected status over the pendingExpected channel if the other side is -// waiting for a message. -func (as *ApplicationState) sendExpectedState(expected *proto.StateExpected, waitForReader bool) { - if waitForReader { - as.pendingExpected <- expected - return - } - - select { - case as.pendingExpected <- expected: - default: - } -} - -// destroyActionsStream disconnects the actions stream (prevent reconnect), cancel all pending actions -func (as *ApplicationState) destroyActionsStream() { - as.actionsLock.Lock() - as.actionsConn = false - if as.actionsDone != nil { - close(as.actionsDone) - as.actionsDone = nil - } - as.actionsLock.Unlock() - as.cancelActions() -} - -// flushExpiredActions flushes any expired actions from the pending channel or current processing. 
-func (as *ApplicationState) flushExpiredActions() { - now := time.Now().UTC() - pendingActions := make([]*pendingAction, 0, len(as.pendingActions)) - for { - done := false - select { - case pending := <-as.pendingActions: - pendingActions = append(pendingActions, pending) - default: - done = true - } - if done { - break - } - } - for _, pending := range pendingActions { - if pending.expiresOn.Sub(now) <= 0 { - pending.callback(nil, ErrActionTimedOut) - } else { - as.pendingActions <- pending - } - } - as.actionsLock.Lock() - for id, pendingResp := range as.sentActions { - if pendingResp.expiresOn.Sub(now) <= 0 { - delete(as.sentActions, id) - pendingResp.callback(nil, ErrActionTimedOut) - } - } - as.actionsLock.Unlock() -} - -// cancelActions cancels all pending or currently processing actions. -func (as *ApplicationState) cancelActions() { - for { - done := false - select { - case pending := <-as.pendingActions: - pending.callback(nil, ErrActionCancelled) - default: - done = true - } - if done { - break - } - } - as.actionsLock.Lock() - for id, pendingResp := range as.sentActions { - delete(as.sentActions, id) - pendingResp.callback(nil, ErrActionCancelled) - } - as.actionsLock.Unlock() -} - -// destroyCheckinStream disconnects the check stream (prevent reconnect). -func (as *ApplicationState) destroyCheckinStream() { - as.checkinLock.Lock() - as.checkinConn = false - if as.checkinDone != nil { - close(as.checkinDone) - as.checkinDone = nil - } - as.checkinLock.Unlock() -} - -// watchdog ensures that the current applications are checking in during the correct intervals of time. 
-func (s *Server) watchdog() { - defer s.watchdogWG.Done() - for { - t := time.NewTimer(s.watchdogCheckInterval) - select { - case <-s.watchdogDone: - t.Stop() - return - case <-t.C: - } - - now := time.Now().UTC() - s.apps.Range(func(_ interface{}, val interface{}) bool { - serverApp, ok := val.(*ApplicationState) - if !ok { - return true - } - serverApp.checkinLock.RLock() - statusTime := serverApp.statusTime - serverApp.checkinLock.RUnlock() - if now.Sub(statusTime) > s.checkInMinTimeout { - serverApp.checkinLock.Lock() - prevStatus := serverApp.status - s := prevStatus - prevMessage := serverApp.statusMessage - message := prevMessage - if serverApp.status == proto.StateObserved_DEGRADED { - s = proto.StateObserved_FAILED - message = "Missed two check-ins" - serverApp.status = s - serverApp.statusMessage = message - serverApp.statusPayload = nil - serverApp.statusPayloadStr = "" - serverApp.statusTime = now - } else if serverApp.status != proto.StateObserved_FAILED { - s = proto.StateObserved_DEGRADED - message = "Missed last check-in" - serverApp.status = s - serverApp.statusMessage = message - serverApp.statusPayload = nil - serverApp.statusPayloadStr = "" - serverApp.statusTime = now - } - serverApp.checkinLock.Unlock() - if prevStatus != s || prevMessage != message { - serverApp.srv.handler.OnStatusChange(serverApp, s, message, nil) - } - } - serverApp.flushExpiredActions() - return true - }) - } -} - -// getByToken returns an application state by its token. -func (s *Server) getByToken(token string) (*ApplicationState, bool) { - val, ok := s.apps.Load(token) - if ok { - return val.(*ApplicationState), true - } - return nil, false -} - -// getCertificate returns the TLS certificate based on the clientHello or errors if not found. 
-func (s *Server) getCertificate(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { - var cert *tls.Certificate - s.apps.Range(func(_ interface{}, val interface{}) bool { - sa, ok := val.(*ApplicationState) - if !ok { - return true - } - if sa.srvName == chi.ServerName { - cert = sa.cert.Certificate - return false - } - return true - }) - if cert != nil { - return cert, nil - } - return nil, errors.New("no supported TLS certificate", errors.TypeSecurity) -} - -// getListenAddr returns the listening address of the server. -func (s *Server) getListenAddr() string { - addr := strings.SplitN(s.listenAddr, ":", 2) - if len(addr) == 2 && addr[1] == "0" { - port := s.listener.Addr().(*net.TCPAddr).Port - return fmt.Sprintf("%s:%d", addr[0], port) - } - return s.listenAddr -} - -type pendingAction struct { - id string - name string - params []byte - callback func(map[string]interface{}, error) - expiresOn time.Time -} - -type sentAction struct { - callback func(map[string]interface{}, error) - expiresOn time.Time -} - -type actionResult struct { - result map[string]interface{} - err error -} - -func reportableErr(err error) bool { - if errors.Is(err, io.EOF) { - return false - } - s, ok := status.FromError(err) - if !ok { - return true - } - if s.Code() == codes.Canceled { - return false - } - return true -} - -func genServerName() (string, error) { - u, err := uuid.NewV4() - if err != nil { - return "", err - } - return strings.Replace(u.String(), "-", "", -1), nil -} diff --git a/pkg/core/server/server_test.go b/pkg/core/server/server_test.go deleted file mode 100644 index a2a1bdf4f80..00000000000 --- a/pkg/core/server/server_test.go +++ /dev/null @@ -1,794 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -//nolint:dupl // tests are equivalent -package server - -import ( - "context" - "fmt" - "io" - "strings" - "sync" - "testing" - "time" - - "go.elastic.co/apm/apmtest" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - "github.com/elastic/elastic-agent-libs/logp" - - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - initConfig = "initial_config" - newConfig = "new_config" -) - -func TestServer_Register(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - _, err := srv.Register(app, initConfig) - assert.NoError(t, err) - _, err = srv.Register(app, initConfig) - assert.Equal(t, ErrApplicationAlreadyRegistered, err) -} - -func TestServer_Get(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - expected, err := srv.Register(app, initConfig) - require.NoError(t, err) - observed, ok := srv.Get(app) - assert.True(t, ok) - assert.Equal(t, expected, observed) - _, found := srv.Get(&StubApp{}) - assert.False(t, found) -} - -func TestServer_InitialCheckIn(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // client should get initial check-in - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - - // set status as healthy and running - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - - // application state should be updated - 
assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) -} - -func TestServer_MultiClients(t *testing.T) { - initConfig1 := "initial_config_1" - initConfig2 := "initial_config_2" - app1 := &StubApp{} - app2 := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as1, err := srv.Register(app1, initConfig1) - require.NoError(t, err) - cImpl1 := &StubClientImpl{} - c1 := newClientFromApplicationState(t, as1, cImpl1) - require.NoError(t, c1.Start(context.Background())) - defer c1.Stop() - as2, err := srv.Register(app2, initConfig2) - require.NoError(t, err) - cImpl2 := &StubClientImpl{} - c2 := newClientFromApplicationState(t, as2, cImpl2) - require.NoError(t, c2.Start(context.Background())) - defer c2.Stop() - - // clients should get initial check-ins - require.NoError(t, waitFor(func() error { - if cImpl1.Config() != initConfig1 { - return fmt.Errorf("client never got initial config") - } - return nil - })) - require.NoError(t, waitFor(func() error { - if cImpl2.Config() != initConfig2 { - return fmt.Errorf("client never got initial config") - } - return nil - })) - - // set status differently - err = c1.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - err = c2.Status(proto.StateObserved_DEGRADED, "No upstream connection", nil) - require.NoError(t, err) - - // application states should be updated - assert.NoError(t, waitFor(func() error { - if app1.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - assert.NoError(t, waitFor(func() error { - if app2.Status() != proto.StateObserved_DEGRADED { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) -} - -func TestServer_PreventCheckinStream(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, 
&StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - as.checkinConn = false // prevent connection to check-in stream - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - assert.NoError(t, waitFor(func() error { - if cImpl.Error() == nil { - return fmt.Errorf("client never got error trying to connect twice") - } - s, ok := status.FromError(cImpl.Error()) - if !ok { - return fmt.Errorf("client didn't get a status error") - } - if s.Code() != codes.Unavailable { - return fmt.Errorf("client didn't get unavaible error") - } - return nil - })) -} - -func TestServer_PreventActionsStream(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - as.actionsConn = false // prevent connection to check-in stream - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - assert.NoError(t, waitFor(func() error { - if cImpl.Error() == nil { - return fmt.Errorf("client never got error trying to connect twice") - } - s, ok := status.FromError(cImpl.Error()) - if !ok { - return fmt.Errorf("client didn't get a status error") - } - if s.Code() != codes.Unavailable { - return fmt.Errorf("client didn't get unavaible error") - } - return nil - })) -} - -func TestServer_DestroyPreventConnectAtTLS(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - as.Destroy() - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - assert.NoError(t, waitFor(func() error { - if cImpl.Error() == nil { - return fmt.Errorf("client never 
got error trying to connect twice") - } - s, ok := status.FromError(cImpl.Error()) - if !ok { - return fmt.Errorf("client didn't get a status error") - } - if s.Code() != codes.Unavailable { - return fmt.Errorf("client didn't get unavaible error") - } - if !strings.Contains(s.Message(), "authentication handshake failed") { - return fmt.Errorf("client didn't get authentication handshake failed error") - } - return nil - })) -} - -func TestServer_UpdateConfig(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // push same config; should not increment config index - preIdx := as.expectedConfigIdx - require.NoError(t, as.UpdateConfig(initConfig)) - assert.Equal(t, preIdx, as.expectedConfigIdx) - - // push new config; should update the client - require.NoError(t, as.UpdateConfig(newConfig)) - assert.Equal(t, preIdx+1, as.expectedConfigIdx) - assert.NoError(t, waitFor(func() error { - if cImpl.Config() != newConfig { - return fmt.Errorf("client never got updated config") - } - return nil - })) -} - -func TestServer_UpdateConfigDisconnected(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - 
cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // stop the client, then update the config - c.Stop() - require.NoError(t, as.UpdateConfig(newConfig)) - - // reconnect, client should get latest config - require.NoError(t, c.Start(context.Background())) - assert.NoError(t, waitFor(func() error { - if cImpl.Config() != newConfig { - return fmt.Errorf("client never got updated config") - } - return nil - })) -} - -func TestServer_UpdateConfigStopping(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // perform stop try to update config (which will error) - done := make(chan bool) - go func() { - _ = as.Stop(500 * 
time.Millisecond) - close(done) - }() - err = as.UpdateConfig(newConfig) - assert.Error(t, ErrApplicationStopping, err) - <-done -} - -func TestServer_Stop(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // send stop to the client - done := make(chan bool) - var stopErr error - go func() { - stopErr = as.Stop(time.Second * 5) - close(done) - }() - - // process of testing the flow - // 1. server sends stop - // 2. client sends configuring - // 3. server sends stop again - // 4. client sends stopping - // 5. 
client disconnects - require.NoError(t, waitFor(func() error { - if cImpl.Stop() == 0 { - return fmt.Errorf("client never got expected stop") - } - return nil - })) - err = c.Status(proto.StateObserved_CONFIGURING, "Configuring", nil) - require.NoError(t, err) - require.NoError(t, waitFor(func() error { - if cImpl.Stop() < 1 { - return fmt.Errorf("client never got expected stop again") - } - return nil - })) - err = c.Status(proto.StateObserved_STOPPING, "Stopping", nil) - require.NoError(t, err) - require.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_STOPPING { - return fmt.Errorf("server never updated to stopping") - } - return nil - })) - c.Stop() - <-done - - // no error on stop - assert.NoError(t, stopErr) -} - -func TestServer_StopJustDisconnect(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // send stop to the client - done := make(chan bool) - var stopErr error - go func() { - stopErr = as.Stop(time.Second * 5) - close(done) - }() - - // process of testing the flow - // 1. server sends stop - // 2. 
client disconnects - require.NoError(t, waitFor(func() error { - if cImpl.Stop() == 0 { - return fmt.Errorf("client never got expected stop") - } - return nil - })) - c.Stop() - <-done - - // no error on stop - assert.NoError(t, stopErr) -} - -func TestServer_StopTimeout(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // send stop to the client - done := make(chan bool) - var stopErr error - go func() { - stopErr = as.Stop(time.Millisecond) - close(done) - }() - - // don't actually stop the client - - // timeout error on stop - <-done - assert.Equal(t, ErrApplicationStopTimedOut, stopErr) -} - -func TestServer_WatchdogFailApp(t *testing.T) { - checkMinTimeout := 300 * time.Millisecond - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}, func(s *Server) { - s.watchdogCheckInterval = 100 * time.Millisecond - s.checkInMinTimeout = checkMinTimeout - }) - defer srv.Stop() - _, err := srv.Register(app, initConfig) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_DEGRADED { - return fmt.Errorf("app status nevers set to degraded") - } - return nil - })) - assert.Equal(t, "Missed last check-in", app.Message()) - assert.NoError(t, 
waitFor(func() error { - if app.Status() != proto.StateObserved_FAILED { - return fmt.Errorf("app status nevers set to degraded") - } - return nil - })) - assert.Equal(t, "Missed two check-ins", app.Message()) -} - -func TestServer_PerformAction(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}, func(s *Server) { - s.watchdogCheckInterval = 50 * time.Millisecond - }) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl, &EchoAction{}, &SleepAction{}) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // successful action - resp, err := as.PerformAction("echo", map[string]interface{}{ - "echo": "hello world", - }, 5*time.Second) - require.NoError(t, err) - assert.Equal(t, map[string]interface{}{ - "echo": "hello world", - }, resp) - - // action error client-side - _, err = as.PerformAction("echo", map[string]interface{}{ - "bad_param": "hello world", - }, 5*time.Second) - require.Error(t, err) - - // very slow action that times out - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 10*time.Millisecond) - require.Error(t, err) - assert.Equal(t, ErrActionTimedOut, err) - - // try slow action again with the client disconnected (should timeout the same) - c.Stop() - require.NoError(t, waitFor(func() error { - as.actionsLock.RLock() - defer as.actionsLock.RUnlock() - if as.actionsDone != nil { - return fmt.Errorf("client never disconnected the actions stream") - } - return nil - })) - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 10*time.Millisecond) - require.Error(t, err) - assert.Equal(t, ErrActionTimedOut, err) - - // perform action, reconnect client, and then action should be performed - done := make(chan bool) - go func() { - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": 100 * time.Millisecond, 
- }, 5*time.Second) - close(done) - }() - require.NoError(t, c.Start(context.Background())) - <-done - require.NoError(t, err) - - // perform action, destroy application - done = make(chan bool) - go func() { - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 5*time.Second) - close(done) - }() - <-time.After(100 * time.Millisecond) - as.Destroy() - <-done - require.Error(t, err) - assert.Equal(t, ErrActionCancelled, err) - - // perform action after destroy returns cancelled - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 5*time.Second) - assert.Equal(t, ErrActionCancelled, err) -} - -func newErrorLogger(t *testing.T) *logger.Logger { - t.Helper() - - loggerCfg := logger.DefaultLoggingConfig() - loggerCfg.Level = logp.ErrorLevel - - log, err := logger.NewFromConfig("", loggerCfg, false) - require.NoError(t, err) - return log -} - -func createAndStartServer(t *testing.T, handler Handler, extraConfigs ...func(*Server)) *Server { - t.Helper() - srv, err := New(newErrorLogger(t), "localhost:0", handler, apmtest.DiscardTracer) - require.NoError(t, err) - for _, extra := range extraConfigs { - extra(srv) - } - require.NoError(t, srv.Start()) - return srv -} - -func newClientFromApplicationState(t *testing.T, as *ApplicationState, impl client.StateInterface, actions ...client.Action) client.Client { - t.Helper() - - var err error - var c client.Client - var wg sync.WaitGroup - r, w := io.Pipe() - wg.Add(1) - go func() { - c, err = client.NewFromReader(r, impl, actions...) 
- wg.Done() - }() - - require.NoError(t, as.WriteConnInfo(w)) - wg.Wait() - require.NoError(t, err) - return c -} - -type StubApp struct { - lock sync.RWMutex - status proto.StateObserved_Status - message string - payload map[string]interface{} -} - -func (a *StubApp) Status() proto.StateObserved_Status { - a.lock.RLock() - defer a.lock.RUnlock() - return a.status -} - -func (a *StubApp) Message() string { - a.lock.RLock() - defer a.lock.RUnlock() - return a.message -} - -type StubHandler struct{} - -func (h *StubHandler) OnStatusChange(as *ApplicationState, status proto.StateObserved_Status, message string, payload map[string]interface{}) { - stub, _ := as.app.(*StubApp) - stub.lock.Lock() - defer stub.lock.Unlock() - stub.status = status - stub.message = message - stub.payload = payload -} - -type StubClientImpl struct { - Lock sync.RWMutex - config string - stop int - error error -} - -func (c *StubClientImpl) Config() string { - c.Lock.RLock() - defer c.Lock.RUnlock() - return c.config -} - -func (c *StubClientImpl) Stop() int { - c.Lock.RLock() - defer c.Lock.RUnlock() - return c.stop -} - -func (c *StubClientImpl) Error() error { - c.Lock.RLock() - defer c.Lock.RUnlock() - return c.error -} - -func (c *StubClientImpl) OnConfig(config string) { - c.Lock.Lock() - defer c.Lock.Unlock() - c.config = config -} - -func (c *StubClientImpl) OnStop() { - c.Lock.Lock() - defer c.Lock.Unlock() - c.stop++ -} - -func (c *StubClientImpl) OnError(err error) { - c.Lock.Lock() - defer c.Lock.Unlock() - c.error = err -} - -type EchoAction struct{} - -func (*EchoAction) Name() string { - return "echo" -} - -func (*EchoAction) Execute(ctx context.Context, request map[string]interface{}) (map[string]interface{}, error) { - echoRaw, ok := request["echo"] - if !ok { - return nil, fmt.Errorf("missing required param of echo") - } - return map[string]interface{}{ - "echo": echoRaw, - }, nil -} - -type SleepAction struct{} - -func (*SleepAction) Name() string { - return "sleep" -} - 
-func (*SleepAction) Execute(ctx context.Context, request map[string]interface{}) (map[string]interface{}, error) { - sleepRaw, ok := request["sleep"] - if !ok { - return nil, fmt.Errorf("missing required param of slow") - } - sleep, ok := sleepRaw.(float64) - if !ok { - return nil, fmt.Errorf("sleep param must be a number") - } - timer := time.NewTimer(time.Duration(sleep)) - defer timer.Stop() - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-timer.C: - } - - return map[string]interface{}{}, nil -} - -func waitFor(check func() error) error { - started := time.Now() - for { - err := check() - if err == nil { - return nil - } - if time.Since(started) >= 5*time.Second { - return fmt.Errorf("check timed out after 5 second: %w", err) - } - time.Sleep(10 * time.Millisecond) - } -}