diff --git a/Makefile b/Makefile index 9f1ad5922d0dc..e26e3db41e708 100644 --- a/Makefile +++ b/Makefile @@ -118,6 +118,15 @@ ifneq ("$(OS)", "windows") zip -q -A $(BUILDDIR)/teleport endif +# +# make full-ent - Builds Teleport enterprise binaries +# +.PHONY:full-ent +full-ent: +ifneq ("$(OS)", "windows") + @if [ -f e/Makefile ]; then $(MAKE) -C e full; fi +endif + # # make clean - Removed all build artifacts. # @@ -386,6 +395,10 @@ buildbox-grpc: --gofast_out=plugins=grpc:.\ *.proto + cd lib/multiplexer/test && protoc -I=.:$$PROTO_INCLUDE \ + --gofast_out=plugins=grpc:.\ + *.proto + .PHONY: goinstall goinstall: go install $(BUILDFLAGS) \ diff --git a/constants.go b/constants.go index 0ce8480be05c0..17298758eaf10 100644 --- a/constants.go +++ b/constants.go @@ -51,6 +51,13 @@ const ( SSHSessionID = "SSH_SESSION_ID" ) +const ( + // HTTPNextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/1.1.'s TLS setup. + // https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + HTTPNextProtoTLS = "http/1.1" +) + const ( // HTTPSProxy is an environment variable pointing to a HTTPS proxy. HTTPSProxy = "HTTPS_PROXY" @@ -324,6 +331,12 @@ const ( // storage SchemeGCS = "gs" + // GCSTestURI turns on GCS tests + GCSTestURI = "TEST_GCS_URI" + + // AWSRunTests turns on tests executed against AWS directly + AWSRunTests = "TEST_AWS" + // Region is AWS region parameter Region = "region" diff --git a/e b/e index b462ec9ac9b27..ec20619df05c3 160000 --- a/e +++ b/e @@ -1 +1 @@ -Subproject commit b462ec9ac9b270513a6fbd716bd6dd5ff855b1f9 +Subproject commit ec20619df05c362702855184296ced13ba59354a diff --git a/go.mod b/go.mod index bccd6cf2df421..b3341dccdc03f 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/gravitational/form v0.0.0-20151109031454-c4048f792f70 github.com/gravitational/kingpin v2.1.11-0.20190130013101-742f2714c145+incompatible github.com/gravitational/license v0.0.0-20180912170534-4f189e3bd6e3 - github.com/gravitational/oxy v0.0.0-20180629203109-e4a7e35311e6 + github.com/gravitational/oxy v0.0.0-20200916204440-3eb06d921a1d github.com/gravitational/reporting v0.0.0-20180907002058-ac7b85c75c4c github.com/gravitational/roundtrip v1.0.0 github.com/gravitational/trace v1.1.6 @@ -90,7 +90,7 @@ require ( google.golang.org/api v0.10.0 google.golang.org/appengine v1.6.3 // indirect google.golang.org/genproto v0.0.0-20190916214212-f660b8655731 - google.golang.org/grpc v1.23.0 + google.golang.org/grpc v1.23.1 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 gopkg.in/yaml.v2 v2.2.8 gotest.tools v2.2.0+incompatible // indirect diff --git a/go.sum b/go.sum index d8ce64c3ff6e1..a72c15c7b7811 100644 --- a/go.sum +++ b/go.sum @@ -170,8 +170,8 @@ github.com/gravitational/license v0.0.0-20180912170534-4f189e3bd6e3 h1:vy9WwUq3H github.com/gravitational/license v0.0.0-20180912170534-4f189e3bd6e3/go.mod h1:jaxS7X2ouXfNd2Pxpybd01qNQK15UmkixKj4vtpp7f8= github.com/gravitational/logrus v0.10.1-0.20171120195323-8ab1e1b91d5f h1:FeloE/ofwzo61I0npMIJlqlrQxNPpbQBoWhzRdoUIAo= github.com/gravitational/logrus v0.10.1-0.20171120195323-8ab1e1b91d5f/go.mod h1:iMtAvwI44N8L2IBvRF4G6NccFxkSYa/Kp8jWVTg3/wQ= -github.com/gravitational/oxy v0.0.0-20180629203109-e4a7e35311e6 h1:244Hc0XnOrqZxR0Fbwt9nwlvM5HnqKWJE+r5EdG6v4A= -github.com/gravitational/oxy v0.0.0-20180629203109-e4a7e35311e6/go.mod h1:ESOxlf8BB2yG3zJ0SfZe9U6wpYu3YF3znxIICg73FYA= +github.com/gravitational/oxy v0.0.0-20200916204440-3eb06d921a1d h1:IsbTjCQ4u5mr30ceWZ4GNcrQkp/Y/J9G+s9prmJm1ac= 
+github.com/gravitational/oxy v0.0.0-20200916204440-3eb06d921a1d/go.mod h1:ESOxlf8BB2yG3zJ0SfZe9U6wpYu3YF3znxIICg73FYA= github.com/gravitational/reporting v0.0.0-20180907002058-ac7b85c75c4c h1:UwN3jo2EfZSGDchLVqH/EJ2A5GWvKROx3NJNUI6/plg= github.com/gravitational/reporting v0.0.0-20180907002058-ac7b85c75c4c/go.mod h1:rBJeI3JYVzbL7Yw2hYrp4QdKIkncb1pUHo95DyoEGns= github.com/gravitational/roundtrip v1.0.0 h1:eb+0EABfSKC8607CQ4oOyWCm9zVIfio/wW78TjQqLSc= @@ -480,6 +480,8 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= diff --git a/integration/helpers.go b/integration/helpers.go index 7d711c35fa09b..871d22b8ff03c 100644 --- a/integration/helpers.go +++ b/integration/helpers.go @@ -100,7 +100,7 @@ type TeleInstance struct { Nodes []*service.TeleportProcess // UploadEventsC is a channel for upload events - UploadEventsC chan *events.UploadEvent + UploadEventsC chan events.UploadEvent } type User struct { @@ -218,7 +218,7 @@ func NewInstance(cfg InstanceConfig) *TeleInstance { i := &TeleInstance{ Ports: cfg.Ports, Hostname: cfg.NodeName, - UploadEventsC: make(chan *events.UploadEvent, 100), + UploadEventsC: make(chan events.UploadEvent, 100), } secrets := InstanceSecrets{ SiteName: cfg.ClusterName, diff --git a/integration/integration_test.go b/integration/integration_test.go index ea45fcfc6fe68..5dcf1b9f8e8d4 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -247,7 +247,6 @@ func (s *IntSuite) TestAuditOn(c *check.C) { inForwardAgent bool auditSessionsURI string }{ - // normal teleport { inRecordLocation: services.RecordAtNode, @@ -269,6 +268,16 @@ func (s *IntSuite) TestAuditOn(c *check.C) { inForwardAgent: false, auditSessionsURI: c.MkDir(), }, + // normal teleport, sync recording + { + inRecordLocation: services.RecordAtNodeSync, + inForwardAgent: false, + }, + // recording proxy, sync recording + { + inRecordLocation: services.RecordAtProxySync, + inForwardAgent: true, + }, } for _, tt := range tests { @@ -518,7 +527,7 @@ func (s *IntSuite) TestAuditOn(c *check.C) { // the ID of the node. If sessions are being recorded at the proxy, then // SessionServerID should be that of the proxy. expectedServerID := nodeProcess.Config.HostUUID - if tt.inRecordLocation == services.RecordAtProxy { + if services.IsRecordAtProxy(tt.inRecordLocation) { expectedServerID = t.Process.Config.HostUUID } c.Assert(start.GetString(events.SessionServerID), check.Equals, expectedServerID) diff --git a/lib/auth/api.go b/lib/auth/api.go index 3c746f5da251b..902a0b57e90a4 100644 --- a/lib/auth/api.go +++ b/lib/auth/api.go @@ -1,5 +1,5 @@ /* -Copyright 2015 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
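// Illustrative sketch, not part of the diff: the integration test matrix above adds the new
// synchronous recording modes (services.RecordAtNodeSync, services.RecordAtProxySync). Assuming
// the services helpers referenced in this change set (IsRecordAtProxy, IsRecordSync) take the
// session-recording mode string, a caller might branch on them roughly as below;
// describeRecordingMode is a made-up name used only for this example.
package example

import "github.com/gravitational/teleport/lib/services"

func describeRecordingMode(mode string) string {
	switch {
	case services.IsRecordAtProxy(mode):
		// RecordAtProxy and RecordAtProxySync: the recording proxy captures the
		// session, so audit events carry the proxy's host UUID as the server ID.
		return "recorded at proxy"
	case services.IsRecordSync(mode):
		// RecordAtNodeSync: events are streamed to the auth server while the
		// session is still running, rather than uploaded after it ends.
		return "recorded at node (sync)"
	default:
		// RecordAtNode: the classic asynchronous upload at session end.
		return "recorded at node (async)"
	}
}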
@@ -20,9 +20,11 @@ import ( "context" "io" - "github.com/gravitational/trace" - + "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/session" + + "github.com/gravitational/trace" ) // Announcer specifies interface responsible for announcing presence @@ -102,6 +104,8 @@ type AccessPoint interface { ReadAccessPoint // Announcer adds methods used to announce presence Announcer + // Streamer creates and manages audit streams + events.Streamer // Semaphores provides semaphore operations services.Semaphores @@ -161,6 +165,16 @@ type Wrapper struct { NoCache AccessPoint } +// ResumeAuditStream resumes existing audit stream +func (w *Wrapper) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (events.Stream, error) { + return w.NoCache.ResumeAuditStream(ctx, sid, uploadID) +} + +// CreateAuditStream creates new audit stream +func (w *Wrapper) CreateAuditStream(ctx context.Context, sid session.ID) (events.Stream, error) { + return w.NoCache.CreateAuditStream(ctx, sid) +} + // Close closes all associated resources func (w *Wrapper) Close() error { err := w.NoCache.Close() diff --git a/lib/auth/apiserver.go b/lib/auth/apiserver.go index 3d1eb414299b2..788c455e97d72 100644 --- a/lib/auth/apiserver.go +++ b/lib/auth/apiserver.go @@ -49,6 +49,23 @@ type APIConfig struct { SessionService session.Service AuditLog events.IAuditLog Authorizer Authorizer + Emitter events.Emitter + // KeepAlivePeriod defines period between keep alives + KeepAlivePeriod time.Duration + // KeepAliveCount specifies amount of missed keep alives + // to wait for until declaring connection as broken + KeepAliveCount int +} + +// CheckAndSetDefaults checks and sets default values +func (a *APIConfig) CheckAndSetDefaults() error { + if a.KeepAlivePeriod == 0 { + a.KeepAlivePeriod = defaults.ServerKeepAliveTTL + } + if a.KeepAliveCount == 0 { + a.KeepAliveCount = defaults.KeepAliveCountMax + } + return nil } // APIServer implements http API server for AuthServer interface @@ -260,9 +277,7 @@ func (s *APIServer) withAuth(handler HandlerWithAuthFunc) httprouter.Handle { } auth := &AuthWithRoles{ authServer: s.AuthServer, - user: authContext.User, - checker: authContext.Checker, - identity: authContext.Identity, + context: *authContext, sessions: s.SessionService, alog: s.AuthServer.IAuditLog, } @@ -1869,9 +1884,9 @@ func (s *APIServer) emitAuditEvent(auth ClientI, w http.ResponseWriter, r *http. // For backwards compatibility, check if the full event struct has // been sent in the request or just the event type. 
if req.Event.Name != "" { - err = auth.EmitAuditEvent(req.Event, req.Fields) + err = auth.EmitAuditEventLegacy(req.Event, req.Fields) } else { - err = auth.EmitAuditEvent(events.Event{Name: req.Type}, req.Fields) + err = auth.EmitAuditEventLegacy(events.Event{Name: req.Type}, req.Fields) } if err != nil { return nil, trace.Wrap(err) diff --git a/lib/auth/auth.go b/lib/auth/auth.go index fa5dbfc405b8e..e2916c003f80a 100644 --- a/lib/auth/auth.go +++ b/lib/auth/auth.go @@ -41,6 +41,7 @@ import ( "github.com/gravitational/teleport/lib/limiter" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/services/local" + "github.com/gravitational/teleport/lib/session" "github.com/gravitational/teleport/lib/sshca" "github.com/gravitational/teleport/lib/sshutils" "github.com/gravitational/teleport/lib/tlsca" @@ -89,6 +90,12 @@ func NewAuthServer(cfg *InitConfig, opts ...AuthServerOption) (*AuthServer, erro if cfg.AuditLog == nil { cfg.AuditLog = events.NewDiscardAuditLog() } + if cfg.Emitter == nil { + cfg.Emitter = events.NewDiscardEmitter() + } + if cfg.Streamer == nil { + cfg.Streamer = events.NewDiscardEmitter() + } limiter, err := limiter.NewConnectionsLimiter(limiter.LimiterConfig{ MaxConnections: defaults.LimiterMaxConcurrentSignatures, @@ -109,6 +116,8 @@ func NewAuthServer(cfg *InitConfig, opts ...AuthServerOption) (*AuthServer, erro caSigningAlg: cfg.CASigningAlg, cancelFunc: cancelFunc, closeCtx: closeCtx, + emitter: cfg.Emitter, + streamer: cfg.Streamer, AuthServices: AuthServices{ Trust: cfg.Trust, Presence: cfg.Presence, @@ -217,6 +226,13 @@ type AuthServer struct { cache AuthCache limiter *limiter.ConnectionsLimiter + + // Emitter is events emitter, used to submit discrete events + emitter events.Emitter + + // streamer is events sessionstreamer, used to create continuous + // session related streams + streamer events.Streamer } // SetCache sets cache used by auth server @@ -554,9 +570,9 @@ func (s *AuthServer) generateUserCert(req certRequest) (*certs, error) { Principals: allowedLogins, Usage: req.usage, RouteToCluster: req.routeToCluster, + Traits: req.traits, KubernetesGroups: kubeGroups, KubernetesUsers: kubeUsers, - Traits: req.traits, } subject, err := identity.Subject() if err != nil { @@ -643,7 +659,7 @@ func (s *AuthServer) WithUserLock(username string, authenticateFn func() error) // PreAuthenticatedSignIn is for 2-way authentication methods like U2F where the password is // already checked before issuing the second factor challenge -func (s *AuthServer) PreAuthenticatedSignIn(user string, identity *tlsca.Identity) (services.WebSession, error) { +func (s *AuthServer) PreAuthenticatedSignIn(user string, identity tlsca.Identity) (services.WebSession, error) { roles, traits, err := services.ExtractFromIdentity(s, identity) if err != nil { return nil, trace.Wrap(err) @@ -735,7 +751,7 @@ func (s *AuthServer) CheckU2FSignResponse(user string, response *u2f.SignRespons // ExtendWebSession creates a new web session for a user based on a valid previous sessionID, // method is used to renew the web session for a user -func (s *AuthServer) ExtendWebSession(user string, prevSessionID string, identity *tlsca.Identity) (services.WebSession, error) { +func (s *AuthServer) ExtendWebSession(user string, prevSessionID string, identity tlsca.Identity) (services.WebSession, error) { prevSession, err := s.GetWebSession(user, prevSessionID) if err != nil { return nil, trace.Wrap(err) @@ -837,10 +853,16 @@ func (a *AuthServer) GenerateToken(ctx context.Context, req 
GenerateTokenRequest user := clientUsername(ctx) for _, role := range req.Roles { if role == teleport.RoleTrustedCluster { - if err := a.EmitAuditEvent(events.TrustedClusterTokenCreate, events.EventFields{ - events.EventUser: user, + if err := a.emitter.EmitAuditEvent(ctx, &events.TrustedClusterTokenCreate{ + Metadata: events.Metadata{ + Type: events.TrustedClusterTokenCreateEvent, + Code: events.TrustedClusterTokenCreateCode, + }, + UserMetadata: events.UserMetadata{ + User: user, + }, }); err != nil { - log.Warnf("Failed to emit trusted cluster token create event: %v", err) + log.WithError(err).Warn("Failed to emit trusted cluster token create event.") } } } @@ -1397,11 +1419,20 @@ func (a *AuthServer) DeleteRole(ctx context.Context, name string) error { return trace.Wrap(err) } - if err := a.EmitAuditEvent(events.RoleDeleted, events.EventFields{ - events.FieldName: name, - events.EventUser: clientUsername(ctx), - }); err != nil { - log.Warnf("Failed to emit role deleted event: %v", err) + err = a.emitter.EmitAuditEvent(a.closeCtx, &events.RoleDelete{ + Metadata: events.Metadata{ + Type: events.RoleDeletedEvent, + Code: events.RoleDeletedCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: name, + }, + }) + if err != nil { + log.WithError(err).Warnf("Failed to emit role deleted event.") } return nil @@ -1413,13 +1444,21 @@ func (a *AuthServer) upsertRole(ctx context.Context, role services.Role) error { return trace.Wrap(err) } - if err := a.EmitAuditEvent(events.RoleCreated, events.EventFields{ - events.FieldName: role.GetName(), - events.EventUser: clientUsername(ctx), - }); err != nil { - log.Warnf("Failed to emit role created event: %v", err) + err := a.emitter.EmitAuditEvent(a.closeCtx, &events.RoleCreate{ + Metadata: events.Metadata{ + Type: events.RoleCreatedEvent, + Code: events.RoleCreatedCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: role.GetName(), + }, + }) + if err != nil { + log.WithError(err).Warnf("Failed to emit role create event.") } - return nil } @@ -1451,11 +1490,17 @@ func (a *AuthServer) CreateAccessRequest(ctx context.Context, req services.Acces if err := a.DynamicAccess.CreateAccessRequest(ctx, req); err != nil { return trace.Wrap(err) } - err = a.EmitAuditEvent(events.AccessRequestCreated, events.EventFields{ - events.AccessRequestID: req.GetName(), - events.EventUser: req.GetUser(), - events.UserRoles: req.GetRoles(), - events.AccessRequestState: req.GetState().String(), + err = a.emitter.EmitAuditEvent(a.closeCtx, &events.AccessRequestCreate{ + Metadata: events.Metadata{ + Type: events.AccessRequestCreateEvent, + Code: events.AccessRequestCreateCode, + }, + UserMetadata: events.UserMetadata{ + User: req.GetUser(), + }, + Roles: req.GetRoles(), + RequestID: req.GetName(), + RequestState: req.GetState().String(), }) return trace.Wrap(err) } @@ -1464,15 +1509,24 @@ func (a *AuthServer) SetAccessRequestState(ctx context.Context, reqID string, st if err := a.DynamicAccess.SetAccessRequestState(ctx, reqID, state); err != nil { return trace.Wrap(err) } - fields := events.EventFields{ - events.AccessRequestID: reqID, - events.AccessRequestState: state.String(), - events.UpdatedBy: clientUsername(ctx), + event := &events.AccessRequestCreate{ + Metadata: events.Metadata{ + Type: events.AccessRequestUpdateEvent, + Code: events.AccessRequestUpdateCode, + }, + ResourceMetadata: events.ResourceMetadata{ + 
UpdatedBy: clientUsername(ctx), + }, + RequestID: reqID, + RequestState: state.String(), } if delegator := getDelegator(ctx); delegator != "" { - fields[events.AccessRequestDelegator] = delegator + event.Delegator = delegator + } + err := a.emitter.EmitAuditEvent(a.closeCtx, event) + if err != nil { + log.WithError(err).Warn("Failed to emit access request update event.") } - err := a.EmitAuditEvent(events.AccessRequestUpdated, fields) return trace.Wrap(err) } @@ -1588,6 +1642,43 @@ func (a *AuthServer) GetAllTunnelConnections(opts ...services.MarshalOption) (co return a.GetCache().GetAllTunnelConnections(opts...) } +// CreateAuditStream creates audit event stream +func (a *AuthServer) CreateAuditStream(ctx context.Context, sid session.ID) (events.Stream, error) { + streamer, err := a.modeStreamer() + if err != nil { + return nil, trace.Wrap(err) + } + return streamer.CreateAuditStream(ctx, sid) +} + +// ResumeAuditStream resumes the stream that has been created +func (a *AuthServer) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (events.Stream, error) { + streamer, err := a.modeStreamer() + if err != nil { + return nil, trace.Wrap(err) + } + return streamer.ResumeAuditStream(ctx, sid, uploadID) +} + +// modeStreamer creates streamer based on the event mode +func (a *AuthServer) modeStreamer() (events.Streamer, error) { + clusterConfig, err := a.GetClusterConfig() + if err != nil { + return nil, trace.Wrap(err) + } + mode := clusterConfig.GetSessionRecording() + // In sync mode, auth server forwards session control to the event log + // in addition to sending them and data events to the record storage. + if services.IsRecordSync(mode) { + return events.NewTeeStreamer(a.streamer, a.emitter), nil + } + // In async mode, clients submit session control events + // during the session in addition to writing a local + // session recording to be uploaded at the end of the session, + // so forwarding events here will result in duplicate events. 
+ return a.streamer, nil +} + // authKeepAliver is a keep aliver using auth server directly type authKeepAliver struct { sync.RWMutex diff --git a/lib/auth/auth_test.go b/lib/auth/auth_test.go index 86a7c732ed4e6..c267fadf3b06d 100644 --- a/lib/auth/auth_test.go +++ b/lib/auth/auth_test.go @@ -50,10 +50,10 @@ import ( func TestAPI(t *testing.T) { TestingT(t) } type AuthSuite struct { - bk backend.Backend - a *AuthServer - dataDir string - mockedAuditLog *events.MockAuditLog + bk backend.Backend + a *AuthServer + dataDir string + mockEmitter *events.MockEmitter } var _ = Suite(&AuthSuite{}) @@ -106,8 +106,8 @@ func (s *AuthSuite) SetUpTest(c *C) { err = s.a.SetClusterConfig(services.DefaultClusterConfig()) c.Assert(err, IsNil) - s.mockedAuditLog = events.NewMockAuditLog(0) - s.a.IAuditLog = s.mockedAuditLog + s.mockEmitter = &events.MockEmitter{} + s.a.emitter = s.mockEmitter } func (s *AuthSuite) TearDownTest(c *C) { @@ -466,8 +466,8 @@ func (s *AuthSuite) TestGenerateTokenEventsEmitted(c *C) { // test trusted cluster token emit _, err := s.a.GenerateToken(ctx, GenerateTokenRequest{Roles: teleport.Roles{teleport.RoleTrustedCluster}}) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.TrustedClusterTokenCreate) - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.TrustedClusterTokenCreateEvent) + s.mockEmitter.Reset() // test emit with multiple roles _, err = s.a.GenerateToken(ctx, GenerateTokenRequest{Roles: teleport.Roles{ @@ -476,7 +476,7 @@ func (s *AuthSuite) TestGenerateTokenEventsEmitted(c *C) { teleport.RoleAuth, }}) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.TrustedClusterTokenCreate) + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.TrustedClusterTokenCreateEvent) } func (s *AuthSuite) TestBuildRolesInvalid(c *C) { @@ -707,35 +707,35 @@ func (s *AuthSuite) TestCreateAndUpdateUserEventsEmitted(c *C) { }) err = s.a.CreateUser(ctx, user) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.UserCreate) - c.Assert(s.mockedAuditLog.EmittedEvent.Fields[events.EventUser], Equals, "some-auth-user") - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.UserCreateEvent) + c.Assert(s.mockEmitter.LastEvent().(*events.UserCreate).User, Equals, "some-auth-user") + s.mockEmitter.Reset() // test create user with existing user err = s.a.CreateUser(ctx, user) c.Assert(trace.IsAlreadyExists(err), Equals, true) - c.Assert(s.mockedAuditLog.EmittedEvent, IsNil) + c.Assert(s.mockEmitter.LastEvent(), IsNil) // test createdBy gets set to default user2, err := services.NewUser("some-other-user") c.Assert(err, IsNil) err = s.a.CreateUser(ctx, user2) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.Fields[events.EventUser], Equals, teleport.UserSystem) - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().(*events.UserCreate).User, Equals, teleport.UserSystem) + s.mockEmitter.Reset() // test update on non-existent user user3, err := services.NewUser("non-existent-user") c.Assert(err, IsNil) err = s.a.UpdateUser(ctx, user3) c.Assert(trace.IsNotFound(err), Equals, true) - c.Assert(s.mockedAuditLog.EmittedEvent, IsNil) + c.Assert(s.mockEmitter.LastEvent(), IsNil) // test update user err = s.a.UpdateUser(ctx, user) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.UserUpdate) - 
c.Assert(s.mockedAuditLog.EmittedEvent.Fields[events.EventUser], Equals, teleport.UserSystem) + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.UserUpdatedEvent) + c.Assert(s.mockEmitter.LastEvent().(*events.UserCreate).User, Equals, teleport.UserSystem) } func (s *AuthSuite) TestUpsertDeleteRoleEventsEmitted(c *C) { @@ -749,9 +749,9 @@ func (s *AuthSuite) TestUpsertDeleteRoleEventsEmitted(c *C) { err = s.a.upsertRole(ctx, roleTest) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.RoleCreated) - c.Assert(s.mockedAuditLog.EmittedEvent.Fields[events.FieldName], Equals, "test") - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.RoleCreatedEvent) + c.Assert(s.mockEmitter.LastEvent().(*events.RoleCreate).Name, Equals, "test") + s.mockEmitter.Reset() roleRetrieved, err := s.a.GetRole("test") c.Assert(err, IsNil) @@ -761,16 +761,16 @@ func (s *AuthSuite) TestUpsertDeleteRoleEventsEmitted(c *C) { err = s.a.upsertRole(ctx, roleTest) c.Assert(err, IsNil) c.Assert(roleRetrieved.Equals(roleTest), Equals, true) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.RoleCreated) - c.Assert(s.mockedAuditLog.EmittedEvent.Fields[events.FieldName], Equals, "test") - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.RoleCreatedEvent) + c.Assert(s.mockEmitter.LastEvent().(*events.RoleCreate).Name, Equals, "test") + s.mockEmitter.Reset() // test delete role err = s.a.DeleteRole(ctx, "test") c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.RoleDeleted) - c.Assert(s.mockedAuditLog.EmittedEvent.Fields[events.FieldName], Equals, "test") - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.RoleDeletedEvent) + c.Assert(s.mockEmitter.LastEvent().(*events.RoleDelete).Name, Equals, "test") + s.mockEmitter.Reset() // test role has been deleted roleRetrieved, err = s.a.GetRole("test") @@ -780,12 +780,12 @@ func (s *AuthSuite) TestUpsertDeleteRoleEventsEmitted(c *C) { // test role that doesn't exist err = s.a.DeleteRole(ctx, "test") c.Assert(trace.IsNotFound(err), Equals, true) - c.Assert(s.mockedAuditLog.EmittedEvent, IsNil) + c.Assert(s.mockEmitter.LastEvent(), IsNil) } func (s *AuthSuite) TestTrustedClusterCRUDEventEmitted(c *C) { ctx := context.Background() - s.a.IAuditLog = s.mockedAuditLog + s.a.emitter = s.mockEmitter // set up existing cluster to bypass switch cases that // makes a network request when creating new clusters @@ -809,21 +809,21 @@ func (s *AuthSuite) TestTrustedClusterCRUDEventEmitted(c *C) { _, err = s.a.UpsertTrustedCluster(ctx, tc) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.TrustedClusterCreate) - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.TrustedClusterCreateEvent) + s.mockEmitter.Reset() // test create event for switch case: when tc exists but enabled is true tc.SetEnabled(true) _, err = s.a.UpsertTrustedCluster(ctx, tc) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.TrustedClusterCreate) - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.TrustedClusterCreateEvent) + s.mockEmitter.Reset() // test delete event err = s.a.DeleteTrustedCluster(ctx, "test") c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.TrustedClusterDelete) + 
c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.TrustedClusterDeleteEvent) } func (s *AuthSuite) TestGithubConnectorCRUDEventsEmitted(c *C) { @@ -832,19 +832,19 @@ func (s *AuthSuite) TestGithubConnectorCRUDEventsEmitted(c *C) { github := services.NewGithubConnector("test", services.GithubConnectorSpecV3{}) err := s.a.upsertGithubConnector(ctx, github) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.GithubConnectorCreated) - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.GithubConnectorCreatedEvent) + s.mockEmitter.Reset() // test github update event err = s.a.upsertGithubConnector(ctx, github) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.GithubConnectorCreated) - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.GithubConnectorCreatedEvent) + s.mockEmitter.Reset() // test github delete event err = s.a.deleteGithubConnector(ctx, "test") c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.GithubConnectorDeleted) + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.GithubConnectorDeletedEvent) } func (s *AuthSuite) TestOIDCConnectorCRUDEventsEmitted(c *C) { @@ -853,19 +853,19 @@ func (s *AuthSuite) TestOIDCConnectorCRUDEventsEmitted(c *C) { oidc := services.NewOIDCConnector("test", services.OIDCConnectorSpecV2{ClientID: "a"}) err := s.a.UpsertOIDCConnector(ctx, oidc) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.OIDCConnectorCreated) - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.OIDCConnectorCreatedEvent) + s.mockEmitter.Reset() // test oidc update event err = s.a.UpsertOIDCConnector(ctx, oidc) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.OIDCConnectorCreated) - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.OIDCConnectorCreatedEvent) + s.mockEmitter.Reset() // test oidc delete event err = s.a.DeleteOIDCConnector(ctx, "test") c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.OIDCConnectorDeleted) + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.OIDCConnectorDeletedEvent) } func (s *AuthSuite) TestSAMLConnectorCRUDEventsEmitted(c *C) { @@ -896,17 +896,17 @@ func (s *AuthSuite) TestSAMLConnectorCRUDEventsEmitted(c *C) { err = s.a.UpsertSAMLConnector(ctx, saml) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.SAMLConnectorCreated) - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.SAMLConnectorCreatedEvent) + s.mockEmitter.Reset() // test saml update event err = s.a.UpsertSAMLConnector(ctx, saml) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.SAMLConnectorCreated) - s.mockedAuditLog.Reset() + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.SAMLConnectorCreatedEvent) + s.mockEmitter.Reset() // test saml delete event err = s.a.DeleteSAMLConnector(ctx, "test") c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.SAMLConnectorDeleted) + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.SAMLConnectorDeletedEvent) } diff --git a/lib/auth/auth_with_roles.go b/lib/auth/auth_with_roles.go index 
7cd56447be45f..079e55bb5d6e1 100644 --- a/lib/auth/auth_with_roles.go +++ b/lib/auth/auth_with_roles.go @@ -1,5 +1,5 @@ /* -Copyright 2015-2018 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -27,7 +27,6 @@ import ( "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/session" - "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" "github.com/gravitational/teleport/lib/wrappers" @@ -41,29 +40,33 @@ import ( // methods that focuses on authorizing every request type AuthWithRoles struct { authServer *AuthServer - checker services.AccessChecker - user services.User sessions session.Service alog events.IAuditLog - identity tlsca.Identity + // context holds authorization context + context AuthContext +} + +// Context is closed when the auth server shuts down +func (a *AuthWithRoles) Context() context.Context { + return a.authServer.closeCtx } func (a *AuthWithRoles) actionWithContext(ctx *services.Context, namespace string, resource string, action string) error { - return a.checker.CheckAccessToRule(ctx, namespace, resource, action, false) + return a.context.Checker.CheckAccessToRule(ctx, namespace, resource, action, false) } func (a *AuthWithRoles) action(namespace string, resource string, action string) error { - return a.checker.CheckAccessToRule(&services.Context{User: a.user}, namespace, resource, action, false) + return a.context.Checker.CheckAccessToRule(&services.Context{User: a.context.User}, namespace, resource, action, false) } // currentUserAction is a special checker that allows certain actions for users // even if they are not admins, e.g. update their own passwords, // or generate certificates, otherwise it will require admin privileges func (a *AuthWithRoles) currentUserAction(username string) error { - if a.hasLocalUserRole(a.checker) && username == a.user.GetName() { + if a.hasLocalUserRole(a.context.Checker) && username == a.context.User.GetName() { return nil } - return a.checker.CheckAccessToRule(&services.Context{User: a.user}, + return a.context.Checker.CheckAccessToRule(&services.Context{User: a.context.User}, defaults.Namespace, services.KindUser, services.VerbCreate, true) } @@ -72,8 +75,8 @@ func (a *AuthWithRoles) currentUserAction(username string) error { // If not, it checks if the requester has the meta KindAuthConnector access // (which grants access to all connectors). func (a *AuthWithRoles) authConnectorAction(namespace string, resource string, verb string) error { - if err := a.checker.CheckAccessToRule(&services.Context{User: a.user}, namespace, resource, verb, false); err != nil { - if err := a.checker.CheckAccessToRule(&services.Context{User: a.user}, namespace, services.KindAuthConnector, verb, false); err != nil { + if err := a.context.Checker.CheckAccessToRule(&services.Context{User: a.context.User}, namespace, resource, verb, false); err != nil { + if err := a.context.Checker.CheckAccessToRule(&services.Context{User: a.context.User}, namespace, services.KindAuthConnector, verb, false); err != nil { return trace.Wrap(err) } } @@ -83,7 +86,7 @@ func (a *AuthWithRoles) authConnectorAction(namespace string, resource string, v // hasBuiltinRole checks the type of the role set returned and the name. // Returns true if role set is builtin and the name matches. 
func (a *AuthWithRoles) hasBuiltinRole(name string) bool { - return hasBuiltinRole(a.checker, name) + return hasBuiltinRole(a.context.Checker, name) } // hasBuiltinRole checks the type of the role set returned and the name. @@ -102,10 +105,10 @@ func hasBuiltinRole(checker services.AccessChecker, name string) bool { // hasRemoteBuiltinRole checks the type of the role set returned and the name. // Returns true if role set is remote builtin and the name matches. func (a *AuthWithRoles) hasRemoteBuiltinRole(name string) bool { - if _, ok := a.checker.(RemoteBuiltinRoleSet); !ok { + if _, ok := a.context.Checker.(RemoteBuiltinRoleSet); !ok { return false } - if !a.checker.HasRole(name) { + if !a.context.Checker.HasRole(name) { return false } @@ -204,7 +207,7 @@ func (a *AuthWithRoles) RotateExternalCertAuthority(ca services.CertAuthority) e if ca == nil { return trace.BadParameter("missing certificate authority") } - ctx := &services.Context{User: a.user, Resource: ca} + ctx := &services.Context{User: a.context.User, Resource: ca} if err := a.actionWithContext(ctx, defaults.Namespace, services.KindCertAuthority, services.VerbRotate); err != nil { return trace.Wrap(err) } @@ -216,7 +219,7 @@ func (a *AuthWithRoles) UpsertCertAuthority(ca services.CertAuthority) error { if ca == nil { return trace.BadParameter("missing certificate authority") } - ctx := &services.Context{User: a.user, Resource: ca} + ctx := &services.Context{User: a.context.User, Resource: ca} if err := a.actionWithContext(ctx, defaults.Namespace, services.KindCertAuthority, services.VerbCreate); err != nil { return trace.Wrap(err) } @@ -332,10 +335,10 @@ func (a *AuthWithRoles) GenerateServerKeys(req GenerateServerKeysRequest) (*Pack return nil, trace.Wrap(err) } // username is hostID + cluster name, so make sure server requests new keys for itself - if a.user.GetName() != HostFQDN(req.HostID, clusterName) { - return nil, trace.AccessDenied("username mismatch %q and %q", a.user.GetName(), HostFQDN(req.HostID, clusterName)) + if a.context.User.GetName() != HostFQDN(req.HostID, clusterName) { + return nil, trace.AccessDenied("username mismatch %q and %q", a.context.User.GetName(), HostFQDN(req.HostID, clusterName)) } - existingRoles, err := teleport.NewRoles(a.user.GetRoles()) + existingRoles, err := teleport.NewRoles(a.context.User.GetRoles()) if err != nil { return nil, trace.Wrap(err) } @@ -375,7 +378,7 @@ func (a *AuthWithRoles) KeepAliveNode(ctx context.Context, handle services.KeepA if err != nil { return trace.Wrap(err) } - serverName, err := ExtractHostID(a.user.GetName(), clusterName) + serverName, err := ExtractHostID(a.context.User.GetName(), clusterName) if err != nil { return trace.AccessDenied("[10] access denied") } @@ -490,7 +493,7 @@ func (a *AuthWithRoles) filterNodes(nodes []services.Server) ([]services.Server, return nodes, nil } - roleset, err := services.FetchRoles(a.user.GetRoles(), a.authServer, a.user.GetTraits()) + roleset, err := services.FetchRoles(a.context.User.GetRoles(), a.authServer, a.context.User.GetTraits()) if err != nil { return nil, trace.Wrap(err) } @@ -557,7 +560,7 @@ func (a *AuthWithRoles) GetNodes(namespace string, opts ...services.MarshalOptio elapsedFilter := time.Since(startFilter) log.WithFields(logrus.Fields{ - "user": a.user.GetName(), + "user": a.context.User.GetName(), "elapsed_fetch": elapsedFetch, "elapsed_filter": elapsedFilter, }).Debugf( @@ -740,7 +743,7 @@ func (a *AuthWithRoles) PreAuthenticatedSignIn(user string) (services.WebSession if err := a.currentUserAction(user); 
err != nil { return nil, trace.Wrap(err) } - return a.authServer.PreAuthenticatedSignIn(user, &a.identity) + return a.authServer.PreAuthenticatedSignIn(user, a.context.Identity.GetIdentity()) } func (a *AuthWithRoles) GetU2FSignRequest(user string, password []byte) (*u2f.SignRequest, error) { @@ -760,7 +763,7 @@ func (a *AuthWithRoles) ExtendWebSession(user, prevSessionID string) (services.W if err := a.currentUserAction(user); err != nil { return nil, trace.Wrap(err) } - return a.authServer.ExtendWebSession(user, prevSessionID, &a.identity) + return a.authServer.ExtendWebSession(user, prevSessionID, a.context.Identity.GetIdentity()) } func (a *AuthWithRoles) GetWebSessionInfo(user string, sid string) (services.WebSession, error) { @@ -798,8 +801,8 @@ func (a *AuthWithRoles) CreateAccessRequest(ctx context.Context, req services.Ac } } // Ensure that an access request cannot outlive the identity that creates it. - if req.GetAccessExpiry().Before(a.authServer.GetClock().Now()) || req.GetAccessExpiry().After(a.identity.Expires) { - req.SetAccessExpiry(a.identity.Expires) + if req.GetAccessExpiry().Before(a.authServer.GetClock().Now()) || req.GetAccessExpiry().After(a.context.Identity.GetIdentity().Expires) { + req.SetAccessExpiry(a.context.Identity.GetIdentity().Expires) } return a.authServer.CreateAccessRequest(ctx, req) } @@ -886,15 +889,21 @@ func (a *AuthWithRoles) GetUsers(withSecrets bool) ([]services.User, error) { // TODO(fspmarshall): replace admin requirement with VerbReadWithSecrets once we've // migrated to that model. if !a.hasBuiltinRole(string(teleport.RoleAdmin)) { - err := trace.AccessDenied("user %q requested access to all users with secrets", a.user.GetName()) + err := trace.AccessDenied("user %q requested access to all users with secrets", a.context.User.GetName()) log.Warning(err) - if err := a.authServer.EmitAuditEvent(events.UserLocalLoginFailure, events.EventFields{ - events.LoginMethod: events.LoginMethodClientCert, - events.AuthAttemptSuccess: false, - // log the original internal error in audit log - events.AuthAttemptErr: trace.Unwrap(err).Error(), + if err := a.authServer.emitter.EmitAuditEvent(a.authServer.closeCtx, &events.UserLogin{ + Metadata: events.Metadata{ + Type: events.UserLoginEvent, + Code: events.UserLocalLoginFailureCode, + }, + Method: events.LoginMethodClientCert, + Status: events.Status{ + Success: false, + Error: trace.Unwrap(err).Error(), + UserMessage: err.Error(), + }, }); err != nil { - log.Warnf("Failed to emit local login failure event: %v", err) + log.WithError(err).Warn("Failed to emit local login failure event.") } return nil, trace.AccessDenied("this request can be only executed by an admin") } @@ -914,15 +923,21 @@ func (a *AuthWithRoles) GetUser(name string, withSecrets bool) (services.User, e // TODO(fspmarshall): replace admin requirement with VerbReadWithSecrets once we've // migrated to that model. 
if !a.hasBuiltinRole(string(teleport.RoleAdmin)) { - err := trace.AccessDenied("user %q requested access to user %q with secrets", a.user.GetName(), name) + err := trace.AccessDenied("user %q requested access to user %q with secrets", a.context.User.GetName(), name) log.Warning(err) - if err := a.authServer.EmitAuditEvent(events.UserLocalLoginFailure, events.EventFields{ - events.LoginMethod: events.LoginMethodClientCert, - events.AuthAttemptSuccess: false, - // log the original internal error in audit log - events.AuthAttemptErr: trace.Unwrap(err).Error(), + if err := a.authServer.emitter.EmitAuditEvent(a.authServer.closeCtx, &events.UserLogin{ + Metadata: events.Metadata{ + Type: events.UserLoginEvent, + Code: events.UserLocalLoginFailureCode, + }, + Method: events.LoginMethodClientCert, + Status: events.Status{ + Success: false, + Error: trace.Unwrap(err).Error(), + UserMessage: err.Error(), + }, }); err != nil { - log.Warnf("Failed to emit local login failure event: %v", err) + log.WithError(err).Warn("Failed to emit local login failure event.") } return nil, trace.AccessDenied("this request can be only executed by an admin") } @@ -986,34 +1001,41 @@ func (a *AuthWithRoles) GenerateUserCerts(ctx context.Context, req proto.UserCer } roles = user.GetRoles() traits = user.GetTraits() - case req.Username == a.user.GetName(): + case req.Username == a.context.User.GetName(): // user is requesting TTL for themselves, // limit the TTL to the duration of the session, to prevent // users renewing their certificates forever - if a.identity.Expires.IsZero() { - log.Warningf("Encountered identity with no expiry: %v and denied request. Must be internal logic error.", a.identity) + expires := a.context.Identity.GetIdentity().Expires + if expires.IsZero() { + log.Warningf("Encountered identity with no expiry: %v and denied request. Must be internal logic error.", a.context.Identity) return nil, trace.AccessDenied("access denied") } - req.Expires = a.identity.Expires + req.Expires = expires if req.Expires.Before(a.authServer.GetClock().Now()) { return nil, trace.AccessDenied("access denied: client credentials have expired, please relogin.") } // If the user is generating a certificate, the roles and traits come from // the logged in identity. 
- roles, traits, err = services.ExtractFromIdentity(a.authServer, &a.identity) + roles, traits, err = services.ExtractFromIdentity(a.authServer, a.context.Identity.GetIdentity()) if err != nil { return nil, trace.Wrap(err) } default: - err := trace.AccessDenied("user %q has requested to generate certs for %q.", a.user.GetName(), req.Username) + err := trace.AccessDenied("user %q has requested to generate certs for %q.", a.context.User.GetName(), req.Username) log.Warning(err) - if err := a.authServer.EmitAuditEvent(events.UserLocalLoginFailure, events.EventFields{ - events.LoginMethod: events.LoginMethodClientCert, - events.AuthAttemptSuccess: false, - // log the original internal error in audit log - events.AuthAttemptErr: trace.Unwrap(err).Error(), + if err := a.authServer.emitter.EmitAuditEvent(a.Context(), &events.UserLogin{ + Metadata: events.Metadata{ + Type: events.UserLoginEvent, + Code: events.UserLocalLoginFailureCode, + }, + Method: events.LoginMethodClientCert, + Status: events.Status{ + Success: false, + Error: trace.Unwrap(err).Error(), + UserMessage: err.Error(), + }, }); err != nil { - log.Warnf("Failed to emit local login failure event: %v", err) + log.WithError(err).Warn("Failed to emit local login failure event.") } // this error is vague on purpose, it should not happen unless someone is trying something out of loop return nil, trace.AccessDenied("this request can be only executed by an admin") @@ -1100,7 +1122,6 @@ func (a *AuthWithRoles) CreateResetPasswordToken(ctx context.Context, req Create if err := a.action(defaults.Namespace, services.KindUser, services.VerbUpdate); err != nil { return nil, trace.Wrap(err) } - return a.authServer.CreateResetPasswordToken(ctx, req) } @@ -1124,7 +1145,6 @@ func (a *AuthWithRoles) CreateUser(ctx context.Context, user services.User) erro if err := a.action(defaults.Namespace, services.KindUser, services.VerbCreate); err != nil { return trace.Wrap(err) } - return a.authServer.CreateUser(ctx, user) } @@ -1149,7 +1169,7 @@ func (a *AuthWithRoles) UpsertUser(u services.User) error { createdBy := u.GetCreatedBy() if createdBy.IsEmpty() { u.SetCreatedBy(services.CreatedBy{ - User: services.UserRef{Name: a.user.GetName()}, + User: services.UserRef{Name: a.context.User.GetName()}, }) } return a.authServer.UpsertUser(u) @@ -1341,14 +1361,126 @@ func (a *AuthWithRoles) ValidateGithubAuthCallback(q url.Values) (*GithubAuthRes return a.authServer.ValidateGithubAuthCallback(q) } -func (a *AuthWithRoles) EmitAuditEvent(event events.Event, fields events.EventFields) error { +// EmitAuditEvent emits a single audit event +func (a *AuthWithRoles) EmitAuditEvent(ctx context.Context, event events.AuditEvent) error { + if err := a.action(defaults.Namespace, services.KindEvent, services.VerbCreate); err != nil { + return trace.Wrap(err) + } + role, ok := a.context.Identity.(BuiltinRole) + if !ok || !role.IsServer() { + return trace.AccessDenied("this request can be only executed by proxy, node or auth") + } + err := events.ValidateServerMetadata(event, role.GetServerID()) + if err != nil { + // TODO: this should be a proper audit event + // notifying about access violation + log.Warningf("Rejecting audit event %v(%q) from %q: %v. 
The client is attempting to "+ + "submit events for an identity other than the one on its x509 certificate.", + event.GetType(), event.GetID(), role.GetServerID(), err) + // this message is sparse on purpose to avoid conveying extra data to an attacker + return trace.AccessDenied("failed to validate event metadata") + } + return a.authServer.emitter.EmitAuditEvent(ctx, event) +} + +// CreateAuditStream creates audit event stream +func (a *AuthWithRoles) CreateAuditStream(ctx context.Context, sid session.ID) (events.Stream, error) { + if err := a.action(defaults.Namespace, services.KindEvent, services.VerbCreate); err != nil { + return nil, trace.Wrap(err) + } + if err := a.action(defaults.Namespace, services.KindEvent, services.VerbUpdate); err != nil { + return nil, trace.Wrap(err) + } + role, ok := a.context.Identity.(BuiltinRole) + if !ok || !role.IsServer() { + return nil, trace.AccessDenied("this request can be only executed by proxy, node or auth") + } + stream, err := a.authServer.CreateAuditStream(ctx, sid) + if err != nil { + return nil, trace.Wrap(err) + } + return &streamWithRoles{ + stream: stream, + a: a, + serverID: role.GetServerID(), + }, nil +} + +// ResumeAuditStream resumes the stream that has been created +func (a *AuthWithRoles) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (events.Stream, error) { + if err := a.action(defaults.Namespace, services.KindEvent, services.VerbCreate); err != nil { + return nil, trace.Wrap(err) + } + if err := a.action(defaults.Namespace, services.KindEvent, services.VerbUpdate); err != nil { + return nil, trace.Wrap(err) + } + role, ok := a.context.Identity.(BuiltinRole) + if !ok || !role.IsServer() { + return nil, trace.AccessDenied("this request can be only executed by proxy, node or auth") + } + stream, err := a.authServer.ResumeAuditStream(ctx, sid, uploadID) + if err != nil { + return nil, trace.Wrap(err) + } + return &streamWithRoles{ + stream: stream, + a: a, + serverID: role.GetServerID(), + }, nil +} + +// streamWithRoles verifies every event +type streamWithRoles struct { + a *AuthWithRoles + serverID string + stream events.Stream +} + +// Status returns channel receiving updates about stream status +// last event index that was uploaded and upload ID +func (s *streamWithRoles) Status() <-chan events.StreamStatus { + return s.stream.Status() +} + +// Done returns channel closed when streamer is closed +// should be used to detect sending errors +func (s *streamWithRoles) Done() <-chan struct{} { + return s.stream.Done() +} + +// Complete closes the stream and marks it finalized +func (s *streamWithRoles) Complete(ctx context.Context) error { + return s.stream.Complete(ctx) +} + +// Close flushes non-uploaded flight stream data without marking +// the stream completed and closes the stream instance +func (s *streamWithRoles) Close(ctx context.Context) error { + return s.stream.Close(ctx) +} + +func (s *streamWithRoles) EmitAuditEvent(ctx context.Context, event events.AuditEvent) error { + err := events.ValidateServerMetadata(event, s.serverID) + if err != nil { + // TODO: this should be a proper audit event + // notifying about access violation + log.Warningf("Rejecting audit event %v from %v: %v. 
A node is attempting to "+ + "submit events for an identity other than the one on its x509 certificate.", + event.GetID(), s.serverID, err) + // this message is sparse on purpose to avoid conveying extra data to an attacker + return trace.AccessDenied("failed to validate event metadata") + } + return s.stream.EmitAuditEvent(ctx, event) +} + +func (a *AuthWithRoles) EmitAuditEventLegacy(event events.Event, fields events.EventFields) error { if err := a.action(defaults.Namespace, services.KindEvent, services.VerbCreate); err != nil { return trace.Wrap(err) } if err := a.action(defaults.Namespace, services.KindEvent, services.VerbUpdate); err != nil { return trace.Wrap(err) } - return a.alog.EmitAuditEvent(event, fields) + return a.alog.EmitAuditEventLegacy(event, fields) } func (a *AuthWithRoles) PostSessionSlice(slice events.SessionSlice) error { @@ -1477,7 +1609,7 @@ func (a *AuthWithRoles) GetRole(name string) (services.Role, error) { // Current-user exception: we always allow users to read roles // that they hold. This requirement is checked first to avoid // misleading denial messages in the logs. - if !utils.SliceContainsStr(a.user.GetRoles(), name) { + if !utils.SliceContainsStr(a.context.User.GetRoles(), name) { if err := a.action(defaults.Namespace, services.KindRole, services.VerbRead); err != nil { return nil, trace.Wrap(err) } @@ -1845,8 +1977,7 @@ func NewAdminAuthServer(authServer *AuthServer, sessions session.Service, alog e } return &AuthWithRoles{ authServer: authServer, - checker: ctx.Checker, - user: ctx.User, + context: *ctx, alog: alog, sessions: sessions, }, nil diff --git a/lib/auth/clt.go b/lib/auth/clt.go index e6d92f3839db5..7a4d49db157b1 100644 --- a/lib/auth/clt.go +++ b/lib/auth/clt.go @@ -1,5 +1,5 @@ /* -Copyright 2015-2019 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
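// Illustrative sketch, not part of the diff: the clt.go changes that follow add a client-side
// audit stream API (CreateAuditStream / EmitAuditEvent / Complete / Close / ResumeAuditStream).
// A minimal caller might use it roughly as below; recordEvents and its parameters are made-up
// names, and error handling is trimmed for brevity.
package example

import (
	"context"

	"github.com/gravitational/teleport/lib/auth"
	"github.com/gravitational/teleport/lib/events"
	"github.com/gravitational/teleport/lib/session"
)

func recordEvents(ctx context.Context, clt *auth.Client, sid session.ID, evts []events.AuditEvent) error {
	// Open a stream for the session; events sent on it are handled by the
	// auth server instead of being written to a local session recording.
	stream, err := clt.CreateAuditStream(ctx, sid)
	if err != nil {
		return err
	}
	for _, event := range evts {
		if err := stream.EmitAuditEvent(ctx, event); err != nil {
			// Close flushes in-flight data without marking the stream complete,
			// so it can later be picked up again with ResumeAuditStream.
			stream.Close(ctx)
			return err
		}
	}
	// Complete marks the stream finalized on the server side.
	return stream.Complete(ctx)
}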
@@ -18,6 +18,7 @@ package auth import ( "bytes" + "compress/gzip" "context" "crypto/tls" "encoding/hex" @@ -55,10 +56,15 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + ggzip "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" ) +func init() { + ggzip.SetLevel(gzip.BestSpeed) +} + const ( // CurrentVersion is a current API version CurrentVersion = services.V2 @@ -307,13 +313,14 @@ func (c *Client) grpc() (proto.AuthServiceClient, error) { }) tlsConfig := c.TLS.Clone() tlsConfig.NextProtos = []string{http2.NextProtoTLS} - log.Debugf("GRPC(): keep alive %v count: %v.", c.KeepAlivePeriod, c.KeepAliveCount) + log.Debugf("GRPC(CLIENT): keep alive %v count: %v.", c.KeepAlivePeriod, c.KeepAliveCount) conn, err := grpc.Dial(teleport.APIDomain, dialer, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: c.KeepAlivePeriod, - Timeout: c.KeepAlivePeriod * time.Duration(c.KeepAliveCount), + Time: c.KeepAlivePeriod, + Timeout: c.KeepAlivePeriod * time.Duration(c.KeepAliveCount), + PermitWithoutStream: true, }), ) if err != nil { @@ -2037,8 +2044,160 @@ func (c *Client) ValidateGithubAuthCallback(q url.Values) (*GithubAuthResponse, return &response, nil } -// EmitAuditEvent sends an auditable event to the auth server (part of evets.IAuditLog interface) -func (c *Client) EmitAuditEvent(event events.Event, fields events.EventFields) error { +// ResumeAuditStream resumes existing audit stream +func (c *Client) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (events.Stream, error) { + return c.createOrResumeAuditStream(ctx, proto.AuditStreamRequest{ + Request: &proto.AuditStreamRequest_ResumeStream{ + ResumeStream: &proto.ResumeStream{ + SessionID: string(sid), + UploadID: uploadID, + }}, + }) +} + +// CreateAuditStream creates new audit stream +func (c *Client) CreateAuditStream(ctx context.Context, sid session.ID) (events.Stream, error) { + return c.createOrResumeAuditStream(ctx, proto.AuditStreamRequest{ + Request: &proto.AuditStreamRequest_CreateStream{ + CreateStream: &proto.CreateStream{SessionID: string(sid)}}, + }) +} + +// createOrResumeAuditStream creates or resumes audit stream +func (c *Client) createOrResumeAuditStream(ctx context.Context, request proto.AuditStreamRequest) (events.Stream, error) { + clt, err := c.grpc() + if err != nil { + return nil, trace.Wrap(err) + } + closeCtx, cancel := context.WithCancel(ctx) + stream, err := clt.CreateAuditStream(closeCtx, grpc.UseCompressor(ggzip.Name)) + if err != nil { + cancel() + return nil, trail.FromGRPC(err) + } + s := &auditStreamer{ + stream: stream, + statusCh: make(chan events.StreamStatus, 1), + closeCtx: closeCtx, + cancel: cancel, + } + go s.recv() + err = s.stream.Send(&request) + if err != nil { + return nil, trace.NewAggregate(s.Close(ctx), trail.FromGRPC(err)) + } + return s, nil +} + +type auditStreamer struct { + statusCh chan events.StreamStatus + sync.RWMutex + stream proto.AuthService_CreateAuditStreamClient + err error + closeCtx context.Context + cancel context.CancelFunc +} + +// Close flushes non-uploaded flight stream data without marking +// the stream completed and closes the stream instance +func (s *auditStreamer) Close(ctx context.Context) error { + defer s.closeWithError(nil) + return trail.FromGRPC(s.stream.Send(&proto.AuditStreamRequest{ + Request: &proto.AuditStreamRequest_FlushAndCloseStream{ + 
FlushAndCloseStream: &proto.FlushAndCloseStream{}, + }, + })) +} + +// Complete completes stream +func (s *auditStreamer) Complete(ctx context.Context) error { + return trail.FromGRPC(s.stream.Send(&proto.AuditStreamRequest{ + Request: &proto.AuditStreamRequest_CompleteStream{ + CompleteStream: &proto.CompleteStream{}, + }, + })) +} + +// Status returns channel receiving updates about stream status +// last event index that was uploaded and upload ID +func (s *auditStreamer) Status() <-chan events.StreamStatus { + return s.statusCh +} + +// EmitAuditEvent emits audit event +func (s *auditStreamer) EmitAuditEvent(ctx context.Context, event events.AuditEvent) error { + oneof, err := events.ToOneOf(event) + if err != nil { + return trace.Wrap(err) + } + err = trail.FromGRPC(s.stream.Send(&proto.AuditStreamRequest{ + Request: &proto.AuditStreamRequest_Event{Event: oneof}, + })) + if err != nil { + log.WithError(err).Errorf("Failed to send event.") + s.closeWithError(err) + return trace.Wrap(err) + } + return nil +} + +// Done returns channel closed when streamer is closed +// should be used to detect sending errors +func (s *auditStreamer) Done() <-chan struct{} { + return s.closeCtx.Done() +} + +// Error returns last error of the stream +func (s *auditStreamer) Error() error { + s.RLock() + defer s.RUnlock() + return s.err +} + +// recv is necessary to receive errors from the +// server, otherwise no errors will be propagated +func (s *auditStreamer) recv() { + for { + status, err := s.stream.Recv() + if err != nil { + s.closeWithError(trail.FromGRPC(err)) + return + } + select { + case <-s.closeCtx.Done(): + return + case s.statusCh <- *status: + default: + } + } +} + +func (s *auditStreamer) closeWithError(err error) { + s.cancel() + s.Lock() + defer s.Unlock() + s.err = err +} + +// EmitAuditEvent sends an auditable event to the auth server +func (c *Client) EmitAuditEvent(ctx context.Context, event events.AuditEvent) error { + grpcEvent, err := events.ToOneOf(event) + if err != nil { + return trace.Wrap(err) + } + clt, err := c.grpc() + if err != nil { + return trace.Wrap(err) + } + _, err = clt.EmitAuditEvent(ctx, grpcEvent) + if err != nil { + return trail.FromGRPC(err) + } + return nil +} + +// EmitAuditEventLegacy sends an auditable event to the auth server (part of events.IAuditLog interface) +func (c *Client) EmitAuditEventLegacy(event events.Event, fields events.EventFields) error { _, err := c.PostJSON(c.Endpoint("events"), &auditEventReq{ Event: event, Fields: fields, @@ -2938,6 +3097,8 @@ type ClientI interface { ProvisioningService services.Trust events.IAuditLog + events.Streamer + events.Emitter services.Presence services.Access services.DynamicAccess diff --git a/lib/auth/github.go b/lib/auth/github.go index 2d62b6ec97ae5..c922acc0e1537 100644 --- a/lib/auth/github.go +++ b/lib/auth/github.go @@ -67,12 +67,19 @@ func (s *AuthServer) upsertGithubConnector(ctx context.Context, connector servic if err := s.Identity.UpsertGithubConnector(connector); err != nil { return trace.Wrap(err) } - - if err := s.EmitAuditEvent(events.GithubConnectorCreated, events.EventFields{ - events.FieldName: connector.GetName(), - events.EventUser: clientUsername(ctx), + if err := s.emitter.EmitAuditEvent(s.closeCtx, &events.GithubConnectorCreate{ + Metadata: events.Metadata{ + Type: events.GithubConnectorCreatedEvent, + Code: events.GithubConnectorCreatedCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: 
connector.GetName(), + }, }); err != nil { - log.Warnf("Failed to emit GitHub connector create event: %v", err) + log.WithError(err).Warn("Failed to emit GitHub connector create event.") } return nil @@ -84,11 +91,19 @@ func (s *AuthServer) deleteGithubConnector(ctx context.Context, connectorName st return trace.Wrap(err) } - if err := s.EmitAuditEvent(events.GithubConnectorDeleted, events.EventFields{ - events.FieldName: connectorName, - events.EventUser: clientUsername(ctx), + if err := s.emitter.EmitAuditEvent(s.closeCtx, &events.GithubConnectorDelete{ + Metadata: events.Metadata{ + Type: events.GithubConnectorDeletedEvent, + Code: events.GithubConnectorDeletedCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: connectorName, + }, }); err != nil { - log.Warnf("Failed to emit GitHub connector delete event: %v", err) + log.WithError(err).Warn("Failed to emit GitHub connector delete event.") } return nil @@ -116,30 +131,32 @@ type GithubAuthResponse struct { // ValidateGithubAuthCallback validates Github auth callback redirect func (a *AuthServer) ValidateGithubAuthCallback(q url.Values) (*GithubAuthResponse, error) { re, err := a.validateGithubAuthCallback(q) - if err != nil { - fields := events.EventFields{ - events.LoginMethod: events.LoginMethodGithub, - events.AuthAttemptSuccess: false, - events.AuthAttemptErr: err.Error(), - } - if re != nil && re.claims != nil { - fields[events.IdentityAttributes] = re.claims - } - if err := a.EmitAuditEvent(events.UserSSOLoginFailure, fields); err != nil { - log.Warnf("Failed to emit GitHub login failure event: %v", err) - } - return nil, trace.Wrap(err) + event := &events.UserLogin{ + Metadata: events.Metadata{ + Type: events.UserLoginEvent, + }, + Method: events.LoginMethodGithub, } - fields := events.EventFields{ - events.EventUser: re.auth.Username, - events.AuthAttemptSuccess: true, - events.LoginMethod: events.LoginMethodGithub, + if re != nil && re.claims != nil { + attributes, err := events.EncodeMapStrings(re.claims) + if err != nil { + log.WithError(err).Debugf("Failed to encode identity attributes.") + } else { + event.IdentityAttributes = attributes + } } - if re.claims != nil { - fields[events.IdentityAttributes] = re.claims + if err != nil { + event.Code = events.UserSSOLoginFailureCode + event.Status.Success = false + event.Status.Error = err.Error() + a.emitter.EmitAuditEvent(a.closeCtx, event) + return nil, trace.Wrap(err) } - if err := a.EmitAuditEvent(events.UserSSOLogin, fields); err != nil { - log.Warnf("Failed to emit GitHub login event: %v", err) + event.Code = events.UserSSOLoginFailureCode + event.Status.Success = true + event.User = re.auth.Username + if err := a.emitter.EmitAuditEvent(a.closeCtx, event); err != nil { + log.WithError(err).Warn("Failed to emit Github login event.") } return &re.auth, nil } @@ -580,13 +597,19 @@ func (c *githubAPIClient) getTeams() ([]teamResponse, error) { // Print warning to Teleport logs as well as the Audit Log. 
log.Warnf(warningMessage) - if err := c.authServer.EmitAuditEvent(events.UserSSOLoginFailure, events.EventFields{ - events.LoginMethod: events.LoginMethodGithub, - events.AuthAttemptMessage: warningMessage, + if err := c.authServer.emitter.EmitAuditEvent(c.authServer.closeCtx, &events.UserLogin{ + Metadata: events.Metadata{ + Type: events.UserLoginEvent, + Code: events.UserSSOLoginFailureCode, + }, + Method: events.LoginMethodGithub, + Status: events.Status{ + Success: false, + Error: warningMessage, + }, }); err != nil { - log.Warnf("Failed to emit GitHub login failure event: %v", err) + log.WithError(err).Warn("Failed to emit GitHub login failure event.") } - return result, nil } diff --git a/lib/auth/grpcserver.go b/lib/auth/grpcserver.go index 79e671ea5f738..963dfd803a78f 100644 --- a/lib/auth/grpcserver.go +++ b/lib/auth/grpcserver.go @@ -1,5 +1,5 @@ /* -Copyright 2018-2019 Gravitational, Inc. +Copyright 2018-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,15 +18,17 @@ package auth import ( "context" + "crypto/tls" "io" - "net/http" - "strings" "time" "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/auth/proto" "github.com/gravitational/teleport/lib/backend" + "github.com/gravitational/teleport/lib/events" + "github.com/gravitational/teleport/lib/httplib" "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/session" "github.com/gravitational/teleport/lib/utils" "github.com/golang/protobuf/ptypes/empty" @@ -34,6 +36,8 @@ import ( "github.com/gravitational/trace/trail" "github.com/sirupsen/logrus" "google.golang.org/grpc" + _ "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/peer" ) @@ -41,10 +45,24 @@ import ( type GRPCServer struct { *logrus.Entry APIConfig - // httpHandler is a server serving HTTP API - httpHandler http.Handler - // grpcHandler is golang GRPC handler - grpcHandler *grpc.Server + server *grpc.Server +} + +// EmitAuditEvent emits audit event +func (g *GRPCServer) EmitAuditEvent(ctx context.Context, req *events.OneOf) (*empty.Empty, error) { + auth, err := g.authenticate(ctx) + if err != nil { + return nil, trail.ToGRPC(err) + } + event, err := events.FromOneOf(*req) + if err != nil { + return nil, trail.ToGRPC(err) + } + err = auth.EmitAuditEvent(ctx, event) + if err != nil { + return nil, trail.ToGRPC(err) + } + return &empty.Empty{}, nil } // SendKeepAlives allows node to send a stream of keep alive requests @@ -72,6 +90,127 @@ func (g *GRPCServer) SendKeepAlives(stream proto.AuthService_SendKeepAlivesServe } } +// CreateAuditStream creates or resumes audit event stream +func (g *GRPCServer) CreateAuditStream(stream proto.AuthService_CreateAuditStreamServer) error { + auth, err := g.authenticate(stream.Context()) + if err != nil { + return trail.ToGRPC(err) + } + + var eventStream events.Stream + g.Debugf("CreateAuditStream connection from %v.", auth.User.GetName()) + streamStart := time.Now() + processed := int64(0) + counter := 0 + forwardEvents := func(eventStream events.Stream) { + for { + select { + case <-stream.Context().Done(): + return + case statusUpdate := <-eventStream.Status(): + if err := stream.Send(&statusUpdate); err != nil { + g.WithError(err).Debugf("Failed to send status update.") + } + } + } + } + + closeStream := func(eventStream events.Stream) { + if err := eventStream.Close(auth.Context()); err != nil { + 
g.WithError(err).Warningf("Failed to flush close the stream.") + } else { + g.Debugf("Flushed and closed the stream.") + } + } + + for { + request, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + g.WithError(err).Debugf("Failed to receive stream request.") + return trail.ToGRPC(err) + } + if create := request.GetCreateStream(); create != nil { + if eventStream != nil { + return trail.ToGRPC(trace.BadParameter("stream is already created or resumed")) + } + eventStream, err = auth.CreateAuditStream(stream.Context(), session.ID(create.SessionID)) + if err != nil { + return trace.Wrap(err) + } + g.Debugf("Created stream: %v.", err) + go forwardEvents(eventStream) + defer closeStream(eventStream) + } else if resume := request.GetResumeStream(); resume != nil { + if eventStream != nil { + return trail.ToGRPC(trace.BadParameter("stream is already created or resumed")) + } + eventStream, err = auth.ResumeAuditStream(stream.Context(), session.ID(resume.SessionID), resume.UploadID) + if err != nil { + return trace.Wrap(err) + } + g.Debugf("Resumed stream: %v.", err) + go forwardEvents(eventStream) + defer closeStream(eventStream) + } else if complete := request.GetCompleteStream(); complete != nil { + if eventStream == nil { + return trail.ToGRPC(trace.BadParameter("stream is not initialized yet, cannot complete")) + } + // do not use stream context to give the auth server finish the upload + // even if the stream's context is cancelled + err := eventStream.Complete(auth.Context()) + g.Debugf("Completed stream: %v.", err) + if err != nil { + return trail.ToGRPC(err) + } + return nil + } else if flushAndClose := request.GetFlushAndCloseStream(); flushAndClose != nil { + if eventStream == nil { + return trail.ToGRPC(trace.BadParameter("stream is not initialized yet, cannot flush and close")) + } + // flush and close is always done + return nil + } else if oneof := request.GetEvent(); oneof != nil { + if eventStream == nil { + return trail.ToGRPC( + trace.BadParameter("stream cannot receive an event without first being created or resumed")) + } + event, err := events.FromOneOf(*oneof) + if err != nil { + g.WithError(err).Debugf("Failed to decode event.") + return trail.ToGRPC(err) + } + start := time.Now() + err = eventStream.EmitAuditEvent(stream.Context(), event) + if err != nil { + return trail.ToGRPC(err) + } + event.Size() + processed += int64(event.Size()) + seconds := time.Since(streamStart) / time.Second + counter++ + if counter%logInterval == 0 { + if seconds > 0 { + kbytes := float64(processed) / 1000 + g.Debugf("Processed %v events, tx rate kbytes %v/second.", counter, kbytes/float64(seconds)) + } + } + diff := time.Since(start) + if diff > 100*time.Millisecond { + log.Warningf("EmitAuditEvent(%v) took longer than 100ms: %v", event.GetType(), time.Since(event.GetTime())) + } + } else { + g.Errorf("Rejecting unsupported stream request: %v.", request) + return trail.ToGRPC(trace.BadParameter("unsupported stream request")) + } + } +} + +// logInterval is used to log stats after this many events +const logInterval = 10000 + // WatchEvents returns a new stream of cluster events func (g *GRPCServer) WatchEvents(watch *proto.Watch, stream proto.AuthService_WatchEventsServer) error { auth, err := g.authenticate(stream.Context()) @@ -524,38 +663,76 @@ func (g *GRPCServer) authenticate(ctx context.Context) (*grpcContext, error) { AuthContext: authContext, AuthWithRoles: &AuthWithRoles{ authServer: g.AuthServer, - user: authContext.User, - checker: authContext.Checker, - 
identity: authContext.Identity, + context: *authContext, sessions: g.SessionService, alog: g.AuthServer.IAuditLog, }, }, nil } +// GRPCServerConfig specifies GRPC server configuration +type GRPCServerConfig struct { + // APIConfig is GRPC server API configuration + APIConfig + // TLS is GRPC server config + TLS *tls.Config + // UnaryInterceptor intercepts individual GRPC requests + // for authentication and rate limiting + UnaryInterceptor grpc.UnaryServerInterceptor + // UnaryInterceptor intercepts GRPC streams + // for authentication and rate limiting + StreamInterceptor grpc.StreamServerInterceptor +} + +// CheckAndSetDefaults checks and sets default values +func (cfg *GRPCServerConfig) CheckAndSetDefaults() error { + if cfg.TLS == nil { + return trace.BadParameter("missing parameter TLS") + } + if cfg.UnaryInterceptor == nil { + return trace.BadParameter("missing parameter UnaryInterceptor") + } + if cfg.StreamInterceptor == nil { + return trace.BadParameter("missing parameter StreamInterceptor") + } + return nil +} + // NewGRPCServer returns a new instance of GRPC server -func NewGRPCServer(cfg APIConfig) http.Handler { +func NewGRPCServer(cfg GRPCServerConfig) (*GRPCServer, error) { + if err := cfg.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + log.Debugf("GRPC(SERVER): keep alive %v count: %v.", cfg.KeepAlivePeriod, cfg.KeepAliveCount) + opts := []grpc.ServerOption{ + grpc.Creds(&httplib.TLSCreds{ + Config: cfg.TLS, + }), + grpc.UnaryInterceptor(cfg.UnaryInterceptor), + grpc.StreamInterceptor(cfg.StreamInterceptor), + grpc.KeepaliveParams( + keepalive.ServerParameters{ + Time: cfg.KeepAlivePeriod, + Timeout: cfg.KeepAlivePeriod * time.Duration(cfg.KeepAliveCount), + }, + ), + grpc.KeepaliveEnforcementPolicy( + keepalive.EnforcementPolicy{ + MinTime: cfg.KeepAlivePeriod, + PermitWithoutStream: true, + }, + ), + } + server := grpc.NewServer(opts...) 
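// NOTE (editorial sketch, not part of this patch): the enforcement policy built
// above rejects clients that ping more often than MinTime; grpc-go answers such
// pings with a GOAWAY (ENHANCE_YOUR_CALM) and drops the connection. The client
// therefore has to mirror these settings, which is what the Client.grpc() change
// in lib/auth/api.go does. Roughly, assuming the same KeepAlivePeriod and
// KeepAliveCount values (addr, tlsConfig, keepAlivePeriod and keepAliveCount are
// placeholders here):
//
//	conn, err := grpc.Dial(addr,
//		grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
//		grpc.WithKeepaliveParams(keepalive.ClientParameters{
//			Time:                keepAlivePeriod,
//			Timeout:             keepAlivePeriod * time.Duration(keepAliveCount),
//			PermitWithoutStream: true,
//		}),
//	)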
authServer := &GRPCServer{ - APIConfig: cfg, + APIConfig: cfg.APIConfig, Entry: logrus.WithFields(logrus.Fields{ trace.Component: teleport.Component(teleport.ComponentAuth, teleport.ComponentGRPC), }), - httpHandler: NewAPIServer(&cfg), - grpcHandler: grpc.NewServer(), - } - proto.RegisterAuthServiceServer(authServer.grpcHandler, authServer) - return authServer -} - -// ServeHTTP dispatches requests based on the request type -func (g *GRPCServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // magic combo match signifying GRPC request - // https://grpc.io/blog/coreos - if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { - g.grpcHandler.ServeHTTP(w, r) - } else { - g.httpHandler.ServeHTTP(w, r) + server: server, } + proto.RegisterAuthServiceServer(authServer.server, authServer) + return authServer, nil } func eventToGRPC(in services.Event) (*proto.Event, error) { diff --git a/lib/auth/helpers.go b/lib/auth/helpers.go index 65711d83ab01b..4e934b9d84447 100644 --- a/lib/auth/helpers.go +++ b/lib/auth/helpers.go @@ -76,12 +76,20 @@ func (cfg *TestAuthServerConfig) CheckAndSetDefaults() error { // CreateUploaderDir creates directory for file uploader service func CreateUploaderDir(dir string) error { + // DELETE IN(5.1.0) + // this folder is no longer used past 5.0 upgrade err := os.MkdirAll(filepath.Join(dir, teleport.LogsDir, teleport.ComponentUpload, events.SessionLogsDir, defaults.Namespace), teleport.SharedDirMode) if err != nil { return trace.ConvertSystemError(err) } + err = os.MkdirAll(filepath.Join(dir, teleport.LogsDir, teleport.ComponentUpload, + events.StreamingLogsDir, defaults.Namespace), teleport.SharedDirMode) + if err != nil { + return trace.ConvertSystemError(err) + } + return nil } @@ -124,14 +132,16 @@ func NewTestAuthServer(cfg TestAuthServerConfig) (*TestAuthServer, error) { // Wrap backend in sanitizer like in production. 
srv.Backend = backend.NewSanitizer(b) - srv.AuditLog, err = events.NewAuditLog(events.AuditLogConfig{ + localLog, err := events.NewAuditLog(events.AuditLogConfig{ DataDir: cfg.Dir, RecordSessions: true, ServerID: cfg.ClusterName, + UploadHandler: events.NewMemoryUploader(), }) if err != nil { return nil, trace.Wrap(err) } + srv.AuditLog = localLog srv.SessionServer, err = session.New(srv.Backend) if err != nil { @@ -155,6 +165,7 @@ func NewTestAuthServer(cfg TestAuthServerConfig) (*TestAuthServer, error) { Identity: identity, AuditLog: srv.AuditLog, SkipPeriodicOperations: true, + Emitter: localLog, }) if err != nil { return nil, trace.Wrap(err) @@ -455,7 +466,13 @@ func NewTestTLSServer(cfg TestTLSServerConfig) (*TestTLSServer, error) { return nil, trace.Wrap(err) } + srv.Listener, err = net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, trace.Wrap(err) + } + srv.TLSServer, err = NewTLSServer(TLSServerConfig{ + Listener: srv.Listener, AccessPoint: accessPoint, TLS: tlsConfig, APIConfig: *srv.APIConfig, @@ -592,14 +609,7 @@ func (t *TestTLSServer) Addr() net.Addr { // Start starts TLS server on loopback address on the first lisenting socket func (t *TestTLSServer) Start() error { - var err error - if t.Listener == nil { - t.Listener, err = net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return trace.Wrap(err) - } - } - go t.TLSServer.Serve(t.Listener) + go t.TLSServer.Serve() return nil } diff --git a/lib/auth/init.go b/lib/auth/init.go index 48fdb61517f25..87492269611ba 100644 --- a/lib/auth/init.go +++ b/lib/auth/init.go @@ -143,6 +143,13 @@ type InitConfig struct { // handshake) signatures for both host and user CAs. This option only // affects newly-created CAs. CASigningAlg *string + + // Emitter is events emitter, used to submit discrete events + Emitter events.Emitter + + // Streamer is events sessionstreamer, used to create continuous + // session related streams + Streamer events.Streamer } // Init instantiates and configures an instance of AuthServer diff --git a/lib/auth/methods.go b/lib/auth/methods.go index f2fd06c87c5d0..00c2c76ac72d0 100644 --- a/lib/auth/methods.go +++ b/lib/auth/methods.go @@ -84,23 +84,26 @@ type SessionCreds struct { // AuthenticateUser authenticates user based on the request type func (s *AuthServer) AuthenticateUser(req AuthenticateUserRequest) error { err := s.authenticateUser(req) - var emitErr error + event := &events.UserLogin{ + Metadata: events.Metadata{ + Type: events.UserLoginEvent, + Code: events.UserLocalLoginFailureCode, + }, + UserMetadata: events.UserMetadata{ + User: req.Username, + }, + Method: events.LoginMethodLocal, + } if err != nil { - emitErr = s.EmitAuditEvent(events.UserLocalLoginFailure, events.EventFields{ - events.EventUser: req.Username, - events.LoginMethod: events.LoginMethodLocal, - events.AuthAttemptSuccess: false, - events.AuthAttemptErr: err.Error(), - }) + event.Code = events.UserLocalLoginFailureCode + event.Status.Success = false + event.Status.Error = err.Error() } else { - emitErr = s.EmitAuditEvent(events.UserLocalLogin, events.EventFields{ - events.EventUser: req.Username, - events.LoginMethod: events.LoginMethodLocal, - events.AuthAttemptSuccess: true, - }) + event.Code = events.UserLocalLoginCode + event.Status.Success = true } - if emitErr != nil { - log.Warnf("Failed to emit user login event: %v", err) + if err := s.emitter.EmitAuditEvent(s.closeCtx, event); err != nil { + log.WithError(err).Warn("Failed to emit login event.") } return err } @@ -359,16 +362,20 @@ func (s *AuthServer) 
AuthenticateSSHUser(req AuthenticateSSHRequest) (*SSHLoginR // emitNoLocalAuthEvent creates and emits a local authentication is disabled message. func (s *AuthServer) emitNoLocalAuthEvent(username string) { - fields := events.EventFields{ - events.AuthAttemptSuccess: false, - events.AuthAttemptErr: noLocalAuth, - } - if username != "" { - fields[events.EventUser] = username - } - - if err := s.IAuditLog.EmitAuditEvent(events.AuthAttemptFailure, fields); err != nil { - log.Warnf("Failed to emit no local auth event: %v", err) + if err := s.emitter.EmitAuditEvent(s.closeCtx, &events.AuthAttempt{ + Metadata: events.Metadata{ + Type: events.AuthAttemptEvent, + Code: events.AuthAttemptFailureCode, + }, + UserMetadata: events.UserMetadata{ + User: username, + }, + Status: events.Status{ + Success: false, + Error: noLocalAuth, + }, + }); err != nil { + log.WithError(err).Warn("Failed to emit no local auth event.") } } diff --git a/lib/auth/middleware.go b/lib/auth/middleware.go index afb4e5bb07224..c7ee48a7b4459 100644 --- a/lib/auth/middleware.go +++ b/lib/auth/middleware.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Gravitational, Inc. +Copyright 2017-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -27,17 +27,24 @@ import ( "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/limiter" + "github.com/gravitational/teleport/lib/multiplexer" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" "github.com/gravitational/trace" + "github.com/gravitational/trace/trail" "github.com/sirupsen/logrus" "golang.org/x/net/http2" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/peer" ) // TLSServerConfig is a configuration for TLS server type TLSServerConfig struct { + // Listener is a listener to bind to + Listener net.Listener // TLS is a base TLS configuration TLS *tls.Config // API is API server configuration @@ -51,10 +58,18 @@ type TLSServerConfig struct { // AcceptedUsage restricts authentication // to a subset of certificates based on the metadata AcceptedUsage []string + // ID is an optional debugging ID + ID string } // CheckAndSetDefaults checks and sets default values func (c *TLSServerConfig) CheckAndSetDefaults() error { + if err := c.APIConfig.CheckAndSetDefaults(); err != nil { + return trace.Wrap(err) + } + if c.Listener == nil { + return trace.BadParameter("missing parameter Listener") + } if c.TLS == nil { return trace.BadParameter("missing parameter TLS") } @@ -79,11 +94,17 @@ func (c *TLSServerConfig) CheckAndSetDefaults() error { // TLSServer is TLS auth server type TLSServer struct { - *http.Server - // TLSServerConfig is TLS server configuration used for auth server - TLSServerConfig - // Entry is TLS server logging entry - *logrus.Entry + // httpServer is HTTP/1.1 part of the server + httpServer *http.Server + // grpcServer is GRPC server + grpcServer *GRPCServer + // cfg is TLS server configuration used for auth server + cfg TLSServerConfig + // log is TLS server logging entry + log *logrus.Entry + // mux is a listener that multiplexes HTTP/2 and HTTP/1.1 + // on different listeners + mux *multiplexer.TLSListener } // NewTLSServer returns new unstarted TLS server @@ -103,8 +124,9 @@ func NewTLSServer(cfg TLSServerConfig) (*TLSServer, error) { authMiddleware := &AuthMiddleware{ 
AccessPoint: cfg.AccessPoint, AcceptedUsage: cfg.AcceptedUsage, + Limiter: limiter, } - authMiddleware.Wrap(NewGRPCServer(cfg.APIConfig)) + authMiddleware.Wrap(NewAPIServer(&cfg.APIConfig)) // Wrap sets the next middleware in chain to the authMiddleware limiter.WrapHandle(authMiddleware) // force client auth if given @@ -112,22 +134,91 @@ func NewTLSServer(cfg TLSServerConfig) (*TLSServer, error) { cfg.TLS.NextProtos = []string{http2.NextProtoTLS} server := &TLSServer{ - TLSServerConfig: cfg, - Server: &http.Server{ + cfg: cfg, + httpServer: &http.Server{ Handler: limiter, ReadHeaderTimeout: defaults.DefaultDialTimeout, }, - Entry: logrus.WithFields(logrus.Fields{ + log: logrus.WithFields(logrus.Fields{ trace.Component: cfg.Component, }), } - server.TLS.GetConfigForClient = server.GetConfigForClient + server.cfg.TLS.GetConfigForClient = server.GetConfigForClient + + server.grpcServer, err = NewGRPCServer(GRPCServerConfig{ + TLS: server.cfg.TLS, + APIConfig: cfg.APIConfig, + UnaryInterceptor: authMiddleware.UnaryInterceptor, + StreamInterceptor: authMiddleware.StreamInterceptor, + }) + if err != nil { + return nil, trace.Wrap(err) + } + + server.mux, err = multiplexer.NewTLSListener(multiplexer.TLSListenerConfig{ + Listener: tls.NewListener(cfg.Listener, server.cfg.TLS), + ID: cfg.ID, + }) + if err != nil { + return nil, trace.Wrap(err) + } + return server, nil } -// Serve takes TCP listener, upgrades to TLS using config and starts serving -func (t *TLSServer) Serve(listener net.Listener) error { - return t.Server.Serve(tls.NewListener(listener, t.TLS)) +// Close closes TLS server non-gracefully - terminates in flight connections +func (t *TLSServer) Close() error { + errC := make(chan error, 2) + go func() { + errC <- t.httpServer.Close() + }() + go func() { + t.grpcServer.server.Stop() + errC <- nil + }() + errors := []error{} + for i := 0; i < 2; i++ { + errors = append(errors, <-errC) + } + return trace.NewAggregate(errors...) +} + +// Shutdown shuts down TLS server +func (t *TLSServer) Shutdown(ctx context.Context) error { + errC := make(chan error, 2) + go func() { + errC <- t.httpServer.Shutdown(ctx) + }() + go func() { + t.grpcServer.server.GracefulStop() + errC <- nil + }() + errors := []error{} + for i := 0; i < 2; i++ { + errors = append(errors, <-errC) + } + return trace.NewAggregate(errors...) +} + +// Serve starts GRPC and HTTP1.1 services on the mux listener +func (t *TLSServer) Serve() error { + errC := make(chan error, 2) + go func() { + if err := t.mux.Serve(); err != nil { + t.log.WithError(err).Warningf("Mux serve failed.") + } + }() + go func() { + errC <- t.httpServer.Serve(t.mux.HTTP()) + }() + go func() { + errC <- t.grpcServer.server.Serve(t.mux.HTTP2()) + }() + errors := []error{} + for i := 0; i < 2; i++ { + errors = append(errors, <-errC) + } + return trace.NewAggregate(errors...) } // GetConfigForClient is getting called on every connection @@ -144,12 +235,12 @@ func (t *TLSServer) GetConfigForClient(info *tls.ClientHelloInfo) (*tls.Config, // // Instead, this case should either default to current cluster CAs or // return an error. - t.Debugf("Client %q sent %q in SNI, which causes this auth server to send all known CAs in TLS handshake. If this client is version 4.2 or older, this is expected; if this client is version 4.3 or above, please let us know at https://github.com/gravitational/teleport/issues/new", info.Conn.RemoteAddr(), info.ServerName) + t.log.Debugf("Client %q sent %q in SNI, which causes this auth server to send all known CAs in TLS handshake. 
If this client is version 4.2 or older, this is expected; if this client is version 4.3 or above, please let us know at https://github.com/gravitational/teleport/issues/new", info.Conn.RemoteAddr(), info.ServerName) default: clusterName, err = DecodeClusterName(info.ServerName) if err != nil { if !trace.IsNotFound(err) { - t.Warningf("Client sent unsupported cluster name %q, what resulted in error %v.", info.ServerName, err) + t.log.Warningf("Client sent unsupported cluster name %q, what resulted in error %v.", info.ServerName, err) return nil, trace.AccessDenied("access is denied") } } @@ -159,13 +250,13 @@ func (t *TLSServer) GetConfigForClient(info *tls.ClientHelloInfo) (*tls.Config, // certificate authorities. // TODO(klizhentas) drop connections of the TLS cert authorities // that are not trusted - pool, err := ClientCertPool(t.AccessPoint, clusterName) + pool, err := ClientCertPool(t.cfg.AccessPoint, clusterName) if err != nil { var ourClusterName string - if clusterName, err := t.AccessPoint.GetClusterName(); err == nil { + if clusterName, err := t.cfg.AccessPoint.GetClusterName(); err == nil { ourClusterName = clusterName.GetClusterName() } - t.Errorf("Failed to retrieve client pool. Client cluster %v, target cluster %v, error: %v.", clusterName, ourClusterName, trace.DebugReport(err)) + t.log.Errorf("Failed to retrieve client pool. Client cluster %v, target cluster %v, error: %v.", clusterName, ourClusterName, trace.DebugReport(err)) // this falls back to the default config return nil, nil } @@ -189,10 +280,10 @@ func (t *TLSServer) GetConfigForClient(info *tls.ClientHelloInfo) (*tls.Config, return nil, trace.BadParameter("number of CAs in client cert pool is too large (%d) and cannot be encoded in a TLS handshake; this is due to a large number of trusted clusters; try updating tsh to the latest version; if that doesn't help, remove some trusted clusters", len(pool.Subjects())) } - tlsCopy := t.TLS.Clone() + tlsCopy := t.cfg.TLS.Clone() tlsCopy.ClientCAs = pool for _, cert := range tlsCopy.Certificates { - t.Debugf("Server certificate %v.", TLSCertInfo(&cert)) + t.log.Debugf("Server certificate %v.", TLSCertInfo(&cert)) } return tlsCopy, nil } @@ -210,6 +301,8 @@ type AuthMiddleware struct { // if set, will accept certificates with non-limited usage, // and usage exactly matching the specified values. AcceptedUsage []string + // Limiter is a rate and connection limiter + Limiter *limiter.Limiter } // Wrap sets next handler in chain @@ -217,9 +310,84 @@ func (a *AuthMiddleware) Wrap(h http.Handler) { a.Handler = h } +// UnaryInterceptor is GPRC unary interceptor that authenticates requests +// and passes the user information as context metadata +func (a *AuthMiddleware) UnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + peerInfo, ok := peer.FromContext(ctx) + if !ok { + return nil, trail.ToGRPC(trace.AccessDenied("missing authentication")) + } + // Limit requests per second and simultaneous connection by client IP. 
+ clientIP, _, err := net.SplitHostPort(peerInfo.Addr.String()) + if err != nil { + log.WithError(err).Debugf("Failed to get client IP.") + return nil, trail.ToGRPC(trace.BadParameter("missing client IP")) + } + if err := a.Limiter.RegisterRequest(clientIP); err != nil { + return nil, trail.ToGRPC(trace.LimitExceeded("rate limit exceeded")) + } + if err := a.Limiter.ConnLimiter.Acquire(clientIP, 1); err != nil { + return nil, trail.ToGRPC(trace.LimitExceeded("connection limit exceeded")) + } + defer a.Limiter.ConnLimiter.Release(clientIP, 1) + + tlsInfo, ok := peerInfo.AuthInfo.(credentials.TLSInfo) + if !ok { + return nil, trail.ToGRPC(trace.AccessDenied("missing authentication")) + } + user, err := a.GetUser(tlsInfo.State) + if err != nil { + return nil, trail.FromGRPC(err) + } + return handler(context.WithValue(ctx, ContextUser, user), req) +} + +// StreamInterceptor is GPRC unary interceptor that authenticates requests +// and passes the user information as context metadata +func (a *AuthMiddleware) StreamInterceptor(srv interface{}, serverStream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + peerInfo, ok := peer.FromContext(serverStream.Context()) + if !ok { + return trail.ToGRPC(trace.AccessDenied("missing authentication")) + } + // Limit requests per second and simultaneous connection by client IP. + clientIP, _, err := net.SplitHostPort(peerInfo.Addr.String()) + if err != nil { + log.WithError(err).Debugf("Failed to get client IP.") + return trail.ToGRPC(trace.BadParameter("missing client IP")) + } + if err := a.Limiter.RegisterRequest(clientIP); err != nil { + return trail.ToGRPC(trace.LimitExceeded("rate limit exceeded")) + } + if err := a.Limiter.ConnLimiter.Acquire(clientIP, 1); err != nil { + return trail.ToGRPC(trace.LimitExceeded("connection limit exceeded")) + } + defer a.Limiter.ConnLimiter.Release(clientIP, 1) + tlsInfo, ok := peerInfo.AuthInfo.(credentials.TLSInfo) + if !ok { + return trail.ToGRPC(trace.AccessDenied("missing authentication")) + } + user, err := a.GetUser(tlsInfo.State) + if err != nil { + return trail.ToGRPC(err) + } + return handler(srv, &authenticatedStream{ctx: context.WithValue(serverStream.Context(), ContextUser, user), ServerStream: serverStream}) +} + +// authenticatedStream wraps around the embedded grpc.ServerStream +// provides new context with additional metadata +type authenticatedStream struct { + ctx context.Context + grpc.ServerStream +} + +// Context specifies stream context with authentication metadata +func (a *authenticatedStream) Context() context.Context { + return a.ctx +} + // GetUser returns authenticated user based on request metadata set by HTTP server -func (a *AuthMiddleware) GetUser(r *http.Request) (IdentityGetter, error) { - peers := r.TLS.PeerCertificates +func (a *AuthMiddleware) GetUser(connState tls.ConnectionState) (IdentityGetter, error) { + peers := connState.PeerCertificates if len(peers) > 1 { // when turning intermediaries on, don't forget to verify // https://github.com/kubernetes/kubernetes/pull/34524/files#diff-2b283dde198c92424df5355f39544aa4R59 @@ -336,7 +504,11 @@ func (a *AuthMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) { if baseContext == nil { baseContext = context.TODO() } - user, err := a.GetUser(r) + if r.TLS == nil { + trace.WriteError(w, trace.AccessDenied("missing authentication")) + return + } + user, err := a.GetUser(*r.TLS) if err != nil { trace.WriteError(w, err) return diff --git a/lib/auth/oidc.go b/lib/auth/oidc.go index 
a31fbc3fabecc..b115565c401c4 100644 --- a/lib/auth/oidc.go +++ b/lib/auth/oidc.go @@ -1,5 +1,5 @@ /* -Copyright 2017-2019 Gravitational, Inc. +Copyright 2017-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -102,13 +102,19 @@ func (s *AuthServer) createOIDCClient(conn services.OIDCConnector) (*oidc.Client "unknown problem with connector %v, most likely URL %q is not valid or not accessible, check configuration and try to re-create the connector", conn.GetName(), conn.GetIssuerURL()) } - if err := s.EmitAuditEvent(events.UserSSOLoginFailure, events.EventFields{ - events.LoginMethod: events.LoginMethodOIDC, - events.AuthAttemptSuccess: false, - events.AuthAttemptErr: trace.Unwrap(ctx.Err()).Error(), - events.AuthAttemptMessage: err.Error(), + if err := s.emitter.EmitAuditEvent(ctx, &events.UserLogin{ + Metadata: events.Metadata{ + Type: events.UserLoginEvent, + Code: events.UserSSOLoginFailureCode, + }, + Method: events.LoginMethodOIDC, + Status: events.Status{ + Success: false, + Error: trace.Unwrap(ctx.Err()).Error(), + UserMessage: err.Error(), + }, }); err != nil { - log.Warnf("Failed to emit OIDC login failure event: %v", err) + log.WithError(err).Warn("Failed to emit OIDC login failure event.") } // return user-friendly error hiding the actual error in the event // logs for security purposes @@ -142,12 +148,19 @@ func (s *AuthServer) UpsertOIDCConnector(ctx context.Context, connector services if err := s.Identity.UpsertOIDCConnector(connector); err != nil { return trace.Wrap(err) } - - if err := s.EmitAuditEvent(events.OIDCConnectorCreated, events.EventFields{ - events.FieldName: connector.GetName(), - events.EventUser: clientUsername(ctx), + if err := s.emitter.EmitAuditEvent(ctx, &events.OIDCConnectorCreate{ + Metadata: events.Metadata{ + Type: events.OIDCConnectorCreatedEvent, + Code: events.OIDCConnectorCreatedCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: connector.GetName(), + }, }); err != nil { - log.Warnf("Failed to emit OIDC connector create event: %v", err) + log.WithError(err).Warn("Failed to emit OIDC connector create event.") } return nil @@ -158,14 +171,20 @@ func (s *AuthServer) DeleteOIDCConnector(ctx context.Context, connectorName stri if err := s.Identity.DeleteOIDCConnector(connectorName); err != nil { return trace.Wrap(err) } - - if err := s.EmitAuditEvent(events.OIDCConnectorDeleted, events.EventFields{ - events.FieldName: connectorName, - events.EventUser: clientUsername(ctx), + if err := s.emitter.EmitAuditEvent(ctx, &events.OIDCConnectorDelete{ + Metadata: events.Metadata{ + Type: events.OIDCConnectorDeletedEvent, + Code: events.OIDCConnectorDeletedCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: connectorName, + }, }); err != nil { - log.Warnf("Failed to emit OIDC connector delete event: %v", err) + log.WithError(err).Warn("Failed to emit OIDC connector delete event.") } - return nil } @@ -221,31 +240,41 @@ func (s *AuthServer) CreateOIDCAuthRequest(req services.OIDCAuthRequest) (*servi // will respond with OIDCAuthResponse, otherwise it will return error func (a *AuthServer) ValidateOIDCAuthCallback(q url.Values) (*OIDCAuthResponse, error) { re, err := a.validateOIDCAuthCallback(q) + event := &events.UserLogin{ + Metadata: events.Metadata{ + Type: events.UserLoginEvent, + }, + 
Method: events.LoginMethodOIDC, + } if err != nil { - fields := events.EventFields{ - events.LoginMethod: events.LoginMethodOIDC, - events.AuthAttemptSuccess: false, - // log the original internal error in audit log - events.AuthAttemptErr: trace.Unwrap(err).Error(), - } + event.Code = events.UserSSOLoginFailureCode + event.Status.Success = false + event.Status.Error = trace.Unwrap(err).Error() + event.Status.UserMessage = err.Error() if re != nil && re.claims != nil { - fields[events.IdentityAttributes] = re.claims - } - if err := a.EmitAuditEvent(events.UserSSOLoginFailure, fields); err != nil { - log.Warnf("Failed to emit OIDC login failure event: %v", err) + attributes, err := events.EncodeMap(re.claims) + if err != nil { + log.WithError(err).Debugf("Failed to encode identity attributes.") + } else { + event.IdentityAttributes = attributes + } } + a.emitter.EmitAuditEvent(a.closeCtx, event) return nil, trace.Wrap(err) } - fields := events.EventFields{ - events.EventUser: re.auth.Username, - events.AuthAttemptSuccess: true, - events.LoginMethod: events.LoginMethodOIDC, - } + event.Code = events.UserSSOLoginCode + event.User = re.auth.Username + event.Status.Success = true if re.claims != nil { - fields[events.IdentityAttributes] = re.claims + attributes, err := events.EncodeMap(re.claims) + if err != nil { + log.WithError(err).Debugf("Failed to encode identity attributes.") + } else { + event.IdentityAttributes = attributes + } } - if err := a.EmitAuditEvent(events.UserSSOLogin, fields); err != nil { - log.Warnf("Failed to emit OIDC login event: %v", err) + if err := a.emitter.EmitAuditEvent(a.closeCtx, event); err != nil { + log.WithError(err).Warn("Failed to emit OIDC login event.") } return &re.auth, nil } @@ -635,11 +664,12 @@ func (a *AuthServer) newGsuiteClient(config *jwt.Config, issuerURL string, userE return &gsuiteClient{ domain: domain, - client: config.Client(context.TODO()), + client: config.Client(a.closeCtx), url: *u, userEmail: userEmail, config: config, - auditLog: a, + emitter: a.emitter, + ctx: a.closeCtx, }, nil } @@ -649,7 +679,8 @@ type gsuiteClient struct { userEmail string domain string config *jwt.Config - auditLog events.IAuditLog + emitter events.Emitter + ctx context.Context } // fetchGroups fetches GSuite groups a user belongs to and returns @@ -666,11 +697,18 @@ collect: // Print warning to Teleport logs as well as the Audit Log. 
log.Warnf(warningMessage) - if err := g.auditLog.EmitAuditEvent(events.UserSSOLoginFailure, events.EventFields{ - events.LoginMethod: events.LoginMethodOIDC, - events.AuthAttemptMessage: warningMessage, + if err := g.emitter.EmitAuditEvent(g.ctx, &events.UserLogin{ + Metadata: events.Metadata{ + Type: events.UserLoginEvent, + Code: events.UserSSOLoginFailureCode, + }, + Method: events.LoginMethodOIDC, + Status: events.Status{ + Success: false, + Error: warningMessage, + }, }); err != nil { - log.Warnf("Failed to emit OIDC login failure event: %v", err) + log.WithError(err).Warnf("Failed to emit OIDC login failure event.") } break collect } diff --git a/lib/auth/password.go b/lib/auth/password.go index 2014e170e9b86..7dd2398b4142b 100644 --- a/lib/auth/password.go +++ b/lib/auth/password.go @@ -110,12 +110,17 @@ func (s *AuthServer) ChangePassword(req services.ChangePasswordReq) error { return trace.Wrap(err) } - if err := s.EmitAuditEvent(events.UserPasswordChange, events.EventFields{ - events.EventUser: userID, + if err := s.emitter.EmitAuditEvent(s.closeCtx, &events.UserPasswordChange{ + Metadata: events.Metadata{ + Type: events.UserPasswordChangeEvent, + Code: events.UserPasswordChangeCode, + }, + UserMetadata: events.UserMetadata{ + User: userID, + }, }); err != nil { - log.Warnf("Failed to emit password change event: %v", err) + log.WithError(err).Warn("Failed to emit password change event.") } - return nil } diff --git a/lib/auth/password_test.go b/lib/auth/password_test.go index bc1f567239d36..5f741a7125b66 100644 --- a/lib/auth/password_test.go +++ b/lib/auth/password_test.go @@ -42,9 +42,9 @@ import ( ) type PasswordSuite struct { - bk backend.Backend - a *AuthServer - mockedAuditLog *events.MockAuditLog + bk backend.Backend + a *AuthServer + mockEmitter *events.MockEmitter } var _ = fmt.Printf @@ -96,8 +96,8 @@ func (s *PasswordSuite) SetUpTest(c *C) { err = s.a.SetStaticTokens(staticTokens) c.Assert(err, IsNil) - s.mockedAuditLog = events.NewMockAuditLog(0) - s.a.IAuditLog = s.mockedAuditLog + s.mockEmitter = &events.MockEmitter{} + s.a.emitter = s.mockEmitter } func (s *PasswordSuite) TearDownTest(c *C) { @@ -191,8 +191,8 @@ func (s *PasswordSuite) TestChangePassword(c *C) { err = s.a.ChangePassword(req) c.Assert(err, IsNil) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, DeepEquals, events.UserPasswordChange) - c.Assert(s.mockedAuditLog.EmittedEvent.Fields[events.EventUser], Equals, "user1") + c.Assert(s.mockEmitter.LastEvent().GetType(), DeepEquals, events.UserPasswordChangeEvent) + c.Assert(s.mockEmitter.LastEvent().(*events.UserPasswordChange).User, Equals, "user1") s.shouldLockAfterFailedAttempts(c, req) diff --git a/lib/auth/permissions.go b/lib/auth/permissions.go index d3a8927f871b3..bad2cdbb2b58c 100644 --- a/lib/auth/permissions.go +++ b/lib/auth/permissions.go @@ -19,6 +19,7 @@ package auth import ( "context" "fmt" + "strings" "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/services" @@ -70,8 +71,9 @@ type AuthContext struct { User services.User // Checker is access checker Checker services.AccessChecker - // Identity is x509 derived identity - Identity tlsca.Identity + // Identity holds user identity - whether it's a local or remote user, + // local or remote node, proxy or auth server + Identity IdentityGetter } // Authorize authorizes user based on identity supplied via context @@ -84,12 +86,11 @@ func (a *authorizer) Authorize(ctx context.Context) (*AuthContext, error) { if !ok { return nil, trace.AccessDenied("unsupported context 
type %T", userI) } - identity := userWithIdentity.GetIdentity() authContext, err := a.fromUser(userI) if err != nil { return nil, trace.Wrap(err) } - authContext.Identity = identity + authContext.Identity = userWithIdentity return authContext, nil } @@ -258,7 +259,7 @@ func GetCheckerForBuiltinRole(clusterName string, clusterConfig services.Cluster case teleport.RoleProxy: // if in recording mode, return a different set of permissions than regular // mode. recording proxy needs to be able to generate host certificates. - if clusterConfig.GetSessionRecording() == services.RecordAtProxy { + if services.IsRecordAtProxy(clusterConfig.GetSessionRecording()) { return services.FromSpec( role.String(), services.RoleSpecV3{ @@ -442,7 +443,7 @@ func contextForLocalUser(u LocalUser, identity services.UserGetter, access servi if err != nil { return nil, trace.Wrap(err) } - roles, traits, err := services.ExtractFromIdentity(identity, &u.Identity) + roles, traits, err := services.ExtractFromIdentity(identity, u.Identity) if err != nil { return nil, trace.Wrap(err) } @@ -506,6 +507,14 @@ type IdentityGetter interface { GetIdentity() tlsca.Identity } +// WrapIdentity wraps identity to return identity getter function +type WrapIdentity tlsca.Identity + +// GetIdentity returns identity +func (i WrapIdentity) GetIdentity() tlsca.Identity { + return tlsca.Identity(i) +} + // BuiltinRole is the role of the Teleport service. type BuiltinRole struct { // GetClusterConfig fetches cluster configuration. @@ -524,6 +533,24 @@ type BuiltinRole struct { Identity tlsca.Identity } +// IsServer returns true if the role is one of auth, proxy or node +func (r BuiltinRole) IsServer() bool { + return r.Role == teleport.RoleProxy || r.Role == teleport.RoleNode || r.Role == teleport.RoleAuth +} + +// GetServerID extracts the identity from the full name. The username +// extracted from the node's identity (x.509 certificate) is expected to +// consist of "." so strip the cluster name suffix +// to get the server id. +// +// Note that as of right now Teleport expects server id to be a UUID4 but +// older Gravity clusters used to override it with strings like +// "192_168_1_1." so this code can't rely on it being +// UUID4 to account for clusters upgraded from older versions. 
+func (r BuiltinRole) GetServerID() string { + return strings.TrimSuffix(r.Identity.Username, "."+r.ClusterName) +} + // GetIdentity returns client identity func (r BuiltinRole) GetIdentity() tlsca.Identity { return r.Identity diff --git a/lib/auth/proto/auth.pb.go b/lib/auth/proto/auth.pb.go index b62f9b39af0a0..2b4188352829e 100644 --- a/lib/auth/proto/auth.pb.go +++ b/lib/auth/proto/auth.pb.go @@ -9,6 +9,7 @@ import math "math" import _ "github.com/gogo/protobuf/gogoproto" import empty "github.com/golang/protobuf/ptypes/empty" import _ "github.com/golang/protobuf/ptypes/timestamp" +import events "github.com/gravitational/teleport/lib/events" import services "github.com/gravitational/teleport/lib/services" import time "time" @@ -60,7 +61,7 @@ func (x Operation) String() string { return proto.EnumName(Operation_name, int32(x)) } func (Operation) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{0} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{0} } // Event returns cluster event @@ -93,7 +94,7 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{0} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{0} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +584,7 @@ func (m *Watch) Reset() { *m = Watch{} } func (m *Watch) String() string { return proto.CompactTextString(m) } func (*Watch) ProtoMessage() {} func (*Watch) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{1} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{1} } func (m *Watch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -641,7 +642,7 @@ func (m *WatchKind) Reset() { *m = WatchKind{} } func (m *WatchKind) String() string { return proto.CompactTextString(m) } func (*WatchKind) ProtoMessage() {} func (*WatchKind) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{2} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{2} } func (m *WatchKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -713,7 +714,7 @@ func (m *Certs) Reset() { *m = Certs{} } func (m *Certs) String() string { return proto.CompactTextString(m) } func (*Certs) ProtoMessage() {} func (*Certs) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{3} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{3} } func (m *Certs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -787,7 +788,7 @@ func (m *UserCertsRequest) Reset() { *m = UserCertsRequest{} } func (m *UserCertsRequest) String() string { return proto.CompactTextString(m) } func (*UserCertsRequest) ProtoMessage() {} func (*UserCertsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{4} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{4} } func (m *UserCertsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -873,7 +874,7 @@ func (m *GetUserRequest) Reset() { *m = GetUserRequest{} } func (m *GetUserRequest) String() string { return proto.CompactTextString(m) } func (*GetUserRequest) ProtoMessage() {} func (*GetUserRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{5} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{5} } func (m *GetUserRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -929,7 +930,7 @@ 
func (m *GetUsersRequest) Reset() { *m = GetUsersRequest{} } func (m *GetUsersRequest) String() string { return proto.CompactTextString(m) } func (*GetUsersRequest) ProtoMessage() {} func (*GetUsersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{6} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{6} } func (m *GetUsersRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -977,7 +978,7 @@ func (m *AccessRequests) Reset() { *m = AccessRequests{} } func (m *AccessRequests) String() string { return proto.CompactTextString(m) } func (*AccessRequests) ProtoMessage() {} func (*AccessRequests) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{7} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{7} } func (m *AccessRequests) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1025,7 +1026,7 @@ func (m *PluginDataSeq) Reset() { *m = PluginDataSeq{} } func (m *PluginDataSeq) String() string { return proto.CompactTextString(m) } func (*PluginDataSeq) ProtoMessage() {} func (*PluginDataSeq) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{8} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{8} } func (m *PluginDataSeq) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1081,7 +1082,7 @@ func (m *RequestStateSetter) Reset() { *m = RequestStateSetter{} } func (m *RequestStateSetter) String() string { return proto.CompactTextString(m) } func (*RequestStateSetter) ProtoMessage() {} func (*RequestStateSetter) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{9} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{9} } func (m *RequestStateSetter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1143,7 +1144,7 @@ func (m *RequestID) Reset() { *m = RequestID{} } func (m *RequestID) String() string { return proto.CompactTextString(m) } func (*RequestID) ProtoMessage() {} func (*RequestID) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{10} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{10} } func (m *RequestID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1193,7 +1194,7 @@ func (m *RotateResetPasswordTokenSecretsRequest) Reset() { func (m *RotateResetPasswordTokenSecretsRequest) String() string { return proto.CompactTextString(m) } func (*RotateResetPasswordTokenSecretsRequest) ProtoMessage() {} func (*RotateResetPasswordTokenSecretsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{11} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{11} } func (m *RotateResetPasswordTokenSecretsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1241,7 +1242,7 @@ func (m *GetResetPasswordTokenRequest) Reset() { *m = GetResetPasswordTo func (m *GetResetPasswordTokenRequest) String() string { return proto.CompactTextString(m) } func (*GetResetPasswordTokenRequest) ProtoMessage() {} func (*GetResetPasswordTokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{12} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{12} } func (m *GetResetPasswordTokenRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1295,7 +1296,7 @@ func (m *CreateResetPasswordTokenRequest) Reset() { *m = CreateResetPass func (m *CreateResetPasswordTokenRequest) String() string { return proto.CompactTextString(m) } func (*CreateResetPasswordTokenRequest) ProtoMessage() {} func 
(*CreateResetPasswordTokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{13} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{13} } func (m *CreateResetPasswordTokenRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1356,7 +1357,7 @@ func (m *PingRequest) Reset() { *m = PingRequest{} } func (m *PingRequest) String() string { return proto.CompactTextString(m) } func (*PingRequest) ProtoMessage() {} func (*PingRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{14} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{14} } func (m *PingRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1400,7 +1401,7 @@ func (m *PingResponse) Reset() { *m = PingResponse{} } func (m *PingResponse) String() string { return proto.CompactTextString(m) } func (*PingResponse) ProtoMessage() {} func (*PingResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{15} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{15} } func (m *PingResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1456,7 +1457,7 @@ func (m *DeleteUserRequest) Reset() { *m = DeleteUserRequest{} } func (m *DeleteUserRequest) String() string { return proto.CompactTextString(m) } func (*DeleteUserRequest) ProtoMessage() {} func (*DeleteUserRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{16} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{16} } func (m *DeleteUserRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1504,7 +1505,7 @@ func (m *Semaphores) Reset() { *m = Semaphores{} } func (m *Semaphores) String() string { return proto.CompactTextString(m) } func (*Semaphores) ProtoMessage() {} func (*Semaphores) Descriptor() ([]byte, []int) { - return fileDescriptor_auth_df26b5d43b9135f6, []int{17} + return fileDescriptor_auth_4edeb1cba1dbd286, []int{17} } func (m *Semaphores) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1540,1547 +1541,1601 @@ func (m *Semaphores) GetSemaphores() []*services.SemaphoreV3 { return nil } -func init() { - proto.RegisterType((*Event)(nil), "proto.Event") - proto.RegisterType((*Watch)(nil), "proto.Watch") - proto.RegisterType((*WatchKind)(nil), "proto.WatchKind") - proto.RegisterMapType((map[string]string)(nil), "proto.WatchKind.FilterEntry") - proto.RegisterType((*Certs)(nil), "proto.Certs") - proto.RegisterType((*UserCertsRequest)(nil), "proto.UserCertsRequest") - proto.RegisterType((*GetUserRequest)(nil), "proto.GetUserRequest") - proto.RegisterType((*GetUsersRequest)(nil), "proto.GetUsersRequest") - proto.RegisterType((*AccessRequests)(nil), "proto.AccessRequests") - proto.RegisterType((*PluginDataSeq)(nil), "proto.PluginDataSeq") - proto.RegisterType((*RequestStateSetter)(nil), "proto.RequestStateSetter") - proto.RegisterType((*RequestID)(nil), "proto.RequestID") - proto.RegisterType((*RotateResetPasswordTokenSecretsRequest)(nil), "proto.RotateResetPasswordTokenSecretsRequest") - proto.RegisterType((*GetResetPasswordTokenRequest)(nil), "proto.GetResetPasswordTokenRequest") - proto.RegisterType((*CreateResetPasswordTokenRequest)(nil), "proto.CreateResetPasswordTokenRequest") - proto.RegisterType((*PingRequest)(nil), "proto.PingRequest") - proto.RegisterType((*PingResponse)(nil), "proto.PingResponse") - proto.RegisterType((*DeleteUserRequest)(nil), "proto.DeleteUserRequest") - proto.RegisterType((*Semaphores)(nil), "proto.Semaphores") - 
proto.RegisterEnum("proto.Operation", Operation_name, Operation_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for AuthService service - -type AuthServiceClient interface { - // SendKeepAlives allows node to send a stream of keep alive requests - SendKeepAlives(ctx context.Context, opts ...grpc.CallOption) (AuthService_SendKeepAlivesClient, error) - // WatchEvents returns a new stream of cluster events - WatchEvents(ctx context.Context, in *Watch, opts ...grpc.CallOption) (AuthService_WatchEventsClient, error) - // UpsertNode upserts node - UpsertNode(ctx context.Context, in *services.ServerV2, opts ...grpc.CallOption) (*services.KeepAlive, error) - // GenerateUserCerts generates a set of user certificates for use by `tctl - // auth sign`. - GenerateUserCerts(ctx context.Context, in *UserCertsRequest, opts ...grpc.CallOption) (*Certs, error) - // GetUser gets a user resource by name. - GetUser(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*services.UserV2, error) - // GetUsers gets all current user resources. - GetUsers(ctx context.Context, in *GetUsersRequest, opts ...grpc.CallOption) (AuthService_GetUsersClient, error) - // GetAccessRequests gets all pending access requests. - GetAccessRequests(ctx context.Context, in *services.AccessRequestFilter, opts ...grpc.CallOption) (*AccessRequests, error) - // CreateAccessRequest creates a new access request. - CreateAccessRequest(ctx context.Context, in *services.AccessRequestV3, opts ...grpc.CallOption) (*empty.Empty, error) - // DeleteAccessRequest deletes an access request. - DeleteAccessRequest(ctx context.Context, in *RequestID, opts ...grpc.CallOption) (*empty.Empty, error) - // SetAccessRequestState sets the state of an access request. - SetAccessRequestState(ctx context.Context, in *RequestStateSetter, opts ...grpc.CallOption) (*empty.Empty, error) - // GetPluginData gets all plugin data matching the supplied filter. - GetPluginData(ctx context.Context, in *services.PluginDataFilter, opts ...grpc.CallOption) (*PluginDataSeq, error) - // UpdatePluginData updates a plugin's resource-specific datastore. - UpdatePluginData(ctx context.Context, in *services.PluginDataUpdateParams, opts ...grpc.CallOption) (*empty.Empty, error) - // Ping gets basic info about the auth server. This method is intended - // to mimic the behavior of the proxy's Ping method, and may be used by - // clients for verification or configuration on startup. - Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) - // RotateResetPasswordTokenSecrets rotates token secrets for a given tokenID. - RotateResetPasswordTokenSecrets(ctx context.Context, in *RotateResetPasswordTokenSecretsRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenSecretsV3, error) - // GetResetPasswordToken returns a token. - GetResetPasswordToken(ctx context.Context, in *GetResetPasswordTokenRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenV3, error) - // CreateResetPasswordToken creates ResetPasswordToken. - CreateResetPasswordToken(ctx context.Context, in *CreateResetPasswordTokenRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenV3, error) - // CreateUser inserts a new user entry to a backend. 
- CreateUser(ctx context.Context, in *services.UserV2, opts ...grpc.CallOption) (*empty.Empty, error) - // UpdateUser updates an existing user in a backend. - UpdateUser(ctx context.Context, in *services.UserV2, opts ...grpc.CallOption) (*empty.Empty, error) - // DeleteUser deletes an existing user in a backend by username. - DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*empty.Empty, error) - // AcquireSemaphore acquires lease with requested resources from semaphore. - AcquireSemaphore(ctx context.Context, in *services.AcquireSemaphoreRequest, opts ...grpc.CallOption) (*services.SemaphoreLease, error) - // KeepAliveSemaphoreLease updates semaphore lease. - KeepAliveSemaphoreLease(ctx context.Context, in *services.SemaphoreLease, opts ...grpc.CallOption) (*empty.Empty, error) - // CancelSemaphoreLease cancels semaphore lease early. - CancelSemaphoreLease(ctx context.Context, in *services.SemaphoreLease, opts ...grpc.CallOption) (*empty.Empty, error) - // GetSemaphores returns a list of all semaphores matching the supplied filter. - GetSemaphores(ctx context.Context, in *services.SemaphoreFilter, opts ...grpc.CallOption) (*Semaphores, error) - // DeleteSemaphore deletes a semaphore matching the supplied filter. - DeleteSemaphore(ctx context.Context, in *services.SemaphoreFilter, opts ...grpc.CallOption) (*empty.Empty, error) +// AuditStreamRequest contains stream request - event or stream control request +type AuditStreamRequest struct { + // Request is either stream request - create, resume or complete stream + // or event submitted as a part of the stream + // + // Types that are valid to be assigned to Request: + // *AuditStreamRequest_CreateStream + // *AuditStreamRequest_ResumeStream + // *AuditStreamRequest_CompleteStream + // *AuditStreamRequest_FlushAndCloseStream + // *AuditStreamRequest_Event + Request isAuditStreamRequest_Request `protobuf_oneof:"Request"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuditStreamRequest) Reset() { *m = AuditStreamRequest{} } +func (m *AuditStreamRequest) String() string { return proto.CompactTextString(m) } +func (*AuditStreamRequest) ProtoMessage() {} +func (*AuditStreamRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_auth_4edeb1cba1dbd286, []int{18} +} +func (m *AuditStreamRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -type authServiceClient struct { - cc *grpc.ClientConn +func (m *AuditStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuditStreamRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } - -func NewAuthServiceClient(cc *grpc.ClientConn) AuthServiceClient { - return &authServiceClient{cc} +func (dst *AuditStreamRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditStreamRequest.Merge(dst, src) } - -func (c *authServiceClient) SendKeepAlives(ctx context.Context, opts ...grpc.CallOption) (AuthService_SendKeepAlivesClient, error) { - stream, err := c.cc.NewStream(ctx, &_AuthService_serviceDesc.Streams[0], "/proto.AuthService/SendKeepAlives", opts...) 
- if err != nil { - return nil, err - } - x := &authServiceSendKeepAlivesClient{stream} - return x, nil +func (m *AuditStreamRequest) XXX_Size() int { + return m.Size() } - -type AuthService_SendKeepAlivesClient interface { - Send(*services.KeepAlive) error - CloseAndRecv() (*empty.Empty, error) - grpc.ClientStream +func (m *AuditStreamRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AuditStreamRequest.DiscardUnknown(m) } -type authServiceSendKeepAlivesClient struct { - grpc.ClientStream -} +var xxx_messageInfo_AuditStreamRequest proto.InternalMessageInfo -func (x *authServiceSendKeepAlivesClient) Send(m *services.KeepAlive) error { - return x.ClientStream.SendMsg(m) +type isAuditStreamRequest_Request interface { + isAuditStreamRequest_Request() + MarshalTo([]byte) (int, error) + Size() int } -func (x *authServiceSendKeepAlivesClient) CloseAndRecv() (*empty.Empty, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(empty.Empty) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil +type AuditStreamRequest_CreateStream struct { + CreateStream *CreateStream `protobuf:"bytes,1,opt,name=CreateStream,oneof"` } - -func (c *authServiceClient) WatchEvents(ctx context.Context, in *Watch, opts ...grpc.CallOption) (AuthService_WatchEventsClient, error) { - stream, err := c.cc.NewStream(ctx, &_AuthService_serviceDesc.Streams[1], "/proto.AuthService/WatchEvents", opts...) - if err != nil { - return nil, err - } - x := &authServiceWatchEventsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil +type AuditStreamRequest_ResumeStream struct { + ResumeStream *ResumeStream `protobuf:"bytes,2,opt,name=ResumeStream,oneof"` } - -type AuthService_WatchEventsClient interface { - Recv() (*Event, error) - grpc.ClientStream +type AuditStreamRequest_CompleteStream struct { + CompleteStream *CompleteStream `protobuf:"bytes,3,opt,name=CompleteStream,oneof"` } - -type authServiceWatchEventsClient struct { - grpc.ClientStream +type AuditStreamRequest_FlushAndCloseStream struct { + FlushAndCloseStream *FlushAndCloseStream `protobuf:"bytes,4,opt,name=FlushAndCloseStream,oneof"` } - -func (x *authServiceWatchEventsClient) Recv() (*Event, error) { - m := new(Event) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil +type AuditStreamRequest_Event struct { + Event *events.OneOf `protobuf:"bytes,5,opt,name=Event,oneof"` } -func (c *authServiceClient) UpsertNode(ctx context.Context, in *services.ServerV2, opts ...grpc.CallOption) (*services.KeepAlive, error) { - out := new(services.KeepAlive) - err := c.cc.Invoke(ctx, "/proto.AuthService/UpsertNode", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} +func (*AuditStreamRequest_CreateStream) isAuditStreamRequest_Request() {} +func (*AuditStreamRequest_ResumeStream) isAuditStreamRequest_Request() {} +func (*AuditStreamRequest_CompleteStream) isAuditStreamRequest_Request() {} +func (*AuditStreamRequest_FlushAndCloseStream) isAuditStreamRequest_Request() {} +func (*AuditStreamRequest_Event) isAuditStreamRequest_Request() {} -func (c *authServiceClient) GenerateUserCerts(ctx context.Context, in *UserCertsRequest, opts ...grpc.CallOption) (*Certs, error) { - out := new(Certs) - err := c.cc.Invoke(ctx, "/proto.AuthService/GenerateUserCerts", in, out, opts...) 
- if err != nil { - return nil, err +func (m *AuditStreamRequest) GetRequest() isAuditStreamRequest_Request { + if m != nil { + return m.Request } - return out, nil + return nil } -func (c *authServiceClient) GetUser(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*services.UserV2, error) { - out := new(services.UserV2) - err := c.cc.Invoke(ctx, "/proto.AuthService/GetUser", in, out, opts...) - if err != nil { - return nil, err +func (m *AuditStreamRequest) GetCreateStream() *CreateStream { + if x, ok := m.GetRequest().(*AuditStreamRequest_CreateStream); ok { + return x.CreateStream } - return out, nil + return nil } -func (c *authServiceClient) GetUsers(ctx context.Context, in *GetUsersRequest, opts ...grpc.CallOption) (AuthService_GetUsersClient, error) { - stream, err := c.cc.NewStream(ctx, &_AuthService_serviceDesc.Streams[2], "/proto.AuthService/GetUsers", opts...) - if err != nil { - return nil, err - } - x := &authServiceGetUsersClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err +func (m *AuditStreamRequest) GetResumeStream() *ResumeStream { + if x, ok := m.GetRequest().(*AuditStreamRequest_ResumeStream); ok { + return x.ResumeStream } - return x, nil -} - -type AuthService_GetUsersClient interface { - Recv() (*services.UserV2, error) - grpc.ClientStream -} - -type authServiceGetUsersClient struct { - grpc.ClientStream + return nil } -func (x *authServiceGetUsersClient) Recv() (*services.UserV2, error) { - m := new(services.UserV2) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (m *AuditStreamRequest) GetCompleteStream() *CompleteStream { + if x, ok := m.GetRequest().(*AuditStreamRequest_CompleteStream); ok { + return x.CompleteStream } - return m, nil + return nil } -func (c *authServiceClient) GetAccessRequests(ctx context.Context, in *services.AccessRequestFilter, opts ...grpc.CallOption) (*AccessRequests, error) { - out := new(AccessRequests) - err := c.cc.Invoke(ctx, "/proto.AuthService/GetAccessRequests", in, out, opts...) - if err != nil { - return nil, err +func (m *AuditStreamRequest) GetFlushAndCloseStream() *FlushAndCloseStream { + if x, ok := m.GetRequest().(*AuditStreamRequest_FlushAndCloseStream); ok { + return x.FlushAndCloseStream } - return out, nil + return nil } -func (c *authServiceClient) CreateAccessRequest(ctx context.Context, in *services.AccessRequestV3, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/proto.AuthService/CreateAccessRequest", in, out, opts...) - if err != nil { - return nil, err +func (m *AuditStreamRequest) GetEvent() *events.OneOf { + if x, ok := m.GetRequest().(*AuditStreamRequest_Event); ok { + return x.Event } - return out, nil + return nil } -func (c *authServiceClient) DeleteAccessRequest(ctx context.Context, in *RequestID, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/proto.AuthService/DeleteAccessRequest", in, out, opts...) - if err != nil { - return nil, err +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AuditStreamRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AuditStreamRequest_OneofMarshaler, _AuditStreamRequest_OneofUnmarshaler, _AuditStreamRequest_OneofSizer, []interface{}{ + (*AuditStreamRequest_CreateStream)(nil), + (*AuditStreamRequest_ResumeStream)(nil), + (*AuditStreamRequest_CompleteStream)(nil), + (*AuditStreamRequest_FlushAndCloseStream)(nil), + (*AuditStreamRequest_Event)(nil), + } +} + +func _AuditStreamRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AuditStreamRequest) + // Request + switch x := m.Request.(type) { + case *AuditStreamRequest_CreateStream: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CreateStream); err != nil { + return err + } + case *AuditStreamRequest_ResumeStream: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResumeStream); err != nil { + return err + } + case *AuditStreamRequest_CompleteStream: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CompleteStream); err != nil { + return err + } + case *AuditStreamRequest_FlushAndCloseStream: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FlushAndCloseStream); err != nil { + return err + } + case *AuditStreamRequest_Event: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Event); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("AuditStreamRequest.Request has unexpected type %T", x) } - return out, nil + return nil } -func (c *authServiceClient) SetAccessRequestState(ctx context.Context, in *RequestStateSetter, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/proto.AuthService/SetAccessRequestState", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil +func _AuditStreamRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AuditStreamRequest) + switch tag { + case 1: // Request.CreateStream + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CreateStream) + err := b.DecodeMessage(msg) + m.Request = &AuditStreamRequest_CreateStream{msg} + return true, err + case 2: // Request.ResumeStream + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResumeStream) + err := b.DecodeMessage(msg) + m.Request = &AuditStreamRequest_ResumeStream{msg} + return true, err + case 3: // Request.CompleteStream + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CompleteStream) + err := b.DecodeMessage(msg) + m.Request = &AuditStreamRequest_CompleteStream{msg} + return true, err + case 4: // Request.FlushAndCloseStream + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FlushAndCloseStream) + err := b.DecodeMessage(msg) + m.Request = &AuditStreamRequest_FlushAndCloseStream{msg} + return true, err + case 5: // Request.Event + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(events.OneOf) + err := b.DecodeMessage(msg) + m.Request = &AuditStreamRequest_Event{msg} + return true, err + default: + return false, nil + } } -func (c *authServiceClient) GetPluginData(ctx context.Context, in *services.PluginDataFilter, opts ...grpc.CallOption) (*PluginDataSeq, error) { - out := new(PluginDataSeq) - err := c.cc.Invoke(ctx, "/proto.AuthService/GetPluginData", in, out, opts...) - if err != nil { - return nil, err +func _AuditStreamRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AuditStreamRequest) + // Request + switch x := m.Request.(type) { + case *AuditStreamRequest_CreateStream: + s := proto.Size(x.CreateStream) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditStreamRequest_ResumeStream: + s := proto.Size(x.ResumeStream) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditStreamRequest_CompleteStream: + s := proto.Size(x.CompleteStream) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditStreamRequest_FlushAndCloseStream: + s := proto.Size(x.FlushAndCloseStream) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *AuditStreamRequest_Event: + s := proto.Size(x.Event) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } - return out, nil + return n } -func (c *authServiceClient) UpdatePluginData(ctx context.Context, in *services.PluginDataUpdateParams, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/proto.AuthService/UpdatePluginData", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil +// AuditStreamStatus returns audit stream status +// with corresponding upload ID +type AuditStreamStatus struct { + // UploadID is upload ID associated with the stream, + // can be used to resume the stream + UploadID string `protobuf:"bytes,1,opt,name=UploadID,proto3" json:"UploadID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (c *authServiceClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) { - out := new(PingResponse) - err := c.cc.Invoke(ctx, "/proto.AuthService/Ping", in, out, opts...) - if err != nil { - return nil, err +func (m *AuditStreamStatus) Reset() { *m = AuditStreamStatus{} } +func (m *AuditStreamStatus) String() string { return proto.CompactTextString(m) } +func (*AuditStreamStatus) ProtoMessage() {} +func (*AuditStreamStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_auth_4edeb1cba1dbd286, []int{19} +} +func (m *AuditStreamStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditStreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuditStreamStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return out, nil +} +func (dst *AuditStreamStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditStreamStatus.Merge(dst, src) +} +func (m *AuditStreamStatus) XXX_Size() int { + return m.Size() +} +func (m *AuditStreamStatus) XXX_DiscardUnknown() { + xxx_messageInfo_AuditStreamStatus.DiscardUnknown(m) } -func (c *authServiceClient) RotateResetPasswordTokenSecrets(ctx context.Context, in *RotateResetPasswordTokenSecretsRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenSecretsV3, error) { - out := new(services.ResetPasswordTokenSecretsV3) - err := c.cc.Invoke(ctx, "/proto.AuthService/RotateResetPasswordTokenSecrets", in, out, opts...) - if err != nil { - return nil, err +var xxx_messageInfo_AuditStreamStatus proto.InternalMessageInfo + +func (m *AuditStreamStatus) GetUploadID() string { + if m != nil { + return m.UploadID } - return out, nil + return "" } -func (c *authServiceClient) GetResetPasswordToken(ctx context.Context, in *GetResetPasswordTokenRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenV3, error) { - out := new(services.ResetPasswordTokenV3) - err := c.cc.Invoke(ctx, "/proto.AuthService/GetResetPasswordToken", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// CreateStream creates stream for a new session ID +type CreateStream struct { + SessionID string `protobuf:"bytes,1,opt,name=SessionID,proto3" json:"SessionID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (c *authServiceClient) CreateResetPasswordToken(ctx context.Context, in *CreateResetPasswordTokenRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenV3, error) { - out := new(services.ResetPasswordTokenV3) - err := c.cc.Invoke(ctx, "/proto.AuthService/CreateResetPasswordToken", in, out, opts...) 
- if err != nil { - return nil, err +func (m *CreateStream) Reset() { *m = CreateStream{} } +func (m *CreateStream) String() string { return proto.CompactTextString(m) } +func (*CreateStream) ProtoMessage() {} +func (*CreateStream) Descriptor() ([]byte, []int) { + return fileDescriptor_auth_4edeb1cba1dbd286, []int{20} +} +func (m *CreateStream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CreateStream.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return out, nil +} +func (dst *CreateStream) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateStream.Merge(dst, src) +} +func (m *CreateStream) XXX_Size() int { + return m.Size() +} +func (m *CreateStream) XXX_DiscardUnknown() { + xxx_messageInfo_CreateStream.DiscardUnknown(m) } -func (c *authServiceClient) CreateUser(ctx context.Context, in *services.UserV2, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/proto.AuthService/CreateUser", in, out, opts...) - if err != nil { - return nil, err +var xxx_messageInfo_CreateStream proto.InternalMessageInfo + +func (m *CreateStream) GetSessionID() string { + if m != nil { + return m.SessionID } - return out, nil + return "" } -func (c *authServiceClient) UpdateUser(ctx context.Context, in *services.UserV2, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/proto.AuthService/UpdateUser", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// ResumeStream resumes stream that was previously created +type ResumeStream struct { + // SessionID is a session ID of the stream + SessionID string `protobuf:"bytes,1,opt,name=SessionID,proto3" json:"SessionID,omitempty"` + // UploadID is upload ID to resume + UploadID string `protobuf:"bytes,2,opt,name=UploadID,proto3" json:"UploadID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (c *authServiceClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/proto.AuthService/DeleteUser", in, out, opts...) 
- if err != nil { - return nil, err +func (m *ResumeStream) Reset() { *m = ResumeStream{} } +func (m *ResumeStream) String() string { return proto.CompactTextString(m) } +func (*ResumeStream) ProtoMessage() {} +func (*ResumeStream) Descriptor() ([]byte, []int) { + return fileDescriptor_auth_4edeb1cba1dbd286, []int{21} +} +func (m *ResumeStream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResumeStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResumeStream.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return out, nil +} +func (dst *ResumeStream) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResumeStream.Merge(dst, src) +} +func (m *ResumeStream) XXX_Size() int { + return m.Size() +} +func (m *ResumeStream) XXX_DiscardUnknown() { + xxx_messageInfo_ResumeStream.DiscardUnknown(m) } -func (c *authServiceClient) AcquireSemaphore(ctx context.Context, in *services.AcquireSemaphoreRequest, opts ...grpc.CallOption) (*services.SemaphoreLease, error) { - out := new(services.SemaphoreLease) - err := c.cc.Invoke(ctx, "/proto.AuthService/AcquireSemaphore", in, out, opts...) - if err != nil { - return nil, err +var xxx_messageInfo_ResumeStream proto.InternalMessageInfo + +func (m *ResumeStream) GetSessionID() string { + if m != nil { + return m.SessionID } - return out, nil + return "" } -func (c *authServiceClient) KeepAliveSemaphoreLease(ctx context.Context, in *services.SemaphoreLease, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/proto.AuthService/KeepAliveSemaphoreLease", in, out, opts...) - if err != nil { - return nil, err +func (m *ResumeStream) GetUploadID() string { + if m != nil { + return m.UploadID } - return out, nil + return "" } -func (c *authServiceClient) CancelSemaphoreLease(ctx context.Context, in *services.SemaphoreLease, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/proto.AuthService/CancelSemaphoreLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// CompleteStream completes the stream +// and uploads it to the session server +type CompleteStream struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (c *authServiceClient) GetSemaphores(ctx context.Context, in *services.SemaphoreFilter, opts ...grpc.CallOption) (*Semaphores, error) { - out := new(Semaphores) - err := c.cc.Invoke(ctx, "/proto.AuthService/GetSemaphores", in, out, opts...) 
- if err != nil { - return nil, err +func (m *CompleteStream) Reset() { *m = CompleteStream{} } +func (m *CompleteStream) String() string { return proto.CompactTextString(m) } +func (*CompleteStream) ProtoMessage() {} +func (*CompleteStream) Descriptor() ([]byte, []int) { + return fileDescriptor_auth_4edeb1cba1dbd286, []int{22} +} +func (m *CompleteStream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CompleteStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CompleteStream.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return out, nil +} +func (dst *CompleteStream) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompleteStream.Merge(dst, src) +} +func (m *CompleteStream) XXX_Size() int { + return m.Size() +} +func (m *CompleteStream) XXX_DiscardUnknown() { + xxx_messageInfo_CompleteStream.DiscardUnknown(m) } -func (c *authServiceClient) DeleteSemaphore(ctx context.Context, in *services.SemaphoreFilter, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/proto.AuthService/DeleteSemaphore", in, out, opts...) - if err != nil { - return nil, err +var xxx_messageInfo_CompleteStream proto.InternalMessageInfo + +// FlushAndCloseStream flushes the stream data and closes the stream +type FlushAndCloseStream struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FlushAndCloseStream) Reset() { *m = FlushAndCloseStream{} } +func (m *FlushAndCloseStream) String() string { return proto.CompactTextString(m) } +func (*FlushAndCloseStream) ProtoMessage() {} +func (*FlushAndCloseStream) Descriptor() ([]byte, []int) { + return fileDescriptor_auth_4edeb1cba1dbd286, []int{23} +} +func (m *FlushAndCloseStream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlushAndCloseStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FlushAndCloseStream.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return out, nil +} +func (dst *FlushAndCloseStream) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlushAndCloseStream.Merge(dst, src) +} +func (m *FlushAndCloseStream) XXX_Size() int { + return m.Size() +} +func (m *FlushAndCloseStream) XXX_DiscardUnknown() { + xxx_messageInfo_FlushAndCloseStream.DiscardUnknown(m) } -// Server API for AuthService service +var xxx_messageInfo_FlushAndCloseStream proto.InternalMessageInfo -type AuthServiceServer interface { - // SendKeepAlives allows node to send a stream of keep alive requests - SendKeepAlives(AuthService_SendKeepAlivesServer) error - // WatchEvents returns a new stream of cluster events - WatchEvents(*Watch, AuthService_WatchEventsServer) error - // UpsertNode upserts node - UpsertNode(context.Context, *services.ServerV2) (*services.KeepAlive, error) - // GenerateUserCerts generates a set of user certificates for use by `tctl - // auth sign`. - GenerateUserCerts(context.Context, *UserCertsRequest) (*Certs, error) - // GetUser gets a user resource by name. - GetUser(context.Context, *GetUserRequest) (*services.UserV2, error) - // GetUsers gets all current user resources. 
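The message types defined above (AuditStreamRequest and its CreateStream, ResumeStream, CompleteStream and FlushAndCloseStream variants) are the wire-level control messages for the audit streaming RPC introduced further down in this file; ResumeStream carries the SessionID plus the UploadID reported back via AuditStreamStatus. A minimal sketch of how a caller might populate the generated oneof wrappers, using only the types generated here and assuming the usual import paths for the generated packages:

package example

import (
	"github.com/gravitational/teleport/lib/auth/proto"
	"github.com/gravitational/teleport/lib/events"
)

// auditStreamRequests builds the three kinds of messages a typical stream
// sends: a CreateStream announcing the session, the events themselves, and
// a CompleteStream asking the server to finalize the upload.
func auditStreamRequests(sessionID string, evts []*events.OneOf) []*proto.AuditStreamRequest {
	reqs := []*proto.AuditStreamRequest{{
		Request: &proto.AuditStreamRequest_CreateStream{
			CreateStream: &proto.CreateStream{SessionID: sessionID},
		},
	}}
	for _, e := range evts {
		reqs = append(reqs, &proto.AuditStreamRequest{
			Request: &proto.AuditStreamRequest_Event{Event: e},
		})
	}
	return append(reqs, &proto.AuditStreamRequest{
		Request: &proto.AuditStreamRequest_CompleteStream{CompleteStream: &proto.CompleteStream{}},
	})
}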
- GetUsers(*GetUsersRequest, AuthService_GetUsersServer) error - // GetAccessRequests gets all pending access requests. - GetAccessRequests(context.Context, *services.AccessRequestFilter) (*AccessRequests, error) +func init() { + proto.RegisterType((*Event)(nil), "proto.Event") + proto.RegisterType((*Watch)(nil), "proto.Watch") + proto.RegisterType((*WatchKind)(nil), "proto.WatchKind") + proto.RegisterMapType((map[string]string)(nil), "proto.WatchKind.FilterEntry") + proto.RegisterType((*Certs)(nil), "proto.Certs") + proto.RegisterType((*UserCertsRequest)(nil), "proto.UserCertsRequest") + proto.RegisterType((*GetUserRequest)(nil), "proto.GetUserRequest") + proto.RegisterType((*GetUsersRequest)(nil), "proto.GetUsersRequest") + proto.RegisterType((*AccessRequests)(nil), "proto.AccessRequests") + proto.RegisterType((*PluginDataSeq)(nil), "proto.PluginDataSeq") + proto.RegisterType((*RequestStateSetter)(nil), "proto.RequestStateSetter") + proto.RegisterType((*RequestID)(nil), "proto.RequestID") + proto.RegisterType((*RotateResetPasswordTokenSecretsRequest)(nil), "proto.RotateResetPasswordTokenSecretsRequest") + proto.RegisterType((*GetResetPasswordTokenRequest)(nil), "proto.GetResetPasswordTokenRequest") + proto.RegisterType((*CreateResetPasswordTokenRequest)(nil), "proto.CreateResetPasswordTokenRequest") + proto.RegisterType((*PingRequest)(nil), "proto.PingRequest") + proto.RegisterType((*PingResponse)(nil), "proto.PingResponse") + proto.RegisterType((*DeleteUserRequest)(nil), "proto.DeleteUserRequest") + proto.RegisterType((*Semaphores)(nil), "proto.Semaphores") + proto.RegisterType((*AuditStreamRequest)(nil), "proto.AuditStreamRequest") + proto.RegisterType((*AuditStreamStatus)(nil), "proto.AuditStreamStatus") + proto.RegisterType((*CreateStream)(nil), "proto.CreateStream") + proto.RegisterType((*ResumeStream)(nil), "proto.ResumeStream") + proto.RegisterType((*CompleteStream)(nil), "proto.CompleteStream") + proto.RegisterType((*FlushAndCloseStream)(nil), "proto.FlushAndCloseStream") + proto.RegisterEnum("proto.Operation", Operation_name, Operation_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for AuthService service + +type AuthServiceClient interface { + // SendKeepAlives allows node to send a stream of keep alive requests + SendKeepAlives(ctx context.Context, opts ...grpc.CallOption) (AuthService_SendKeepAlivesClient, error) + // WatchEvents returns a new stream of cluster events + WatchEvents(ctx context.Context, in *Watch, opts ...grpc.CallOption) (AuthService_WatchEventsClient, error) + // UpsertNode upserts node + UpsertNode(ctx context.Context, in *services.ServerV2, opts ...grpc.CallOption) (*services.KeepAlive, error) + // GenerateUserCerts generates a set of user certificates for use by `tctl + // auth sign`. + GenerateUserCerts(ctx context.Context, in *UserCertsRequest, opts ...grpc.CallOption) (*Certs, error) + // GetUser gets a user resource by name. + GetUser(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*services.UserV2, error) + // GetUsers gets all current user resources. + GetUsers(ctx context.Context, in *GetUsersRequest, opts ...grpc.CallOption) (AuthService_GetUsersClient, error) + // GetAccessRequests gets all pending access requests. 
+ GetAccessRequests(ctx context.Context, in *services.AccessRequestFilter, opts ...grpc.CallOption) (*AccessRequests, error) // CreateAccessRequest creates a new access request. - CreateAccessRequest(context.Context, *services.AccessRequestV3) (*empty.Empty, error) + CreateAccessRequest(ctx context.Context, in *services.AccessRequestV3, opts ...grpc.CallOption) (*empty.Empty, error) // DeleteAccessRequest deletes an access request. - DeleteAccessRequest(context.Context, *RequestID) (*empty.Empty, error) + DeleteAccessRequest(ctx context.Context, in *RequestID, opts ...grpc.CallOption) (*empty.Empty, error) // SetAccessRequestState sets the state of an access request. - SetAccessRequestState(context.Context, *RequestStateSetter) (*empty.Empty, error) + SetAccessRequestState(ctx context.Context, in *RequestStateSetter, opts ...grpc.CallOption) (*empty.Empty, error) // GetPluginData gets all plugin data matching the supplied filter. - GetPluginData(context.Context, *services.PluginDataFilter) (*PluginDataSeq, error) + GetPluginData(ctx context.Context, in *services.PluginDataFilter, opts ...grpc.CallOption) (*PluginDataSeq, error) // UpdatePluginData updates a plugin's resource-specific datastore. - UpdatePluginData(context.Context, *services.PluginDataUpdateParams) (*empty.Empty, error) + UpdatePluginData(ctx context.Context, in *services.PluginDataUpdateParams, opts ...grpc.CallOption) (*empty.Empty, error) // Ping gets basic info about the auth server. This method is intended // to mimic the behavior of the proxy's Ping method, and may be used by // clients for verification or configuration on startup. - Ping(context.Context, *PingRequest) (*PingResponse, error) + Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) // RotateResetPasswordTokenSecrets rotates token secrets for a given tokenID. - RotateResetPasswordTokenSecrets(context.Context, *RotateResetPasswordTokenSecretsRequest) (*services.ResetPasswordTokenSecretsV3, error) + RotateResetPasswordTokenSecrets(ctx context.Context, in *RotateResetPasswordTokenSecretsRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenSecretsV3, error) // GetResetPasswordToken returns a token. - GetResetPasswordToken(context.Context, *GetResetPasswordTokenRequest) (*services.ResetPasswordTokenV3, error) + GetResetPasswordToken(ctx context.Context, in *GetResetPasswordTokenRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenV3, error) // CreateResetPasswordToken creates ResetPasswordToken. - CreateResetPasswordToken(context.Context, *CreateResetPasswordTokenRequest) (*services.ResetPasswordTokenV3, error) + CreateResetPasswordToken(ctx context.Context, in *CreateResetPasswordTokenRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenV3, error) // CreateUser inserts a new user entry to a backend. - CreateUser(context.Context, *services.UserV2) (*empty.Empty, error) + CreateUser(ctx context.Context, in *services.UserV2, opts ...grpc.CallOption) (*empty.Empty, error) // UpdateUser updates an existing user in a backend. - UpdateUser(context.Context, *services.UserV2) (*empty.Empty, error) + UpdateUser(ctx context.Context, in *services.UserV2, opts ...grpc.CallOption) (*empty.Empty, error) // DeleteUser deletes an existing user in a backend by username. 
- DeleteUser(context.Context, *DeleteUserRequest) (*empty.Empty, error) + DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*empty.Empty, error) // AcquireSemaphore acquires lease with requested resources from semaphore. - AcquireSemaphore(context.Context, *services.AcquireSemaphoreRequest) (*services.SemaphoreLease, error) + AcquireSemaphore(ctx context.Context, in *services.AcquireSemaphoreRequest, opts ...grpc.CallOption) (*services.SemaphoreLease, error) // KeepAliveSemaphoreLease updates semaphore lease. - KeepAliveSemaphoreLease(context.Context, *services.SemaphoreLease) (*empty.Empty, error) + KeepAliveSemaphoreLease(ctx context.Context, in *services.SemaphoreLease, opts ...grpc.CallOption) (*empty.Empty, error) // CancelSemaphoreLease cancels semaphore lease early. - CancelSemaphoreLease(context.Context, *services.SemaphoreLease) (*empty.Empty, error) + CancelSemaphoreLease(ctx context.Context, in *services.SemaphoreLease, opts ...grpc.CallOption) (*empty.Empty, error) // GetSemaphores returns a list of all semaphores matching the supplied filter. - GetSemaphores(context.Context, *services.SemaphoreFilter) (*Semaphores, error) + GetSemaphores(ctx context.Context, in *services.SemaphoreFilter, opts ...grpc.CallOption) (*Semaphores, error) // DeleteSemaphore deletes a semaphore matching the supplied filter. - DeleteSemaphore(context.Context, *services.SemaphoreFilter) (*empty.Empty, error) + DeleteSemaphore(ctx context.Context, in *services.SemaphoreFilter, opts ...grpc.CallOption) (*empty.Empty, error) + // EmitAuditEvent emits audit event + EmitAuditEvent(ctx context.Context, in *events.OneOf, opts ...grpc.CallOption) (*empty.Empty, error) + // CreateAuditStream creates or resumes audit events streams + CreateAuditStream(ctx context.Context, opts ...grpc.CallOption) (AuthService_CreateAuditStreamClient, error) } -func RegisterAuthServiceServer(s *grpc.Server, srv AuthServiceServer) { - s.RegisterService(&_AuthService_serviceDesc, srv) +type authServiceClient struct { + cc *grpc.ClientConn } -func _AuthService_SendKeepAlives_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(AuthServiceServer).SendKeepAlives(&authServiceSendKeepAlivesServer{stream}) +func NewAuthServiceClient(cc *grpc.ClientConn) AuthServiceClient { + return &authServiceClient{cc} } -type AuthService_SendKeepAlivesServer interface { - SendAndClose(*empty.Empty) error - Recv() (*services.KeepAlive, error) - grpc.ServerStream +func (c *authServiceClient) SendKeepAlives(ctx context.Context, opts ...grpc.CallOption) (AuthService_SendKeepAlivesClient, error) { + stream, err := c.cc.NewStream(ctx, &_AuthService_serviceDesc.Streams[0], "/proto.AuthService/SendKeepAlives", opts...) 
+ if err != nil { + return nil, err + } + x := &authServiceSendKeepAlivesClient{stream} + return x, nil } -type authServiceSendKeepAlivesServer struct { - grpc.ServerStream +type AuthService_SendKeepAlivesClient interface { + Send(*services.KeepAlive) error + CloseAndRecv() (*empty.Empty, error) + grpc.ClientStream } -func (x *authServiceSendKeepAlivesServer) SendAndClose(m *empty.Empty) error { - return x.ServerStream.SendMsg(m) +type authServiceSendKeepAlivesClient struct { + grpc.ClientStream } -func (x *authServiceSendKeepAlivesServer) Recv() (*services.KeepAlive, error) { - m := new(services.KeepAlive) - if err := x.ServerStream.RecvMsg(m); err != nil { +func (x *authServiceSendKeepAlivesClient) Send(m *services.KeepAlive) error { + return x.ClientStream.SendMsg(m) +} + +func (x *authServiceSendKeepAlivesClient) CloseAndRecv() (*empty.Empty, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(empty.Empty) + if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -func _AuthService_WatchEvents_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(Watch) - if err := stream.RecvMsg(m); err != nil { - return err +func (c *authServiceClient) WatchEvents(ctx context.Context, in *Watch, opts ...grpc.CallOption) (AuthService_WatchEventsClient, error) { + stream, err := c.cc.NewStream(ctx, &_AuthService_serviceDesc.Streams[1], "/proto.AuthService/WatchEvents", opts...) + if err != nil { + return nil, err } - return srv.(AuthServiceServer).WatchEvents(m, &authServiceWatchEventsServer{stream}) -} - -type AuthService_WatchEventsServer interface { - Send(*Event) error - grpc.ServerStream + x := &authServiceWatchEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil } -type authServiceWatchEventsServer struct { - grpc.ServerStream +type AuthService_WatchEventsClient interface { + Recv() (*Event, error) + grpc.ClientStream } -func (x *authServiceWatchEventsServer) Send(m *Event) error { - return x.ServerStream.SendMsg(m) +type authServiceWatchEventsClient struct { + grpc.ClientStream } -func _AuthService_UpsertNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(services.ServerV2) - if err := dec(in); err != nil { +func (x *authServiceWatchEventsClient) Recv() (*Event, error) { + m := new(Event) + if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } - if interceptor == nil { - return srv.(AuthServiceServer).UpsertNode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.AuthService/UpsertNode", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).UpsertNode(ctx, req.(*services.ServerV2)) - } - return interceptor(ctx, in, info, handler) + return m, nil } -func _AuthService_GenerateUserCerts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UserCertsRequest) - if err := dec(in); err != nil { +func (c *authServiceClient) UpsertNode(ctx context.Context, in *services.ServerV2, opts ...grpc.CallOption) (*services.KeepAlive, error) { + out := new(services.KeepAlive) + err := c.cc.Invoke(ctx, "/proto.AuthService/UpsertNode", in, out, opts...) 
+ if err != nil { return nil, err } - if interceptor == nil { - return srv.(AuthServiceServer).GenerateUserCerts(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.AuthService/GenerateUserCerts", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).GenerateUserCerts(ctx, req.(*UserCertsRequest)) - } - return interceptor(ctx, in, info, handler) + return out, nil } -func _AuthService_GetUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetUserRequest) - if err := dec(in); err != nil { +func (c *authServiceClient) GenerateUserCerts(ctx context.Context, in *UserCertsRequest, opts ...grpc.CallOption) (*Certs, error) { + out := new(Certs) + err := c.cc.Invoke(ctx, "/proto.AuthService/GenerateUserCerts", in, out, opts...) + if err != nil { return nil, err } - if interceptor == nil { - return srv.(AuthServiceServer).GetUser(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.AuthService/GetUser", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).GetUser(ctx, req.(*GetUserRequest)) - } - return interceptor(ctx, in, info, handler) + return out, nil } -func _AuthService_GetUsers_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(GetUsersRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (c *authServiceClient) GetUser(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*services.UserV2, error) { + out := new(services.UserV2) + err := c.cc.Invoke(ctx, "/proto.AuthService/GetUser", in, out, opts...) + if err != nil { + return nil, err } - return srv.(AuthServiceServer).GetUsers(m, &authServiceGetUsersServer{stream}) + return out, nil } -type AuthService_GetUsersServer interface { - Send(*services.UserV2) error - grpc.ServerStream +func (c *authServiceClient) GetUsers(ctx context.Context, in *GetUsersRequest, opts ...grpc.CallOption) (AuthService_GetUsersClient, error) { + stream, err := c.cc.NewStream(ctx, &_AuthService_serviceDesc.Streams[2], "/proto.AuthService/GetUsers", opts...) 
+ if err != nil { + return nil, err + } + x := &authServiceGetUsersClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil } -type authServiceGetUsersServer struct { - grpc.ServerStream +type AuthService_GetUsersClient interface { + Recv() (*services.UserV2, error) + grpc.ClientStream } -func (x *authServiceGetUsersServer) Send(m *services.UserV2) error { - return x.ServerStream.SendMsg(m) +type authServiceGetUsersClient struct { + grpc.ClientStream } -func _AuthService_GetAccessRequests_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(services.AccessRequestFilter) - if err := dec(in); err != nil { +func (x *authServiceGetUsersClient) Recv() (*services.UserV2, error) { + m := new(services.UserV2) + if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } - if interceptor == nil { - return srv.(AuthServiceServer).GetAccessRequests(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.AuthService/GetAccessRequests", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).GetAccessRequests(ctx, req.(*services.AccessRequestFilter)) - } - return interceptor(ctx, in, info, handler) + return m, nil } -func _AuthService_CreateAccessRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(services.AccessRequestV3) - if err := dec(in); err != nil { +func (c *authServiceClient) GetAccessRequests(ctx context.Context, in *services.AccessRequestFilter, opts ...grpc.CallOption) (*AccessRequests, error) { + out := new(AccessRequests) + err := c.cc.Invoke(ctx, "/proto.AuthService/GetAccessRequests", in, out, opts...) + if err != nil { return nil, err } - if interceptor == nil { - return srv.(AuthServiceServer).CreateAccessRequest(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.AuthService/CreateAccessRequest", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).CreateAccessRequest(ctx, req.(*services.AccessRequestV3)) - } - return interceptor(ctx, in, info, handler) + return out, nil } -func _AuthService_DeleteAccessRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestID) - if err := dec(in); err != nil { +func (c *authServiceClient) CreateAccessRequest(ctx context.Context, in *services.AccessRequestV3, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/proto.AuthService/CreateAccessRequest", in, out, opts...) 
+ if err != nil { return nil, err } - if interceptor == nil { - return srv.(AuthServiceServer).DeleteAccessRequest(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.AuthService/DeleteAccessRequest", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).DeleteAccessRequest(ctx, req.(*RequestID)) - } - return interceptor(ctx, in, info, handler) + return out, nil } -func _AuthService_SetAccessRequestState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestStateSetter) - if err := dec(in); err != nil { +func (c *authServiceClient) DeleteAccessRequest(ctx context.Context, in *RequestID, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/proto.AuthService/DeleteAccessRequest", in, out, opts...) + if err != nil { return nil, err } - if interceptor == nil { - return srv.(AuthServiceServer).SetAccessRequestState(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.AuthService/SetAccessRequestState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).SetAccessRequestState(ctx, req.(*RequestStateSetter)) - } - return interceptor(ctx, in, info, handler) + return out, nil } -func _AuthService_GetPluginData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(services.PluginDataFilter) - if err := dec(in); err != nil { +func (c *authServiceClient) SetAccessRequestState(ctx context.Context, in *RequestStateSetter, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/proto.AuthService/SetAccessRequestState", in, out, opts...) + if err != nil { return nil, err } - if interceptor == nil { - return srv.(AuthServiceServer).GetPluginData(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.AuthService/GetPluginData", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).GetPluginData(ctx, req.(*services.PluginDataFilter)) - } - return interceptor(ctx, in, info, handler) + return out, nil } -func _AuthService_UpdatePluginData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(services.PluginDataUpdateParams) - if err := dec(in); err != nil { +func (c *authServiceClient) GetPluginData(ctx context.Context, in *services.PluginDataFilter, opts ...grpc.CallOption) (*PluginDataSeq, error) { + out := new(PluginDataSeq) + err := c.cc.Invoke(ctx, "/proto.AuthService/GetPluginData", in, out, opts...) 
+ if err != nil { return nil, err } - if interceptor == nil { - return srv.(AuthServiceServer).UpdatePluginData(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.AuthService/UpdatePluginData", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).UpdatePluginData(ctx, req.(*services.PluginDataUpdateParams)) - } - return interceptor(ctx, in, info, handler) + return out, nil } -func _AuthService_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PingRequest) - if err := dec(in); err != nil { +func (c *authServiceClient) UpdatePluginData(ctx context.Context, in *services.PluginDataUpdateParams, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/proto.AuthService/UpdatePluginData", in, out, opts...) + if err != nil { return nil, err } - if interceptor == nil { - return srv.(AuthServiceServer).Ping(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.AuthService/Ping", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).Ping(ctx, req.(*PingRequest)) - } - return interceptor(ctx, in, info, handler) + return out, nil } -func _AuthService_RotateResetPasswordTokenSecrets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RotateResetPasswordTokenSecretsRequest) - if err := dec(in); err != nil { +func (c *authServiceClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) { + out := new(PingResponse) + err := c.cc.Invoke(ctx, "/proto.AuthService/Ping", in, out, opts...) + if err != nil { return nil, err } - if interceptor == nil { - return srv.(AuthServiceServer).RotateResetPasswordTokenSecrets(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.AuthService/RotateResetPasswordTokenSecrets", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).RotateResetPasswordTokenSecrets(ctx, req.(*RotateResetPasswordTokenSecretsRequest)) - } - return interceptor(ctx, in, info, handler) + return out, nil } -func _AuthService_GetResetPasswordToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetResetPasswordTokenRequest) - if err := dec(in); err != nil { +func (c *authServiceClient) RotateResetPasswordTokenSecrets(ctx context.Context, in *RotateResetPasswordTokenSecretsRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenSecretsV3, error) { + out := new(services.ResetPasswordTokenSecretsV3) + err := c.cc.Invoke(ctx, "/proto.AuthService/RotateResetPasswordTokenSecrets", in, out, opts...) 
+	if err != nil {
 		return nil, err
 	}
-	if interceptor == nil {
-		return srv.(AuthServiceServer).GetResetPasswordToken(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/proto.AuthService/GetResetPasswordToken",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServiceServer).GetResetPasswordToken(ctx, req.(*GetResetPasswordTokenRequest))
-	}
-	return interceptor(ctx, in, info, handler)
+	return out, nil
 }

-func _AuthService_CreateResetPasswordToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(CreateResetPasswordTokenRequest)
-	if err := dec(in); err != nil {
+func (c *authServiceClient) GetResetPasswordToken(ctx context.Context, in *GetResetPasswordTokenRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenV3, error) {
+	out := new(services.ResetPasswordTokenV3)
+	err := c.cc.Invoke(ctx, "/proto.AuthService/GetResetPasswordToken", in, out, opts...)
+	if err != nil {
 		return nil, err
 	}
-	if interceptor == nil {
-		return srv.(AuthServiceServer).CreateResetPasswordToken(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/proto.AuthService/CreateResetPasswordToken",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServiceServer).CreateResetPasswordToken(ctx, req.(*CreateResetPasswordTokenRequest))
-	}
-	return interceptor(ctx, in, info, handler)
+	return out, nil
 }

-func _AuthService_CreateUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(services.UserV2)
-	if err := dec(in); err != nil {
+func (c *authServiceClient) CreateResetPasswordToken(ctx context.Context, in *CreateResetPasswordTokenRequest, opts ...grpc.CallOption) (*services.ResetPasswordTokenV3, error) {
+	out := new(services.ResetPasswordTokenV3)
+	err := c.cc.Invoke(ctx, "/proto.AuthService/CreateResetPasswordToken", in, out, opts...)
+	if err != nil {
 		return nil, err
 	}
-	if interceptor == nil {
-		return srv.(AuthServiceServer).CreateUser(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/proto.AuthService/CreateUser",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServiceServer).CreateUser(ctx, req.(*services.UserV2))
-	}
-	return interceptor(ctx, in, info, handler)
+	return out, nil
 }

-func _AuthService_UpdateUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(services.UserV2)
-	if err := dec(in); err != nil {
+func (c *authServiceClient) CreateUser(ctx context.Context, in *services.UserV2, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/proto.AuthService/CreateUser", in, out, opts...)
+	if err != nil {
 		return nil, err
 	}
-	if interceptor == nil {
-		return srv.(AuthServiceServer).UpdateUser(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/proto.AuthService/UpdateUser",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServiceServer).UpdateUser(ctx, req.(*services.UserV2))
-	}
-	return interceptor(ctx, in, info, handler)
+	return out, nil
 }

-func _AuthService_DeleteUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DeleteUserRequest)
-	if err := dec(in); err != nil {
+func (c *authServiceClient) UpdateUser(ctx context.Context, in *services.UserV2, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/proto.AuthService/UpdateUser", in, out, opts...)
+	if err != nil {
 		return nil, err
 	}
-	if interceptor == nil {
-		return srv.(AuthServiceServer).DeleteUser(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/proto.AuthService/DeleteUser",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServiceServer).DeleteUser(ctx, req.(*DeleteUserRequest))
-	}
-	return interceptor(ctx, in, info, handler)
+	return out, nil
 }

-func _AuthService_AcquireSemaphore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(services.AcquireSemaphoreRequest)
-	if err := dec(in); err != nil {
+func (c *authServiceClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/proto.AuthService/DeleteUser", in, out, opts...)
+	if err != nil {
 		return nil, err
 	}
-	if interceptor == nil {
-		return srv.(AuthServiceServer).AcquireSemaphore(ctx, in)
+	return out, nil
+}
+
+func (c *authServiceClient) AcquireSemaphore(ctx context.Context, in *services.AcquireSemaphoreRequest, opts ...grpc.CallOption) (*services.SemaphoreLease, error) {
+	out := new(services.SemaphoreLease)
+	err := c.cc.Invoke(ctx, "/proto.AuthService/AcquireSemaphore", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authServiceClient) KeepAliveSemaphoreLease(ctx context.Context, in *services.SemaphoreLease, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/proto.AuthService/KeepAliveSemaphoreLease", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authServiceClient) CancelSemaphoreLease(ctx context.Context, in *services.SemaphoreLease, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/proto.AuthService/CancelSemaphoreLease", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authServiceClient) GetSemaphores(ctx context.Context, in *services.SemaphoreFilter, opts ...grpc.CallOption) (*Semaphores, error) {
+	out := new(Semaphores)
+	err := c.cc.Invoke(ctx, "/proto.AuthService/GetSemaphores", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authServiceClient) DeleteSemaphore(ctx context.Context, in *services.SemaphoreFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/proto.AuthService/DeleteSemaphore", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authServiceClient) EmitAuditEvent(ctx context.Context, in *events.OneOf, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/proto.AuthService/EmitAuditEvent", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authServiceClient) CreateAuditStream(ctx context.Context, opts ...grpc.CallOption) (AuthService_CreateAuditStreamClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_AuthService_serviceDesc.Streams[3], "/proto.AuthService/CreateAuditStream", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &authServiceCreateAuditStreamClient{stream}
+	return x, nil
+}
+
+type AuthService_CreateAuditStreamClient interface {
+	Send(*AuditStreamRequest) error
+	Recv() (*events.StreamStatus, error)
+	grpc.ClientStream
+}
+
+type authServiceCreateAuditStreamClient struct {
+	grpc.ClientStream
+}
+
+func (x *authServiceCreateAuditStreamClient) Send(m *AuditStreamRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *authServiceCreateAuditStreamClient) Recv() (*events.StreamStatus, error) {
+	m := new(events.StreamStatus)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Server API for AuthService service
+
+type AuthServiceServer interface {
+	// SendKeepAlives allows node to send a stream of keep alive requests
+	SendKeepAlives(AuthService_SendKeepAlivesServer) error
+	// WatchEvents returns a new stream of cluster events
+	WatchEvents(*Watch, AuthService_WatchEventsServer) error
+	// UpsertNode upserts node
+	UpsertNode(context.Context, *services.ServerV2) (*services.KeepAlive, error)
+	// GenerateUserCerts generates a set of user certificates for use by `tctl
+	// auth sign`.
+	GenerateUserCerts(context.Context, *UserCertsRequest) (*Certs, error)
+	// GetUser gets a user resource by name.
+	GetUser(context.Context, *GetUserRequest) (*services.UserV2, error)
+	// GetUsers gets all current user resources.
+	GetUsers(*GetUsersRequest, AuthService_GetUsersServer) error
+	// GetAccessRequests gets all pending access requests.
+	GetAccessRequests(context.Context, *services.AccessRequestFilter) (*AccessRequests, error)
+	// CreateAccessRequest creates a new access request.
+	CreateAccessRequest(context.Context, *services.AccessRequestV3) (*empty.Empty, error)
+	// DeleteAccessRequest deletes an access request.
+	DeleteAccessRequest(context.Context, *RequestID) (*empty.Empty, error)
+	// SetAccessRequestState sets the state of an access request.
+	SetAccessRequestState(context.Context, *RequestStateSetter) (*empty.Empty, error)
+	// GetPluginData gets all plugin data matching the supplied filter.
+	GetPluginData(context.Context, *services.PluginDataFilter) (*PluginDataSeq, error)
+	// UpdatePluginData updates a plugin's resource-specific datastore.
+	UpdatePluginData(context.Context, *services.PluginDataUpdateParams) (*empty.Empty, error)
+	// Ping gets basic info about the auth server. This method is intended
+	// to mimic the behavior of the proxy's Ping method, and may be used by
+	// clients for verification or configuration on startup.
+	Ping(context.Context, *PingRequest) (*PingResponse, error)
+	// RotateResetPasswordTokenSecrets rotates token secrets for a given tokenID.
+	RotateResetPasswordTokenSecrets(context.Context, *RotateResetPasswordTokenSecretsRequest) (*services.ResetPasswordTokenSecretsV3, error)
+	// GetResetPasswordToken returns a token.
+	GetResetPasswordToken(context.Context, *GetResetPasswordTokenRequest) (*services.ResetPasswordTokenV3, error)
+	// CreateResetPasswordToken creates ResetPasswordToken.
+	CreateResetPasswordToken(context.Context, *CreateResetPasswordTokenRequest) (*services.ResetPasswordTokenV3, error)
+	// CreateUser inserts a new user entry to a backend.
+	CreateUser(context.Context, *services.UserV2) (*empty.Empty, error)
+	// UpdateUser updates an existing user in a backend.
+	UpdateUser(context.Context, *services.UserV2) (*empty.Empty, error)
+	// DeleteUser deletes an existing user in a backend by username.
+	DeleteUser(context.Context, *DeleteUserRequest) (*empty.Empty, error)
+	// AcquireSemaphore acquires lease with requested resources from semaphore.
+	AcquireSemaphore(context.Context, *services.AcquireSemaphoreRequest) (*services.SemaphoreLease, error)
+	// KeepAliveSemaphoreLease updates semaphore lease.
+	KeepAliveSemaphoreLease(context.Context, *services.SemaphoreLease) (*empty.Empty, error)
+	// CancelSemaphoreLease cancels semaphore lease early.
+	CancelSemaphoreLease(context.Context, *services.SemaphoreLease) (*empty.Empty, error)
+	// GetSemaphores returns a list of all semaphores matching the supplied filter.
+	GetSemaphores(context.Context, *services.SemaphoreFilter) (*Semaphores, error)
+	// DeleteSemaphore deletes a semaphore matching the supplied filter.
+	DeleteSemaphore(context.Context, *services.SemaphoreFilter) (*empty.Empty, error)
+	// EmitAuditEvent emits audit event
+	EmitAuditEvent(context.Context, *events.OneOf) (*empty.Empty, error)
+	// CreateAuditStream creates or resumes audit events streams
+	CreateAuditStream(AuthService_CreateAuditStreamServer) error
+}
+
+func RegisterAuthServiceServer(s *grpc.Server, srv AuthServiceServer) {
+	s.RegisterService(&_AuthService_serviceDesc, srv)
+}
+
+func _AuthService_SendKeepAlives_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(AuthServiceServer).SendKeepAlives(&authServiceSendKeepAlivesServer{stream})
+}
+
+type AuthService_SendKeepAlivesServer interface {
+	SendAndClose(*empty.Empty) error
+	Recv() (*services.KeepAlive, error)
+	grpc.ServerStream
+}
+
+type authServiceSendKeepAlivesServer struct {
+	grpc.ServerStream
+}
+
+func (x *authServiceSendKeepAlivesServer) SendAndClose(m *empty.Empty) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *authServiceSendKeepAlivesServer) Recv() (*services.KeepAlive, error) {
+	m := new(services.KeepAlive)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func _AuthService_WatchEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(Watch)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(AuthServiceServer).WatchEvents(m, &authServiceWatchEventsServer{stream})
+}
+
+type AuthService_WatchEventsServer interface {
+	Send(*Event) error
+	grpc.ServerStream
+}
+
+type authServiceWatchEventsServer struct {
+	grpc.ServerStream
+}
+
+func (x *authServiceWatchEventsServer) Send(m *Event) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _AuthService_UpsertNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(services.ServerV2)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServiceServer).UpsertNode(ctx, in)
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/proto.AuthService/AcquireSemaphore",
+		FullMethod: "/proto.AuthService/UpsertNode",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServiceServer).AcquireSemaphore(ctx, req.(*services.AcquireSemaphoreRequest))
+		return srv.(AuthServiceServer).UpsertNode(ctx, req.(*services.ServerV2))
 	}
 	return interceptor(ctx, in, info, handler)
 }

-func _AuthService_KeepAliveSemaphoreLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(services.SemaphoreLease)
+func _AuthService_GenerateUserCerts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UserCertsRequest)
 	if err := dec(in); err != nil {
 		return nil, err
 	}
 	if interceptor == nil {
-		return srv.(AuthServiceServer).KeepAliveSemaphoreLease(ctx, in)
+		return srv.(AuthServiceServer).GenerateUserCerts(ctx, in)
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/proto.AuthService/KeepAliveSemaphoreLease",
+		FullMethod: "/proto.AuthService/GenerateUserCerts",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServiceServer).KeepAliveSemaphoreLease(ctx, req.(*services.SemaphoreLease))
+		return srv.(AuthServiceServer).GenerateUserCerts(ctx, req.(*UserCertsRequest))
 	}
 	return interceptor(ctx, in, info, handler)
 }

-func _AuthService_CancelSemaphoreLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(services.SemaphoreLease)
+func _AuthService_GetUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetUserRequest)
 	if err := dec(in); err != nil {
 		return nil, err
 	}
 	if interceptor == nil {
-		return srv.(AuthServiceServer).CancelSemaphoreLease(ctx, in)
+		return srv.(AuthServiceServer).GetUser(ctx, in)
 	}
 	info := &grpc.UnaryServerInfo{
 		Server:     srv,
-		FullMethod: "/proto.AuthService/CancelSemaphoreLease",
+		FullMethod: "/proto.AuthService/GetUser",
 	}
 	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServiceServer).CancelSemaphoreLease(ctx, req.(*services.SemaphoreLease))
+		return srv.(AuthServiceServer).GetUser(ctx, req.(*GetUserRequest))
 	}
 	return interceptor(ctx, in, info, handler)
 }

-func _AuthService_GetSemaphores_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(services.SemaphoreFilter)
+func _AuthService_GetUsers_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(GetUsersRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(AuthServiceServer).GetUsers(m, &authServiceGetUsersServer{stream})
+}
+
+type AuthService_GetUsersServer interface {
+	Send(*services.UserV2) error
+	grpc.ServerStream
+}
+
+type authServiceGetUsersServer struct {
+	grpc.ServerStream
+}
+
+func (x *authServiceGetUsersServer) Send(m *services.UserV2) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _AuthService_GetAccessRequests_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(services.AccessRequestFilter)
 	if err := dec(in); err != nil {
 		return nil, err
 	}
 	if interceptor == nil {
-		return srv.(AuthServiceServer).GetSemaphores(ctx, in)
+		return
srv.(AuthServiceServer).GetAccessRequests(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.AuthService/GetSemaphores", + FullMethod: "/proto.AuthService/GetAccessRequests", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).GetSemaphores(ctx, req.(*services.SemaphoreFilter)) + return srv.(AuthServiceServer).GetAccessRequests(ctx, req.(*services.AccessRequestFilter)) } return interceptor(ctx, in, info, handler) } -func _AuthService_DeleteSemaphore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(services.SemaphoreFilter) +func _AuthService_CreateAccessRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(services.AccessRequestV3) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(AuthServiceServer).DeleteSemaphore(ctx, in) + return srv.(AuthServiceServer).CreateAccessRequest(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.AuthService/DeleteSemaphore", + FullMethod: "/proto.AuthService/CreateAccessRequest", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).DeleteSemaphore(ctx, req.(*services.SemaphoreFilter)) + return srv.(AuthServiceServer).CreateAccessRequest(ctx, req.(*services.AccessRequestV3)) } return interceptor(ctx, in, info, handler) } -var _AuthService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "proto.AuthService", - HandlerType: (*AuthServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "UpsertNode", - Handler: _AuthService_UpsertNode_Handler, - }, - { - MethodName: "GenerateUserCerts", - Handler: _AuthService_GenerateUserCerts_Handler, - }, - { - MethodName: "GetUser", - Handler: _AuthService_GetUser_Handler, - }, - { - MethodName: "GetAccessRequests", - Handler: _AuthService_GetAccessRequests_Handler, - }, - { - MethodName: "CreateAccessRequest", - Handler: _AuthService_CreateAccessRequest_Handler, - }, - { - MethodName: "DeleteAccessRequest", - Handler: _AuthService_DeleteAccessRequest_Handler, - }, - { - MethodName: "SetAccessRequestState", - Handler: _AuthService_SetAccessRequestState_Handler, - }, - { - MethodName: "GetPluginData", - Handler: _AuthService_GetPluginData_Handler, - }, - { - MethodName: "UpdatePluginData", - Handler: _AuthService_UpdatePluginData_Handler, - }, - { - MethodName: "Ping", - Handler: _AuthService_Ping_Handler, - }, - { - MethodName: "RotateResetPasswordTokenSecrets", - Handler: _AuthService_RotateResetPasswordTokenSecrets_Handler, - }, - { - MethodName: "GetResetPasswordToken", - Handler: _AuthService_GetResetPasswordToken_Handler, - }, - { - MethodName: "CreateResetPasswordToken", - Handler: _AuthService_CreateResetPasswordToken_Handler, - }, - { - MethodName: "CreateUser", - Handler: _AuthService_CreateUser_Handler, - }, - { - MethodName: "UpdateUser", - Handler: _AuthService_UpdateUser_Handler, - }, - { - MethodName: "DeleteUser", - Handler: _AuthService_DeleteUser_Handler, - }, - { - MethodName: "AcquireSemaphore", - Handler: _AuthService_AcquireSemaphore_Handler, - }, - { - MethodName: "KeepAliveSemaphoreLease", - Handler: _AuthService_KeepAliveSemaphoreLease_Handler, - }, - { - MethodName: "CancelSemaphoreLease", - Handler: _AuthService_CancelSemaphoreLease_Handler, - }, - { - MethodName: "GetSemaphores", - 
Handler: _AuthService_GetSemaphores_Handler, - }, - { - MethodName: "DeleteSemaphore", - Handler: _AuthService_DeleteSemaphore_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "SendKeepAlives", - Handler: _AuthService_SendKeepAlives_Handler, - ClientStreams: true, - }, - { - StreamName: "WatchEvents", - Handler: _AuthService_WatchEvents_Handler, - ServerStreams: true, - }, - { - StreamName: "GetUsers", - Handler: _AuthService_GetUsers_Handler, - ServerStreams: true, - }, - }, - Metadata: "auth.proto", +func _AuthService_DeleteAccessRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestID) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServiceServer).DeleteAccessRequest(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/DeleteAccessRequest", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).DeleteAccessRequest(ctx, req.(*RequestID)) + } + return interceptor(ctx, in, info, handler) } -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { +func _AuthService_SetAccessRequestState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestStateSetter) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil + if interceptor == nil { + return srv.(AuthServiceServer).SetAccessRequestState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/SetAccessRequestState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).SetAccessRequestState(ctx, req.(*RequestStateSetter)) + } + return interceptor(ctx, in, info, handler) } -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Type != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.Type)) +func _AuthService_GetPluginData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(services.PluginDataFilter) + if err := dec(in); err != nil { + return nil, err } - if m.Resource != nil { - nn1, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn1 + if interceptor == nil { + return srv.(AuthServiceServer).GetPluginData(ctx, in) } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/GetPluginData", } - return i, nil + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).GetPluginData(ctx, req.(*services.PluginDataFilter)) + } + return interceptor(ctx, in, info, handler) } -func (m *Event_ResourceHeader) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ResourceHeader != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.ResourceHeader.Size())) - n2, err := m.ResourceHeader.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 +func _AuthService_UpdatePluginData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(services.PluginDataUpdateParams) + if err := dec(in); err != nil { + return nil, err } - return i, nil -} -func (m *Event_CertAuthority) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.CertAuthority != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.CertAuthority.Size())) - n3, err := m.CertAuthority.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 + if interceptor == nil { + return srv.(AuthServiceServer).UpdatePluginData(ctx, in) } - return i, nil -} -func (m *Event_StaticTokens) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.StaticTokens != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.StaticTokens.Size())) - n4, err := m.StaticTokens.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/UpdatePluginData", } - return i, nil -} -func (m *Event_ProvisionToken) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ProvisionToken != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.ProvisionToken.Size())) - n5, err := m.ProvisionToken.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).UpdatePluginData(ctx, req.(*services.PluginDataUpdateParams)) } - return i, nil + return interceptor(ctx, in, info, handler) } -func (m *Event_ClusterName) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ClusterName != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.ClusterName.Size())) - n6, err := m.ClusterName.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 + +func _AuthService_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PingRequest) + if err := dec(in); err != nil { + return nil, err } - return i, nil + if interceptor == nil { + return srv.(AuthServiceServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).Ping(ctx, req.(*PingRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *Event_ClusterConfig) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ClusterConfig != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.ClusterConfig.Size())) - n7, err := m.ClusterConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 + +func _AuthService_RotateResetPasswordTokenSecrets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RotateResetPasswordTokenSecretsRequest) + if err := dec(in); err != nil { + return nil, err } - return i, nil -} -func (m *Event_User) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.User != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.User.Size())) - n8, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 + if interceptor == nil { + return srv.(AuthServiceServer).RotateResetPasswordTokenSecrets(ctx, in) } - return i, nil -} -func (m *Event_Role) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Role != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.Role.Size())) - n9, err 
:= m.Role.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/RotateResetPasswordTokenSecrets", } - return i, nil -} -func (m *Event_Namespace) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Namespace != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.Namespace.Size())) - n10, err := m.Namespace.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).RotateResetPasswordTokenSecrets(ctx, req.(*RotateResetPasswordTokenSecretsRequest)) } - return i, nil + return interceptor(ctx, in, info, handler) } -func (m *Event_Server) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Server != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.Server.Size())) - n11, err := m.Server.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 + +func _AuthService_GetResetPasswordToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetResetPasswordTokenRequest) + if err := dec(in); err != nil { + return nil, err } - return i, nil -} -func (m *Event_ReverseTunnel) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.ReverseTunnel != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.ReverseTunnel.Size())) - n12, err := m.ReverseTunnel.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 + if interceptor == nil { + return srv.(AuthServiceServer).GetResetPasswordToken(ctx, in) } - return i, nil -} -func (m *Event_TunnelConnection) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.TunnelConnection != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.TunnelConnection.Size())) - n13, err := m.TunnelConnection.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/GetResetPasswordToken", } - return i, nil -} -func (m *Event_AccessRequest) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.AccessRequest != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.AccessRequest.Size())) - n14, err := m.AccessRequest.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).GetResetPasswordToken(ctx, req.(*GetResetPasswordTokenRequest)) } - return i, nil + return interceptor(ctx, in, info, handler) } -func (m *Watch) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { + +func _AuthService_CreateResetPasswordToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateResetPasswordTokenRequest) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil -} - -func (m *Watch) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Kinds) > 0 { - for _, msg := range m.Kinds { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + if interceptor == nil { + return srv.(AuthServiceServer).CreateResetPasswordToken(ctx, in) 
} - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/CreateResetPasswordToken", } - return i, nil + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).CreateResetPasswordToken(ctx, req.(*CreateResetPasswordTokenRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *WatchKind) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { +func _AuthService_CreateUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(services.UserV2) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil + if interceptor == nil { + return srv.(AuthServiceServer).CreateUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/CreateUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).CreateUser(ctx, req.(*services.UserV2)) + } + return interceptor(ctx, in, info, handler) } -func (m *WatchKind) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Kind) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - } - if m.LoadSecrets { - dAtA[i] = 0x10 - i++ - if m.LoadSecrets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ +func _AuthService_UpdateUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(services.UserV2) + if err := dec(in); err != nil { + return nil, err } - if len(m.Name) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if interceptor == nil { + return srv.(AuthServiceServer).UpdateUser(ctx, in) } - if len(m.Filter) > 0 { - for k, _ := range m.Filter { - dAtA[i] = 0x22 - i++ - v := m.Filter[k] - mapSize := 1 + len(k) + sovAuth(uint64(len(k))) + 1 + len(v) + sovAuth(uint64(len(v))) - i = encodeVarintAuth(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/UpdateUser", } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).UpdateUser(ctx, req.(*services.UserV2)) } - return i, nil + return interceptor(ctx, in, info, handler) } -func (m *Certs) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { +func _AuthService_DeleteUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUserRequest) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil -} - -func (m *Certs) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.SSH) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.SSH))) - i += copy(dAtA[i:], m.SSH) + if interceptor == nil { + 
return srv.(AuthServiceServer).DeleteUser(ctx, in) } - if len(m.TLS) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.TLS))) - i += copy(dAtA[i:], m.TLS) + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/DeleteUser", } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).DeleteUser(ctx, req.(*DeleteUserRequest)) } - return i, nil + return interceptor(ctx, in, info, handler) } -func (m *UserCertsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { +func _AuthService_AcquireSemaphore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(services.AcquireSemaphoreRequest) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil -} - -func (m *UserCertsRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.PublicKey) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.PublicKey))) - i += copy(dAtA[i:], m.PublicKey) + if interceptor == nil { + return srv.(AuthServiceServer).AcquireSemaphore(ctx, in) } - if len(m.Username) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/AcquireSemaphore", } - dAtA[i] = 0x1a - i++ - i = encodeVarintAuth(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Expires))) - n15, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Expires, dAtA[i:]) - if err != nil { - return 0, err + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).AcquireSemaphore(ctx, req.(*services.AcquireSemaphoreRequest)) } - i += n15 - if len(m.Format) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Format))) - i += copy(dAtA[i:], m.Format) + return interceptor(ctx, in, info, handler) +} + +func _AuthService_KeepAliveSemaphoreLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(services.SemaphoreLease) + if err := dec(in); err != nil { + return nil, err } - if len(m.RouteToCluster) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.RouteToCluster))) - i += copy(dAtA[i:], m.RouteToCluster) + if interceptor == nil { + return srv.(AuthServiceServer).KeepAliveSemaphoreLease(ctx, in) } - if len(m.AccessRequests) > 0 { - for _, s := range m.AccessRequests { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/KeepAliveSemaphoreLease", } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).KeepAliveSemaphoreLease(ctx, req.(*services.SemaphoreLease)) } - return i, nil + return interceptor(ctx, in, info, handler) } -func (m *GetUserRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - 
if err != nil { +func _AuthService_CancelSemaphoreLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(services.SemaphoreLease) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil -} - -func (m *GetUserRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if interceptor == nil { + return srv.(AuthServiceServer).CancelSemaphoreLease(ctx, in) } - if m.WithSecrets { - dAtA[i] = 0x10 - i++ - if m.WithSecrets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/CancelSemaphoreLease", } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).CancelSemaphoreLease(ctx, req.(*services.SemaphoreLease)) } - return i, nil + return interceptor(ctx, in, info, handler) } -func (m *GetUsersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { +func _AuthService_GetSemaphores_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(services.SemaphoreFilter) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil -} - -func (m *GetUsersRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.WithSecrets { - dAtA[i] = 0x8 - i++ - if m.WithSecrets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + if interceptor == nil { + return srv.(AuthServiceServer).GetSemaphores(ctx, in) } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/GetSemaphores", } - return i, nil + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).GetSemaphores(ctx, req.(*services.SemaphoreFilter)) + } + return interceptor(ctx, in, info, handler) } -func (m *AccessRequests) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { +func _AuthService_DeleteSemaphore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(services.SemaphoreFilter) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil + if interceptor == nil { + return srv.(AuthServiceServer).DeleteSemaphore(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/DeleteSemaphore", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).DeleteSemaphore(ctx, req.(*services.SemaphoreFilter)) + } + return interceptor(ctx, in, info, handler) } -func (m *AccessRequests) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.AccessRequests) > 0 { - for _, msg := range m.AccessRequests { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } +func 
_AuthService_EmitAuditEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(events.OneOf) + if err := dec(in); err != nil { + return nil, err } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if interceptor == nil { + return srv.(AuthServiceServer).EmitAuditEvent(ctx, in) } - return i, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.AuthService/EmitAuditEvent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).EmitAuditEvent(ctx, req.(*events.OneOf)) + } + return interceptor(ctx, in, info, handler) } -func (m *PluginDataSeq) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { +func _AuthService_CreateAuditStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(AuthServiceServer).CreateAuditStream(&authServiceCreateAuditStreamServer{stream}) +} + +type AuthService_CreateAuditStreamServer interface { + Send(*events.StreamStatus) error + Recv() (*AuditStreamRequest, error) + grpc.ServerStream +} + +type authServiceCreateAuditStreamServer struct { + grpc.ServerStream +} + +func (x *authServiceCreateAuditStreamServer) Send(m *events.StreamStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *authServiceCreateAuditStreamServer) Recv() (*AuditStreamRequest, error) { + m := new(AuditStreamRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } - return dAtA[:n], nil + return m, nil } -func (m *PluginDataSeq) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.PluginData) > 0 { - for _, msg := range m.PluginData { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil +var _AuthService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.AuthService", + HandlerType: (*AuthServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UpsertNode", + Handler: _AuthService_UpsertNode_Handler, + }, + { + MethodName: "GenerateUserCerts", + Handler: _AuthService_GenerateUserCerts_Handler, + }, + { + MethodName: "GetUser", + Handler: _AuthService_GetUser_Handler, + }, + { + MethodName: "GetAccessRequests", + Handler: _AuthService_GetAccessRequests_Handler, + }, + { + MethodName: "CreateAccessRequest", + Handler: _AuthService_CreateAccessRequest_Handler, + }, + { + MethodName: "DeleteAccessRequest", + Handler: _AuthService_DeleteAccessRequest_Handler, + }, + { + MethodName: "SetAccessRequestState", + Handler: _AuthService_SetAccessRequestState_Handler, + }, + { + MethodName: "GetPluginData", + Handler: _AuthService_GetPluginData_Handler, + }, + { + MethodName: "UpdatePluginData", + Handler: _AuthService_UpdatePluginData_Handler, + }, + { + MethodName: "Ping", + Handler: _AuthService_Ping_Handler, + }, + { + MethodName: "RotateResetPasswordTokenSecrets", + Handler: _AuthService_RotateResetPasswordTokenSecrets_Handler, + }, + { + MethodName: "GetResetPasswordToken", + Handler: _AuthService_GetResetPasswordToken_Handler, + }, + { + MethodName: "CreateResetPasswordToken", + Handler: _AuthService_CreateResetPasswordToken_Handler, + }, + { + MethodName: "CreateUser", + Handler: _AuthService_CreateUser_Handler, 
+ }, + { + MethodName: "UpdateUser", + Handler: _AuthService_UpdateUser_Handler, + }, + { + MethodName: "DeleteUser", + Handler: _AuthService_DeleteUser_Handler, + }, + { + MethodName: "AcquireSemaphore", + Handler: _AuthService_AcquireSemaphore_Handler, + }, + { + MethodName: "KeepAliveSemaphoreLease", + Handler: _AuthService_KeepAliveSemaphoreLease_Handler, + }, + { + MethodName: "CancelSemaphoreLease", + Handler: _AuthService_CancelSemaphoreLease_Handler, + }, + { + MethodName: "GetSemaphores", + Handler: _AuthService_GetSemaphores_Handler, + }, + { + MethodName: "DeleteSemaphore", + Handler: _AuthService_DeleteSemaphore_Handler, + }, + { + MethodName: "EmitAuditEvent", + Handler: _AuthService_EmitAuditEvent_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "SendKeepAlives", + Handler: _AuthService_SendKeepAlives_Handler, + ClientStreams: true, + }, + { + StreamName: "WatchEvents", + Handler: _AuthService_WatchEvents_Handler, + ServerStreams: true, + }, + { + StreamName: "GetUsers", + Handler: _AuthService_GetUsers_Handler, + ServerStreams: true, + }, + { + StreamName: "CreateAuditStream", + Handler: _AuthService_CreateAuditStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "auth.proto", } -func (m *RequestStateSetter) Marshal() (dAtA []byte, err error) { +func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -3090,27 +3145,22 @@ func (m *RequestStateSetter) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RequestStateSetter) MarshalTo(dAtA []byte) (int, error) { +func (m *Event) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - if m.State != 0 { - dAtA[i] = 0x10 + if m.Type != 0 { + dAtA[i] = 0x8 i++ - i = encodeVarintAuth(dAtA, i, uint64(m.State)) + i = encodeVarintAuth(dAtA, i, uint64(m.Type)) } - if len(m.Delegator) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Delegator))) - i += copy(dAtA[i:], m.Delegator) + if m.Resource != nil { + nn1, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3118,61 +3168,189 @@ func (m *RequestStateSetter) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *RequestID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err +func (m *Event_ResourceHeader) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResourceHeader != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.ResourceHeader.Size())) + n2, err := m.ResourceHeader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } - return dAtA[:n], nil + return i, nil } - -func (m *RequestID) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa +func (m *Event_CertAuthority) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CertAuthority != nil { + dAtA[i] = 0x1a i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + i = encodeVarintAuth(dAtA, i, uint64(m.CertAuthority.Size())) + n3, err := m.CertAuthority.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + 
i += n3 } return i, nil } - -func (m *RotateResetPasswordTokenSecretsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err +func (m *Event_StaticTokens) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.StaticTokens != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.StaticTokens.Size())) + n4, err := m.StaticTokens.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 } - return dAtA[:n], nil + return i, nil } - -func (m *RotateResetPasswordTokenSecretsRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.TokenID) > 0 { - dAtA[i] = 0xa +func (m *Event_ProvisionToken) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ProvisionToken != nil { + dAtA[i] = 0x2a i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.TokenID))) - i += copy(dAtA[i:], m.TokenID) + i = encodeVarintAuth(dAtA, i, uint64(m.ProvisionToken.Size())) + n5, err := m.ProvisionToken.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + return i, nil +} +func (m *Event_ClusterName) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ClusterName != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.ClusterName.Size())) + n6, err := m.ClusterName.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 } return i, nil } - -func (m *GetResetPasswordTokenRequest) Marshal() (dAtA []byte, err error) { +func (m *Event_ClusterConfig) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ClusterConfig != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.ClusterConfig.Size())) + n7, err := m.ClusterConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *Event_User) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.User != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.User.Size())) + n8, err := m.User.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *Event_Role) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Role != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.Role.Size())) + n9, err := m.Role.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *Event_Namespace) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Namespace != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.Namespace.Size())) + n10, err := m.Namespace.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} +func (m *Event_Server) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Server != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.Server.Size())) + n11, err := m.Server.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} +func (m *Event_ReverseTunnel) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ReverseTunnel != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.ReverseTunnel.Size())) + n12, err := m.ReverseTunnel.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *Event_TunnelConnection) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.TunnelConnection != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintAuth(dAtA, i, 
uint64(m.TunnelConnection.Size())) + n13, err := m.TunnelConnection.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} +func (m *Event_AccessRequest) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.AccessRequest != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.AccessRequest.Size())) + n14, err := m.AccessRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} +func (m *Watch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -3182,16 +3360,22 @@ func (m *GetResetPasswordTokenRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetResetPasswordTokenRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *Watch) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.TokenID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.TokenID))) - i += copy(dAtA[i:], m.TokenID) + if len(m.Kinds) > 0 { + for _, msg := range m.Kinds { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3199,7 +3383,7 @@ func (m *GetResetPasswordTokenRequest) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *CreateResetPasswordTokenRequest) Marshal() (dAtA []byte, err error) { +func (m *WatchKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -3209,27 +3393,49 @@ func (m *CreateResetPasswordTokenRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CreateResetPasswordTokenRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *WatchKind) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.Name) > 0 { + if len(m.Kind) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) } - if len(m.Type) > 0 { - dAtA[i] = 0x12 + if m.LoadSecrets { + dAtA[i] = 0x10 + i++ + if m.LoadSecrets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) } - if m.TTL != 0 { - dAtA[i] = 0x18 + if len(m.Name) > 0 { + dAtA[i] = 0x1a i++ - i = encodeVarintAuth(dAtA, i, uint64(m.TTL)) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Filter) > 0 { + for k, _ := range m.Filter { + dAtA[i] = 0x22 + i++ + v := m.Filter[k] + mapSize := 1 + len(k) + sovAuth(uint64(len(k))) + 1 + len(v) + sovAuth(uint64(len(v))) + i = encodeVarintAuth(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3237,7 +3443,7 @@ func (m *CreateResetPasswordTokenRequest) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *PingRequest) Marshal() (dAtA []byte, err error) { +func (m *Certs) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -3247,18 +3453,30 @@ func (m *PingRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m 
*PingRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *Certs) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l + if len(m.SSH) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.SSH))) + i += copy(dAtA[i:], m.SSH) + } + if len(m.TLS) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.TLS))) + i += copy(dAtA[i:], m.TLS) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func (m *PingResponse) Marshal() (dAtA []byte, err error) { +func (m *UserCertsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -3268,22 +3486,57 @@ func (m *PingResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PingResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *UserCertsRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.ClusterName) > 0 { + if len(m.PublicKey) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.ClusterName))) - i += copy(dAtA[i:], m.ClusterName) + i = encodeVarintAuth(dAtA, i, uint64(len(m.PublicKey))) + i += copy(dAtA[i:], m.PublicKey) } - if len(m.ServerVersion) > 0 { + if len(m.Username) > 0 { dAtA[i] = 0x12 i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.ServerVersion))) - i += copy(dAtA[i:], m.ServerVersion) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintAuth(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Expires))) + n15, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Expires, dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + if len(m.Format) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Format))) + i += copy(dAtA[i:], m.Format) + } + if len(m.RouteToCluster) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.RouteToCluster))) + i += copy(dAtA[i:], m.RouteToCluster) + } + if len(m.AccessRequests) > 0 { + for _, s := range m.AccessRequests { + dAtA[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -3291,7 +3544,7 @@ func (m *PingResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *DeleteUserRequest) Marshal() (dAtA []byte, err error) { +func (m *GetUserRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -3301,7 +3554,7 @@ func (m *DeleteUserRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DeleteUserRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *GetUserRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -3312,14 +3565,24 @@ func (m *DeleteUserRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) i += copy(dAtA[i:], m.Name) } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Semaphores) Marshal() (dAtA []byte, err error) { - size := m.Size() + if m.WithSecrets { + dAtA[i] = 0x10 + i++ + if m.WithSecrets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GetUsersRequest) Marshal() (dAtA []byte, err 
error) { + size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { @@ -3328,13 +3591,44 @@ func (m *Semaphores) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Semaphores) MarshalTo(dAtA []byte) (int, error) { +func (m *GetUsersRequest) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.Semaphores) > 0 { - for _, msg := range m.Semaphores { + if m.WithSecrets { + dAtA[i] = 0x8 + i++ + if m.WithSecrets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *AccessRequests) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccessRequests) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AccessRequests) > 0 { + for _, msg := range m.AccessRequests { dAtA[i] = 0xa i++ i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) @@ -3351,445 +3645,1909 @@ func (m *Semaphores) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *PluginDataSeq) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return offset + 1 + return dAtA[:n], nil } -func (m *Event) Size() (n int) { + +func (m *PluginDataSeq) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if m.Type != 0 { - n += 1 + sovAuth(uint64(m.Type)) - } - if m.Resource != nil { - n += m.Resource.Size() + if len(m.PluginData) > 0 { + for _, msg := range m.PluginData { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *Event_ResourceHeader) Size() (n int) { - var l int - _ = l - if m.ResourceHeader != nil { - l = m.ResourceHeader.Size() - n += 1 + l + sovAuth(uint64(l)) +func (m *RequestStateSetter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Event_CertAuthority) Size() (n int) { + +func (m *RequestStateSetter) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if m.CertAuthority != nil { - l = m.CertAuthority.Size() - n += 1 + l + sovAuth(uint64(l)) + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) } - return n + if m.State != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.State)) + } + if len(m.Delegator) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Delegator))) + i += copy(dAtA[i:], m.Delegator) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } -func (m *Event_StaticTokens) Size() (n int) { - var l int - _ = l - if m.StaticTokens != nil { - l = m.StaticTokens.Size() - n += 1 + l + sovAuth(uint64(l)) + +func (m *RequestID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Event_ProvisionToken) Size() (n int) { + +func (m *RequestID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if m.ProvisionToken != nil { - l = m.ProvisionToken.Size() - n += 1 + l + sovAuth(uint64(l)) + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) } - return n + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } -func (m *Event_ClusterName) Size() (n int) { - var l int - _ = l - if m.ClusterName != nil { - l = m.ClusterName.Size() - n += 1 + l + sovAuth(uint64(l)) + +func (m *RotateResetPasswordTokenSecretsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Event_ClusterConfig) Size() (n int) { + +func (m *RotateResetPasswordTokenSecretsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if m.ClusterConfig != nil { - l = m.ClusterConfig.Size() - n += 1 + l + sovAuth(uint64(l)) + if len(m.TokenID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.TokenID))) + i += copy(dAtA[i:], m.TokenID) } - return n + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } -func (m *Event_User) Size() (n int) { - var l int - _ = l - if m.User != nil { - l = m.User.Size() - n += 1 + l + sovAuth(uint64(l)) + +func (m *GetResetPasswordTokenRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Event_Role) Size() (n int) { + +func (m *GetResetPasswordTokenRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if m.Role != nil { - l = m.Role.Size() - n += 1 + l + sovAuth(uint64(l)) + if len(m.TokenID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.TokenID))) + i += copy(dAtA[i:], m.TokenID) } - return n + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } -func (m *Event_Namespace) Size() (n int) { - var l int - _ = l - if m.Namespace != nil { - l = m.Namespace.Size() - n += 1 + l + sovAuth(uint64(l)) + +func (m *CreateResetPasswordTokenRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Event_Server) Size() (n int) { + +func (m *CreateResetPasswordTokenRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if m.Server != nil { - l = m.Server.Size() - n += 1 + l + sovAuth(uint64(l)) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } - return n -} -func (m *Event_ReverseTunnel) Size() (n int) { - var l int - _ = l - if m.ReverseTunnel != nil { - l = m.ReverseTunnel.Size() - n += 1 + l + sovAuth(uint64(l)) + if len(m.Type) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) } - return n -} -func (m *Event_TunnelConnection) Size() (n int) { - var l int - _ = l - if m.TunnelConnection != nil { - l = m.TunnelConnection.Size() - 
n += 1 + l + sovAuth(uint64(l)) + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.TTL)) } - return n + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } -func (m *Event_AccessRequest) Size() (n int) { - var l int - _ = l - if m.AccessRequest != nil { - l = m.AccessRequest.Size() - n += 1 + l + sovAuth(uint64(l)) + +func (m *PingRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Watch) Size() (n int) { + +func (m *PingRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if len(m.Kinds) > 0 { - for _, e := range m.Kinds { - l = e.Size() - n += 1 + l + sovAuth(uint64(l)) - } - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *WatchKind) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if m.LoadSecrets { - n += 2 - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if len(m.Filter) > 0 { - for k, v := range m.Filter { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovAuth(uint64(len(k))) + 1 + len(v) + sovAuth(uint64(len(v))) - n += mapEntrySize + 1 + sovAuth(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (m *PingResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Certs) Size() (n int) { +func (m *PingResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.SSH) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) + if len(m.ClusterName) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.ClusterName))) + i += copy(dAtA[i:], m.ClusterName) } - l = len(m.TLS) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) + if len(m.ServerVersion) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.ServerVersion))) + i += copy(dAtA[i:], m.ServerVersion) } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *UserCertsRequest) Size() (n int) { - var l int - _ = l - l = len(m.PublicKey) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Username) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Expires) - n += 1 + l + sovAuth(uint64(l)) - l = len(m.Format) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.RouteToCluster) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if len(m.AccessRequests) > 0 { - for _, s := range m.AccessRequests { - l = len(s) - n += 1 + l + sovAuth(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (m *DeleteUserRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *GetUserRequest) Size() (n int) { +func (m *DeleteUserRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if m.WithSecrets { - n += 2 + if len(m.Name) > 0 
{ + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *GetUsersRequest) Size() (n int) { - var l int - _ = l - if m.WithSecrets { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (m *Semaphores) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *AccessRequests) Size() (n int) { +func (m *Semaphores) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if len(m.AccessRequests) > 0 { - for _, e := range m.AccessRequests { - l = e.Size() - n += 1 + l + sovAuth(uint64(l)) + if len(m.Semaphores) > 0 { + for _, msg := range m.Semaphores { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *PluginDataSeq) Size() (n int) { - var l int - _ = l - if len(m.PluginData) > 0 { - for _, e := range m.PluginData { - l = e.Size() - n += 1 + l + sovAuth(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (m *AuditStreamRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *RequestStateSetter) Size() (n int) { +func (m *AuditStreamRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if m.State != 0 { - n += 1 + sovAuth(uint64(m.State)) - } - l = len(m.Delegator) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) + if m.Request != nil { + nn16, err := m.Request.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn16 } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *RequestID) Size() (n int) { +func (m *AuditStreamRequest_CreateStream) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CreateStream != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.CreateStream.Size())) + n17, err := m.CreateStream.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} +func (m *AuditStreamRequest_ResumeStream) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResumeStream != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.ResumeStream.Size())) + n18, err := m.ResumeStream.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} +func (m *AuditStreamRequest_CompleteStream) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CompleteStream != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.CompleteStream.Size())) + n19, err := m.CompleteStream.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} +func (m *AuditStreamRequest_FlushAndCloseStream) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.FlushAndCloseStream != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintAuth(dAtA, i, 
uint64(m.FlushAndCloseStream.Size())) + n20, err := m.FlushAndCloseStream.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + return i, nil +} +func (m *AuditStreamRequest_Event) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Event != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.Event.Size())) + n21, err := m.Event.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + return i, nil +} +func (m *AuditStreamStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditStreamStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) + if len(m.UploadID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.UploadID))) + i += copy(dAtA[i:], m.UploadID) } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *RotateResetPasswordTokenSecretsRequest) Size() (n int) { +func (m *CreateStream) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateStream) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.TokenID) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *GetResetPasswordTokenRequest) Size() (n int) { +func (m *ResumeStream) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResumeStream) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.TokenID) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + if len(m.UploadID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.UploadID))) + i += copy(dAtA[i:], m.UploadID) } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *CreateResetPasswordTokenRequest) Size() (n int) { +func (m *CompleteStream) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompleteStream) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - if m.TTL != 0 { - n += 1 + sovAuth(uint64(m.TTL)) + return i, nil +} + +func (m *FlushAndCloseStream) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + 
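// [editor's note — illustrative annotation, not part of the generated diff]
// Everything in these hunks is emitted by protoc-gen-gofast (see the
// --gofast_out rules in the Makefile) from the auth .proto definitions:
// each message gets a hand-rolled Marshal/MarshalTo, a Size, and an
// Unmarshal that walk the protobuf wire format directly instead of using
// reflection. The recurring building block is the base-128 varint written
// by encodeVarintAuth/sovAuth and re-read by the shift-loops in every
// Unmarshal method. A minimal, standalone sketch of that round trip is
// below (illustrative names, overflow/truncation checks omitted):
//
//	package main
//
//	import "fmt"
//
//	// putUvarint appends v as a little-endian base-128 varint — the same
//	// loop encodeVarintAuth uses for field tags and length prefixes.
//	func putUvarint(buf []byte, v uint64) []byte {
//		for v >= 1<<7 {
//			buf = append(buf, byte(v&0x7f|0x80)) // low 7 bits + continuation bit
//			v >>= 7
//		}
//		return append(buf, byte(v)) // final byte, continuation bit clear
//	}
//
//	// getUvarint mirrors the decode loop repeated in the generated
//	// Unmarshal methods; it returns the value and the bytes consumed.
//	func getUvarint(buf []byte) (v uint64, n int) {
//		for shift := uint(0); ; shift += 7 {
//			b := buf[n]
//			n++
//			v |= uint64(b&0x7f) << shift
//			if b < 0x80 {
//				return v, n
//			}
//		}
//	}
//
//	func main() {
//		buf := putUvarint(nil, 300)
//		fmt.Printf("%x\n", buf) // "ac02"
//		v, n := getUvarint(buf)
//		fmt.Println(v, n) // 300 2
//	}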
+func (m *FlushAndCloseStream) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - return n + return i, nil } -func (m *PingRequest) Size() (n int) { +func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Event) Size() (n int) { var l int _ = l + if m.Type != 0 { + n += 1 + sovAuth(uint64(m.Type)) + } + if m.Resource != nil { + n += m.Resource.Size() + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } -func (m *PingResponse) Size() (n int) { +func (m *Event_ResourceHeader) Size() (n int) { var l int _ = l - l = len(m.ClusterName) - if l > 0 { + if m.ResourceHeader != nil { + l = m.ResourceHeader.Size() n += 1 + l + sovAuth(uint64(l)) } - l = len(m.ServerVersion) - if l > 0 { + return n +} +func (m *Event_CertAuthority) Size() (n int) { + var l int + _ = l + if m.CertAuthority != nil { + l = m.CertAuthority.Size() n += 1 + l + sovAuth(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + return n +} +func (m *Event_StaticTokens) Size() (n int) { + var l int + _ = l + if m.StaticTokens != nil { + l = m.StaticTokens.Size() + n += 1 + l + sovAuth(uint64(l)) } return n } - -func (m *DeleteUserRequest) Size() (n int) { +func (m *Event_ProvisionToken) Size() (n int) { var l int _ = l - l = len(m.Name) - if l > 0 { + if m.ProvisionToken != nil { + l = m.ProvisionToken.Size() n += 1 + l + sovAuth(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + return n +} +func (m *Event_ClusterName) Size() (n int) { + var l int + _ = l + if m.ClusterName != nil { + l = m.ClusterName.Size() + n += 1 + l + sovAuth(uint64(l)) } return n } - -func (m *Semaphores) Size() (n int) { +func (m *Event_ClusterConfig) Size() (n int) { var l int _ = l - if len(m.Semaphores) > 0 { - for _, e := range m.Semaphores { - l = e.Size() - n += 1 + l + sovAuth(uint64(l)) + if m.ClusterConfig != nil { + l = m.ClusterConfig.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *Event_User) Size() (n int) { + var l int + _ = l + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *Event_Role) Size() (n int) { + var l int + _ = l + if m.Role != nil { + l = m.Role.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *Event_Namespace) Size() (n int) { + var l int + _ = l + if m.Namespace != nil { + l = m.Namespace.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *Event_Server) Size() (n int) { + var l int + _ = l + if m.Server != nil { + l = m.Server.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *Event_ReverseTunnel) Size() (n int) { + var l int + _ = l + if m.ReverseTunnel != nil { + l = m.ReverseTunnel.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *Event_TunnelConnection) Size() (n int) { + var l int + _ = l + if m.TunnelConnection != nil { + l = m.TunnelConnection.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *Event_AccessRequest) Size() (n int) { + var l int + _ = l + if m.AccessRequest != nil { + l = m.AccessRequest.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *Watch) Size() (n int) { + var l int + _ = l + if len(m.Kinds) > 0 { + for _, e := range m.Kinds { + l = e.Size() + n += 1 + l + 
sovAuth(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WatchKind) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.LoadSecrets { + n += 2 + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.Filter) > 0 { + for k, v := range m.Filter { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovAuth(uint64(len(k))) + 1 + len(v) + sovAuth(uint64(len(v))) + n += mapEntrySize + 1 + sovAuth(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Certs) Size() (n int) { + var l int + _ = l + l = len(m.SSH) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.TLS) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UserCertsRequest) Size() (n int) { + var l int + _ = l + l = len(m.PublicKey) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Expires) + n += 1 + l + sovAuth(uint64(l)) + l = len(m.Format) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.RouteToCluster) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.AccessRequests) > 0 { + for _, s := range m.AccessRequests { + l = len(s) + n += 1 + l + sovAuth(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetUserRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.WithSecrets { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetUsersRequest) Size() (n int) { + var l int + _ = l + if m.WithSecrets { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AccessRequests) Size() (n int) { + var l int + _ = l + if len(m.AccessRequests) > 0 { + for _, e := range m.AccessRequests { + l = e.Size() + n += 1 + l + sovAuth(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PluginDataSeq) Size() (n int) { + var l int + _ = l + if len(m.PluginData) > 0 { + for _, e := range m.PluginData { + l = e.Size() + n += 1 + l + sovAuth(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RequestStateSetter) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.State != 0 { + n += 1 + sovAuth(uint64(m.State)) + } + l = len(m.Delegator) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RequestID) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RotateResetPasswordTokenSecretsRequest) Size() (n int) { + var l int + _ = l + l = len(m.TokenID) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GetResetPasswordTokenRequest) Size() (n int) { + var l int + _ = l + l = len(m.TokenID) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != 
nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CreateResetPasswordTokenRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.TTL != 0 { + n += 1 + sovAuth(uint64(m.TTL)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PingRequest) Size() (n int) { + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PingResponse) Size() (n int) { + var l int + _ = l + l = len(m.ClusterName) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.ServerVersion) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteUserRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Semaphores) Size() (n int) { + var l int + _ = l + if len(m.Semaphores) > 0 { + for _, e := range m.Semaphores { + l = e.Size() + n += 1 + l + sovAuth(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuditStreamRequest) Size() (n int) { + var l int + _ = l + if m.Request != nil { + n += m.Request.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuditStreamRequest_CreateStream) Size() (n int) { + var l int + _ = l + if m.CreateStream != nil { + l = m.CreateStream.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *AuditStreamRequest_ResumeStream) Size() (n int) { + var l int + _ = l + if m.ResumeStream != nil { + l = m.ResumeStream.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *AuditStreamRequest_CompleteStream) Size() (n int) { + var l int + _ = l + if m.CompleteStream != nil { + l = m.CompleteStream.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *AuditStreamRequest_FlushAndCloseStream) Size() (n int) { + var l int + _ = l + if m.FlushAndCloseStream != nil { + l = m.FlushAndCloseStream.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *AuditStreamRequest_Event) Size() (n int) { + var l int + _ = l + if m.Event != nil { + l = m.Event.Size() + n += 1 + l + sovAuth(uint64(l)) + } + return n +} +func (m *AuditStreamStatus) Size() (n int) { + var l int + _ = l + l = len(m.UploadID) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CreateStream) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResumeStream) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.UploadID) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CompleteStream) Size() (n int) { + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FlushAndCloseStream) Size() (n int) { + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovAuth(x uint64) (n 
int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozAuth(x uint64) (n int) { + return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Operation(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.ResourceHeader{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_ResourceHeader{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertAuthority", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.CertAuthorityV2{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_CertAuthority{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StaticTokens", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.StaticTokensV2{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_StaticTokens{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProvisionToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.ProvisionTokenV2{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_ProvisionToken{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.ClusterNameV2{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_ClusterName{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.ClusterConfigV3{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_ClusterConfig{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.UserV2{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_User{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.RoleV3{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_Role{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + v := &services.Namespace{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_Namespace{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.ServerV2{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_Server{v} + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReverseTunnel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.ReverseTunnelV2{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_ReverseTunnel{v} + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TunnelConnection", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.TunnelConnectionV2{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_TunnelConnection{v} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &services.AccessRequestV3{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &Event_AccessRequest{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Watch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Watch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Watch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kinds", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kinds = append(m.Kinds, WatchKind{}) + if err := m.Kinds[len(m.Kinds)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadSecrets", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LoadSecrets = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthAuth + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthAuth + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Filter[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} -func sovAuth(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } + if iNdEx > l { + return io.ErrUnexpectedEOF } - return n -} -func sozAuth(x uint64) (n int) { - return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return nil } -func (m *Event) Unmarshal(dAtA []byte) error { +func (m *Certs) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3812,36 +5570,17 @@ func (m *Event) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") + return fmt.Errorf("proto: Certs: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Certs: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= (Operation(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceHeader", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SSH", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -3851,29 +5590,28 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - v := &services.ResourceHeader{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.SSH = append(m.SSH[:0], dAtA[iNdEx:postIndex]...) + if m.SSH == nil { + m.SSH = []byte{} } - m.Resource = &Event_ResourceHeader{v} iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CertAuthority", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -3883,29 +5621,79 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - v := &services.CertAuthorityV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TLS = append(m.TLS[:0], dAtA[iNdEx:postIndex]...) 
+ if m.TLS == nil { + m.TLS = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { return err } - m.Resource = &Event_CertAuthority{v} - iNdEx = postIndex - case 4: + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserCertsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserCertsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserCertsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StaticTokens", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -3915,29 +5703,28 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - v := &services.StaticTokensV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.PublicKey = append(m.PublicKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PublicKey == nil { + m.PublicKey = []byte{} } - m.Resource = &Event_StaticTokens{v} iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProvisionToken", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -3947,27 +5734,24 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - v := &services.ProvisionTokenV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Resource = &Event_ProvisionToken{v} + m.Username = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expires", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3991,17 +5775,15 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &services.ClusterNameV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Expires, dAtA[iNdEx:postIndex]); err != nil { return err } - m.Resource = &Event_ClusterName{v} iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -4011,29 +5793,26 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - v := &services.ClusterConfigV3{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Resource = &Event_ClusterConfig{v} + m.Format = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 8: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RouteToCluster", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -4043,29 +5822,26 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - v := &services.UserV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Resource = &Event_User{v} + 
m.RouteToCluster = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AccessRequests", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -4075,61 +5851,77 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - v := &services.RoleV3{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AccessRequests = append(m.AccessRequests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { return err } - m.Resource = &Event_Role{v} - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + if skippy < 0 { + return ErrInvalidLengthAuth } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if msglen < 0 { - return ErrInvalidLengthAuth + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetUserRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth } - postIndex := iNdEx + msglen - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - v := &services.Namespace{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - m.Resource = &Event_Namespace{v} - iNdEx = postIndex - case 11: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetUserRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetUserRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -4139,29 +5931,26 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - v := &services.ServerV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - m.Resource = &Event_Server{v} + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReverseTunnel", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WithSecrets", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -4171,29 +5960,68 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + m.WithSecrets = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - v := &services.ReverseTunnelV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetUsersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - m.Resource = &Event_ReverseTunnel{v} - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TunnelConnection", wireType) + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetUsersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetUsersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WithSecrets", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -4203,27 +6031,66 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + m.WithSecrets = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - v := &services.TunnelConnectionV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AccessRequests) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth } - m.Resource = &Event_TunnelConnection{v} - iNdEx = postIndex - case 14: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AccessRequests: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AccessRequests: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessRequest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AccessRequests", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4247,11 +6114,10 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &services.AccessRequestV3{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AccessRequests = append(m.AccessRequests, &services.AccessRequestV3{}) + if err := m.AccessRequests[len(m.AccessRequests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Resource = &Event_AccessRequest{v} iNdEx = postIndex default: iNdEx = preIndex @@ -4275,7 +6141,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } return nil } -func (m *Watch) Unmarshal(dAtA []byte) error { +func (m *PluginDataSeq) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4298,15 +6164,15 @@ func (m *Watch) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Watch: wiretype end group for non-group") + return fmt.Errorf("proto: PluginDataSeq: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Watch: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PluginDataSeq: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kinds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PluginData", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4330,8 +6196,8 @@ func (m *Watch) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kinds = append(m.Kinds, WatchKind{}) - if err := m.Kinds[len(m.Kinds)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.PluginData = append(m.PluginData, &services.PluginDataV3{}) + if err := m.PluginData[len(m.PluginData)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4357,7 +6223,7 @@ func (m *Watch) Unmarshal(dAtA []byte) error { } return nil } -func (m *WatchKind) Unmarshal(dAtA []byte) error { +func (m *RequestStateSetter) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4380,15 +6246,15 @@ func (m *WatchKind) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WatchKind: wiretype end group for non-group") + return fmt.Errorf("proto: RequestStateSetter: wiretype end group for non-group") } if 
fieldNum <= 0 { - return fmt.Errorf("proto: WatchKind: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestStateSetter: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4413,13 +6279,13 @@ func (m *WatchKind) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadSecrets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } - var v int + m.State = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -4429,158 +6295,39 @@ func (m *WatchKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + m.State |= (services.RequestState(b) & 0x7F) << shift if b < 0x80 { break } } - m.LoadSecrets = bool(v != 0) case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Delegator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Filter == nil { - m.Filter = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthAuth - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthAuth - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } } - m.Filter[mapkey] = mapvalue + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Delegator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4604,7 +6351,7 @@ func (m *WatchKind) Unmarshal(dAtA []byte) error { } return nil } -func (m *Certs) Unmarshal(dAtA []byte) error { +func (m *RequestID) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4627,48 +6374,17 @@ func (m *Certs) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Certs: wiretype end group for non-group") + return fmt.Errorf("proto: RequestID: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Certs: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestID: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SSH", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SSH = append(m.SSH[:0], dAtA[iNdEx:postIndex]...) - if m.SSH == nil { - m.SSH = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -4678,22 +6394,20 @@ func (m *Certs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.TLS = append(m.TLS[:0], dAtA[iNdEx:postIndex]...) 
- if m.TLS == nil { - m.TLS = []byte{} - } + m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4717,7 +6431,7 @@ func (m *Certs) Unmarshal(dAtA []byte) error { } return nil } -func (m *UserCertsRequest) Unmarshal(dAtA []byte) error { +func (m *RotateResetPasswordTokenSecretsRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4740,17 +6454,17 @@ func (m *UserCertsRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UserCertsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RotateResetPasswordTokenSecretsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UserCertsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RotateResetPasswordTokenSecretsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TokenID", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -4760,26 +6474,75 @@ func (m *UserCertsRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.PublicKey = append(m.PublicKey[:0], dAtA[iNdEx:postIndex]...) - if m.PublicKey == nil { - m.PublicKey = []byte{} - } + m.TokenID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetResetPasswordTokenRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetResetPasswordTokenRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetResetPasswordTokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TokenID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4804,41 +6567,62 @@ func (m *UserCertsRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Username = string(dAtA[iNdEx:postIndex]) + m.TokenID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expires", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if skippy < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Expires, dAtA[iNdEx:postIndex]); err != nil { - return err + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateResetPasswordTokenRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth } - iNdEx = postIndex - case 4: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateResetPasswordTokenRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateResetPasswordTokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4863,11 +6647,11 @@ func (m *UserCertsRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Format = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RouteToCluster", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4892,13 +6676,13 @@ func (m *UserCertsRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RouteToCluster = string(dAtA[iNdEx:postIndex]) + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessRequests", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) } - var stringLen uint64 + m.TTL = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -4906,23 +6690,13 @@ func (m *UserCertsRequest) Unmarshal(dAtA []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (Duration(b) & 0x7F) << shift + if b < 0x80 { + break + } } - m.AccessRequests = append(m.AccessRequests, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipAuth(dAtA[iNdEx:]) @@ -4945,7 +6719,7 @@ func (m *UserCertsRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetUserRequest) Unmarshal(dAtA []byte) error { +func (m *PingRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4968,61 +6742,12 @@ func (m *GetUserRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetUserRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetUserRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PingRequest: 
illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WithSecrets", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.WithSecrets = bool(v != 0) default: iNdEx = preIndex skippy, err := skipAuth(dAtA[iNdEx:]) @@ -5045,7 +6770,7 @@ func (m *GetUserRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetUsersRequest) Unmarshal(dAtA []byte) error { +func (m *PingResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5068,17 +6793,17 @@ func (m *GetUsersRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetUsersRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetUsersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WithSecrets", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -5088,12 +6813,50 @@ func (m *GetUsersRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.WithSecrets = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipAuth(dAtA[iNdEx:]) @@ -5116,7 +6879,7 @@ func (m *GetUsersRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m 
*AccessRequests) Unmarshal(dAtA []byte) error { +func (m *DeleteUserRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5139,17 +6902,17 @@ func (m *AccessRequests) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AccessRequests: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteUserRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AccessRequests: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteUserRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessRequests", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -5159,22 +6922,20 @@ func (m *AccessRequests) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.AccessRequests = append(m.AccessRequests, &services.AccessRequestV3{}) - if err := m.AccessRequests[len(m.AccessRequests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5198,7 +6959,7 @@ func (m *AccessRequests) Unmarshal(dAtA []byte) error { } return nil } -func (m *PluginDataSeq) Unmarshal(dAtA []byte) error { +func (m *Semaphores) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5221,15 +6982,15 @@ func (m *PluginDataSeq) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PluginDataSeq: wiretype end group for non-group") + return fmt.Errorf("proto: Semaphores: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PluginDataSeq: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Semaphores: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PluginData", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Semaphores", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5253,8 +7014,8 @@ func (m *PluginDataSeq) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PluginData = append(m.PluginData, &services.PluginDataV3{}) - if err := m.PluginData[len(m.PluginData)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Semaphores = append(m.Semaphores, &services.SemaphoreV3{}) + if err := m.Semaphores[len(m.Semaphores)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5280,7 +7041,7 @@ func (m *PluginDataSeq) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestStateSetter) Unmarshal(dAtA []byte) error { +func (m *AuditStreamRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5303,17 +7064,17 @@ func (m *RequestStateSetter) Unmarshal(dAtA []byte) error { fieldNum := 
int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestStateSetter: wiretype end group for non-group") + return fmt.Errorf("proto: AuditStreamRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestStateSetter: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AuditStreamRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CreateStream", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -5323,26 +7084,29 @@ func (m *RequestStateSetter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.ID = string(dAtA[iNdEx:postIndex]) + v := &CreateStream{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &AuditStreamRequest_CreateStream{v} iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResumeStream", wireType) } - m.State = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -5352,16 +7116,29 @@ func (m *RequestStateSetter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.State |= (services.RequestState(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResumeStream{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &AuditStreamRequest_ResumeStream{v} + iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Delegator", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CompleteStream", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -5371,77 +7148,29 @@ func (m *RequestStateSetter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Delegator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { + v := &CompleteStream{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Request = &AuditStreamRequest_CompleteStream{v} + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FlushAndCloseStream", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -5451,77 +7180,29 @@ func (m *RequestID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { + v := &FlushAndCloseStream{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RotateResetPasswordTokenSecretsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RotateResetPasswordTokenSecretsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RotateResetPasswordTokenSecretsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Request = &AuditStreamRequest_FlushAndCloseStream{v} + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TokenID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ -5531,20 +7212,23 @@ func (m *RotateResetPasswordTokenSecretsRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.TokenID = string(dAtA[iNdEx:postIndex]) + v := &events.OneOf{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &AuditStreamRequest_Event{v} iNdEx = postIndex default: iNdEx = preIndex @@ -5568,7 +7252,7 @@ func (m *RotateResetPasswordTokenSecretsRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetResetPasswordTokenRequest) Unmarshal(dAtA []byte) error { +func (m *AuditStreamStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5591,15 +7275,15 @@ func (m *GetResetPasswordTokenRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetResetPasswordTokenRequest: wiretype end group for non-group") + return fmt.Errorf("proto: AuditStreamStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetResetPasswordTokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AuditStreamStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TokenID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UploadID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5624,7 +7308,7 @@ func (m *GetResetPasswordTokenRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TokenID = string(dAtA[iNdEx:postIndex]) + m.UploadID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5648,7 +7332,7 @@ func (m *GetResetPasswordTokenRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *CreateResetPasswordTokenRequest) Unmarshal(dAtA []byte) error { +func (m *CreateStream) Unmarshal(dAtA []byte) 
error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5671,44 +7355,15 @@ func (m *CreateResetPasswordTokenRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateResetPasswordTokenRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CreateStream: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateResetPasswordTokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CreateStream: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5733,78 +7388,8 @@ func (m *CreateResetPasswordTokenRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.SessionID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= (Duration(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PingRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { default: iNdEx = preIndex skippy, err := skipAuth(dAtA[iNdEx:]) @@ -5827,7 +7412,7 @@ func (m *PingRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *PingResponse) Unmarshal(dAtA []byte) error { +func (m *ResumeStream) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5850,15 +7435,15 @@ func (m *PingResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ResumeStream: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResumeStream: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5883,11 +7468,11 @@ func (m *PingResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterName = string(dAtA[iNdEx:postIndex]) + m.SessionID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UploadID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5912,7 +7497,7 @@ func (m *PingResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServerVersion = string(dAtA[iNdEx:postIndex]) + m.UploadID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5936,7 +7521,7 @@ func (m *PingResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeleteUserRequest) Unmarshal(dAtA []byte) error { +func (m *CompleteStream) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5959,41 +7544,12 @@ func (m *DeleteUserRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteUserRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CompleteStream: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteUserRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CompleteStream: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipAuth(dAtA[iNdEx:]) @@ -6016,7 +7572,7 @@ func (m *DeleteUserRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *Semaphores) Unmarshal(dAtA []byte) error { +func (m *FlushAndCloseStream) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6039,43 +7595,12 @@ func (m *Semaphores) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Semaphores: wiretype end group for non-group") + return fmt.Errorf("proto: FlushAndCloseStream: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Semaphores: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FlushAndCloseStream: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Semaphores", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Semaphores = append(m.Semaphores, &services.SemaphoreV3{}) - if err := m.Semaphores[len(m.Semaphores)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipAuth(dAtA[iNdEx:]) @@ -6203,125 +7728,139 @@ var ( ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("auth.proto", fileDescriptor_auth_df26b5d43b9135f6) } - -var fileDescriptor_auth_df26b5d43b9135f6 = []byte{ - // 1865 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4f, 0x6f, 0xdb, 0xc8, - 0x15, 0x8f, 0x2c, 0xdb, 0xb1, 0x9e, 0x6c, 0x45, 0x1e, 0x3b, 0x36, 0xa3, 0x38, 0xa6, 0xab, 0x60, - 0x17, 0xc6, 0x36, 0xb5, 0xb7, 0x32, 0x36, 0x4d, 0x83, 0xb6, 0x81, 0x69, 0x2b, 0xb6, 0x37, 0x6e, - 0xd6, 0xa5, 0x14, 0xa5, 0x68, 0x0b, 0x08, 0xb4, 0xf4, 0x22, 0x13, 0xa6, 0x48, 0x85, 0x33, 0x72, - 0x6a, 0xa0, 0xa7, 0xa2, 0x1f, 0xa0, 0xa7, 0xa2, 0x87, 0x7e, 0x97, 0x5e, 0x73, 0xec, 0x27, 0x60, - 0xdb, 0xf4, 0xc6, 0x8f, 0x50, 0xf4, 0x50, 0xcc, 0x1f, 0x8a, 0x1c, 0x59, 0xca, 0x66, 0xb1, 0x27, - 0x71, 0xde, 0x9f, 0xdf, 0x7b, 0xf3, 0xe6, 0xcd, 0x6f, 0x9e, 0x00, 0x9c, 0x21, 0xbb, 0xd8, 0x19, - 0x84, 0x01, 0x0b, 0xc8, 0x9c, 0xf8, 0xa9, 0x3c, 0xed, 0xb9, 0xec, 0x62, 0x78, 0xbe, 0xd3, 0x09, - 0xfa, 0xbb, 0xbd, 0xd0, 0xb9, 0x72, 0x99, 0xc3, 0xdc, 0xc0, 0x77, 0xbc, 0x5d, 0x86, 0x1e, 0x0e, - 0x82, 0x90, 0xed, 0x7a, 0xee, 0xf9, 0x2e, 0xc5, 0xf0, 0xca, 0xed, 0x20, 0xdd, 0x65, 0xd7, 0x03, - 0xa4, 0x12, 0xa2, 0xb2, 0xda, 0x0b, 0x7a, 0x81, 0xf8, 0xdc, 0xe5, 0x5f, 0x4a, 0x7a, 0xbf, 0x17, - 0x04, 0x3d, 0x0f, 0x77, 0xc5, 0xea, 0x7c, 0xf8, 0x66, 0x17, 0xfb, 0x03, 0x76, 0xad, 0x94, 0xe6, - 
0xb8, 0x92, 0xb9, 0x7d, 0xa4, 0xcc, 0xe9, 0x0f, 0xa4, 0x41, 0xf5, 0xef, 0x05, 0x98, 0xab, 0x5f, - 0xa1, 0xcf, 0xc8, 0x13, 0x98, 0x6d, 0x5e, 0x0f, 0xd0, 0xc8, 0x6d, 0xe5, 0xb6, 0x4b, 0xb5, 0xb2, - 0xd4, 0xef, 0x7c, 0x33, 0xc0, 0x50, 0x64, 0x68, 0x91, 0x38, 0x32, 0x4b, 0x3c, 0x9d, 0x47, 0x41, - 0xdf, 0x65, 0x22, 0x88, 0x2d, 0x3c, 0xc8, 0x6f, 0xa0, 0x64, 0x23, 0x0d, 0x86, 0x61, 0x07, 0x8f, - 0xd1, 0xe9, 0x62, 0x68, 0xcc, 0x6c, 0xe5, 0xb6, 0x8b, 0x35, 0x63, 0x27, 0xd9, 0xc6, 0x8e, 0xae, - 0xb7, 0xd6, 0xe2, 0xc8, 0x24, 0xa1, 0x92, 0xa5, 0x78, 0xc7, 0xb7, 0xec, 0x31, 0x24, 0xd2, 0x86, - 0xa5, 0x03, 0x0c, 0xd9, 0xfe, 0x90, 0x5d, 0x04, 0xa1, 0xcb, 0xae, 0x8d, 0xbc, 0x80, 0xbe, 0x97, - 0x42, 0x6b, 0xea, 0x56, 0xcd, 0xda, 0x88, 0x23, 0xd3, 0xe8, 0x60, 0xc8, 0xda, 0x4e, 0x22, 0xd5, - 0x22, 0xe8, 0x78, 0xe4, 0xb7, 0xb0, 0xd8, 0xe0, 0x67, 0xd0, 0x69, 0x06, 0x97, 0xe8, 0x53, 0x63, - 0x76, 0x3c, 0xf5, 0xac, 0xb6, 0x55, 0xb3, 0xee, 0xc7, 0x91, 0xb9, 0x4e, 0x85, 0xac, 0xcd, 0x84, - 0x50, 0x43, 0xd7, 0xc0, 0x48, 0x07, 0x4a, 0x67, 0x61, 0x70, 0xe5, 0x52, 0x37, 0xf0, 0x85, 0xc8, - 0x98, 0x13, 0xf0, 0x95, 0x14, 0x5e, 0xd7, 0xb7, 0x6a, 0xd6, 0x83, 0x38, 0x32, 0xef, 0x0d, 0x12, - 0xa9, 0x8c, 0xa1, 0x97, 0x48, 0x77, 0x21, 0xaf, 0xa1, 0x78, 0xe0, 0x0d, 0x29, 0xc3, 0xf0, 0xa5, - 0xd3, 0x47, 0x63, 0x5e, 0x44, 0x58, 0xcf, 0x14, 0x28, 0x55, 0xb6, 0x6a, 0x56, 0x25, 0x8e, 0xcc, - 0xb5, 0x8e, 0x14, 0xb5, 0x7d, 0xa7, 0xaf, 0x97, 0x3f, 0x8b, 0x24, 0x6a, 0x2f, 0x97, 0x07, 0x81, - 0xff, 0xc6, 0xed, 0x19, 0xb7, 0x6f, 0xd4, 0x3e, 0xab, 0x6e, 0xed, 0xa9, 0xda, 0x2b, 0xf0, 0x8e, - 0x90, 0x8e, 0xd5, 0x3e, 0xeb, 0x40, 0x9e, 0xc2, 0xec, 0x2b, 0x8a, 0xa1, 0xb1, 0x20, 0x70, 0xcb, - 0x29, 0x2e, 0x97, 0xb6, 0x6a, 0xb2, 0xe5, 0x86, 0x14, 0x43, 0x0d, 0x44, 0xf8, 0x70, 0x5f, 0x3b, - 0xf0, 0xd0, 0x28, 0x8c, 0xfb, 0x72, 0x69, 0x6b, 0x4f, 0xfa, 0x86, 0x81, 0xa7, 0xef, 0x4f, 0xf8, - 0x90, 0x53, 0x28, 0xf0, 0x0d, 0xd2, 0x81, 0xd3, 0x41, 0x03, 0x04, 0xc0, 0x4a, 0x0a, 0x30, 0x52, - 0x59, 0xeb, 0x71, 0x64, 0xae, 0xf8, 0xc9, 0x52, 0x03, 0x4a, 0x01, 0x88, 0x05, 0xf3, 0x0d, 0x0c, - 0xaf, 0x30, 0x34, 0x8a, 0x02, 0x8a, 0x64, 0x7a, 0x47, 0xc8, 0x5b, 0x35, 0x6b, 0x35, 0x8e, 0xcc, - 0x32, 0x15, 0x2b, 0x0d, 0x46, 0x79, 0xf2, 0x52, 0xdb, 0x78, 0x85, 0x21, 0xc5, 0xe6, 0xd0, 0xf7, - 0xd1, 0x33, 0x16, 0xc7, 0x4b, 0xad, 0xa9, 0x93, 0x36, 0x0f, 0xa5, 0xb0, 0xcd, 0x84, 0x54, 0x2f, - 0xb5, 0xe6, 0x40, 0x2e, 0xa1, 0x2c, 0xbf, 0x0e, 0x02, 0xdf, 0xc7, 0x0e, 0xbf, 0xd1, 0xc6, 0x92, - 0x88, 0xb1, 0x91, 0xc6, 0x18, 0xb7, 0x68, 0xd5, 0x2c, 0x33, 0x8e, 0xcc, 0xfb, 0x12, 0x9e, 0x1f, - 0xa8, 0x52, 0x68, 0x91, 0x6e, 0x00, 0xf3, 0xdd, 0xec, 0x77, 0x3a, 0x48, 0xa9, 0x8d, 0x6f, 0x87, - 0x48, 0x99, 0x51, 0x1a, 0xdf, 0x8d, 0xa6, 0x4e, 0x1a, 0xc7, 0x11, 0xc2, 0x76, 0x28, 0xa5, 0xfa, - 0x6e, 0x34, 0x07, 0x0b, 0x60, 0x21, 0xe1, 0x89, 0xea, 0x31, 0xcc, 0xbd, 0x76, 0x58, 0xe7, 0x82, - 0x3c, 0x83, 0xb9, 0x17, 0xae, 0xdf, 0xa5, 0x46, 0x6e, 0x2b, 0x2f, 0x5a, 0x42, 0x32, 0x98, 0x50, - 0x72, 0x85, 0xb5, 0xfe, 0x3e, 0x32, 0x6f, 0xc5, 0x91, 0x79, 0xe7, 0x92, 0x9b, 0x65, 0x68, 0x4c, - 0xfa, 0x55, 0xff, 0x38, 0x03, 0x85, 0x91, 0x35, 0xd9, 0x80, 0x59, 0xfe, 0x2b, 0xf8, 0xb0, 0x60, - 0x2d, 0xc4, 0x91, 0x39, 0xcb, 0xfd, 0x6c, 0x21, 0x25, 0x35, 0x28, 0x9e, 0x06, 0x4e, 0xb7, 0x81, - 0x9d, 0x10, 0x19, 0x15, 0x84, 0xb7, 0x60, 0x95, 0xe3, 0xc8, 0x5c, 0xf4, 0x02, 0xa7, 0xdb, 0xa6, - 0x52, 0x6e, 0x67, 0x8d, 0x38, 0xa2, 0xb8, 0xa1, 0xf9, 0x14, 0x91, 0x37, 0x97, 0x2d, 0xa4, 0xe4, - 0x6b, 0x98, 0x7f, 0xee, 0x7a, 0x0c, 0x43, 0x63, 0x56, 0xe4, 0xbf, 0x31, 0x9e, 0xff, 0x8e, 0x54, - 0xd7, 0x7d, 0x16, 0x5e, 
0xcb, 0x86, 0x7a, 0x23, 0x04, 0x99, 0x8d, 0x28, 0x84, 0xca, 0x4f, 0xa1, - 0x98, 0x31, 0x26, 0x65, 0xc8, 0x5f, 0xe2, 0xb5, 0xdc, 0x89, 0xcd, 0x3f, 0xc9, 0x2a, 0xcc, 0x5d, - 0x39, 0xde, 0x10, 0x45, 0xe2, 0x05, 0x5b, 0x2e, 0x9e, 0xce, 0x3c, 0xc9, 0x55, 0x7f, 0x05, 0x73, - 0x9c, 0x20, 0x29, 0x79, 0x08, 0xf9, 0x46, 0xe3, 0x58, 0x38, 0x2d, 0x5a, 0xcb, 0x71, 0x64, 0x2e, - 0x51, 0x7a, 0x91, 0x89, 0xc5, 0xb5, 0xdc, 0xa8, 0x79, 0xda, 0x10, 0x28, 0xca, 0x88, 0x79, 0xd9, - 0xca, 0x72, 0x6d, 0xf5, 0x7f, 0x33, 0x50, 0xe6, 0x77, 0x56, 0xe0, 0xaa, 0x23, 0x24, 0x8f, 0xa0, - 0x70, 0x36, 0x3c, 0xf7, 0xdc, 0xce, 0x0b, 0x95, 0xd9, 0xa2, 0x55, 0x8a, 0x23, 0x13, 0x06, 0x42, - 0xd8, 0xbe, 0xc4, 0x6b, 0x3b, 0x35, 0x20, 0xdb, 0xb0, 0xc0, 0x11, 0x78, 0xb9, 0x64, 0xca, 0xd6, - 0x62, 0x1c, 0x99, 0x0b, 0x43, 0x25, 0xb3, 0x47, 0x5a, 0xd2, 0x80, 0xdb, 0xf5, 0xdf, 0x0f, 0xdc, - 0x10, 0xa9, 0x7a, 0x2a, 0x2a, 0x3b, 0xf2, 0x0d, 0xdc, 0x49, 0xde, 0xc0, 0x9d, 0x66, 0xf2, 0x06, - 0x5a, 0x0f, 0x54, 0x47, 0x2c, 0xa3, 0x74, 0x49, 0x33, 0xff, 0xf3, 0x3f, 0xcd, 0x9c, 0x9d, 0x20, - 0x91, 0x47, 0x30, 0xff, 0x3c, 0x08, 0xfb, 0x0e, 0x13, 0xcf, 0x43, 0x41, 0x55, 0x5f, 0x48, 0xb4, - 0xea, 0x0b, 0x09, 0x79, 0x0e, 0x25, 0x3b, 0x18, 0x32, 0x6c, 0x06, 0x8a, 0xee, 0x04, 0xeb, 0x17, - 0xac, 0xcd, 0x38, 0x32, 0x2b, 0x21, 0xd7, 0xb4, 0x59, 0xd0, 0x56, 0x34, 0x99, 0xf1, 0x1f, 0xf3, - 0x22, 0x75, 0x28, 0x69, 0x6d, 0x4f, 0x8d, 0xf9, 0xad, 0xfc, 0x76, 0x41, 0xbe, 0x10, 0xfa, 0x65, - 0xc9, 0xd6, 0x7c, 0xcc, 0xa9, 0xea, 0x41, 0xe9, 0x08, 0x19, 0x2f, 0x50, 0x52, 0xfb, 0xa4, 0x11, - 0x73, 0x13, 0x1b, 0xf1, 0x67, 0x50, 0x7c, 0xed, 0xb2, 0x0b, 0xbd, 0xb5, 0xc5, 0xb3, 0xf1, 0xce, - 0x65, 0x17, 0x49, 0x6b, 0x67, 0x02, 0x66, 0xcd, 0xab, 0x75, 0xb8, 0xa3, 0xa2, 0x8d, 0x8e, 0xba, - 0xa6, 0x03, 0xe6, 0xd2, 0xbb, 0x92, 0x05, 0xd4, 0x61, 0x2e, 0xc6, 0xf7, 0x4e, 0x5a, 0x37, 0xaa, - 0x21, 0xef, 0xf9, 0x47, 0x58, 0x65, 0x85, 0x5f, 0xf6, 0xb1, 0x42, 0xdd, 0x28, 0xcf, 0xaf, 0x61, - 0xe9, 0xcc, 0x1b, 0xf6, 0x5c, 0xff, 0xd0, 0x61, 0x4e, 0x03, 0xdf, 0x92, 0x23, 0x80, 0x54, 0xa0, - 0x82, 0xac, 0x65, 0x1e, 0xec, 0x91, 0xae, 0xb5, 0x67, 0xdd, 0x89, 0x23, 0xb3, 0x38, 0x10, 0x92, - 0x76, 0xd7, 0x61, 0x8e, 0x9d, 0x71, 0xad, 0xfe, 0x2d, 0x07, 0x44, 0x85, 0xe1, 0x53, 0x01, 0x36, - 0x90, 0xf1, 0x63, 0x5d, 0x83, 0x99, 0x93, 0x43, 0x55, 0xfb, 0xf9, 0x38, 0x32, 0x67, 0xdc, 0xae, - 0x3d, 0x73, 0x72, 0x48, 0x7e, 0x02, 0x73, 0xc2, 0x4c, 0x54, 0xbc, 0x94, 0x0d, 0x99, 0x05, 0xb1, - 0x0a, 0x71, 0x64, 0xce, 0xf1, 0x01, 0x04, 0x6d, 0x69, 0x4f, 0xbe, 0x82, 0xc2, 0x21, 0x7a, 0xd8, - 0x73, 0x58, 0x10, 0x2a, 0x72, 0x11, 0x2f, 0x57, 0x37, 0x11, 0x66, 0xce, 0x2a, 0xb5, 0xac, 0x3e, - 0x84, 0x82, 0x02, 0x3e, 0x39, 0x9c, 0x96, 0x54, 0xf5, 0x97, 0xf0, 0xb9, 0x1d, 0x88, 0x60, 0x48, - 0x91, 0x9d, 0x39, 0x94, 0xbe, 0x0b, 0xc2, 0xae, 0x18, 0x3c, 0xd4, 0x51, 0x25, 0xa7, 0xfc, 0x10, - 0x6e, 0x0b, 0xf1, 0x08, 0x46, 0x24, 0x2a, 0xc6, 0x17, 0x3b, 0xd1, 0x54, 0x0f, 0x60, 0xe3, 0x08, - 0xd9, 0x4d, 0xac, 0xef, 0x04, 0xf2, 0xa7, 0x1c, 0x98, 0x07, 0x21, 0x4e, 0x4c, 0xea, 0xd3, 0x5a, - 0x7c, 0x43, 0xcd, 0xba, 0x33, 0xa9, 0x96, 0x4f, 0xb6, 0x6a, 0x9e, 0xfd, 0x0c, 0xf2, 0xcd, 0xe6, - 0xa9, 0xa8, 0x64, 0x5e, 0xf4, 0x50, 0x9e, 0x31, 0xef, 0xbf, 0x91, 0xb9, 0x70, 0x38, 0x94, 0xb3, - 0xb0, 0xcd, 0xf5, 0xd5, 0x25, 0x28, 0x9e, 0xb9, 0x7e, 0x4f, 0x45, 0xac, 0xfe, 0x01, 0x16, 0xe5, - 0x92, 0x0e, 0x02, 0x9f, 0x22, 0xef, 0xfa, 0xec, 0x58, 0x26, 0x13, 0x11, 0x5d, 0x9f, 0x9d, 0xbe, - 0xf4, 0x89, 0xeb, 0x09, 0x2c, 0xa9, 0x91, 0x01, 0x43, 0x3e, 0xe0, 0xa9, 0x04, 0xc5, 0x2c, 0x23, - 0xa7, 0x87, 0xf6, 0x95, 0xd4, 0xd8, 0xba, 0x61, 
0xf5, 0xc7, 0xb0, 0xcc, 0x4f, 0x96, 0xe1, 0x27, - 0xdf, 0xf3, 0x6a, 0x03, 0xa0, 0x81, 0x7d, 0x67, 0x70, 0x11, 0x70, 0x8a, 0xab, 0x67, 0x57, 0xaa, - 0xeb, 0xef, 0x66, 0x27, 0x19, 0xa5, 0x6b, 0xed, 0x49, 0x9e, 0xa6, 0x23, 0x63, 0x3b, 0xe3, 0xf8, - 0xc5, 0x17, 0x50, 0x18, 0xfd, 0x65, 0x20, 0x0b, 0x30, 0x7b, 0xf2, 0xf2, 0xa4, 0x59, 0xbe, 0x45, - 0x6e, 0x43, 0xfe, 0xec, 0x55, 0xb3, 0x9c, 0x23, 0x00, 0xf3, 0x87, 0xf5, 0xd3, 0x7a, 0xb3, 0x5e, - 0x9e, 0xa9, 0xfd, 0x65, 0x09, 0x8a, 0x7c, 0x10, 0x6f, 0xc8, 0x20, 0xe4, 0x19, 0x94, 0x1a, 0xe8, - 0x77, 0x5f, 0x20, 0x0e, 0xf6, 0x3d, 0xf7, 0x0a, 0x29, 0xc9, 0x4c, 0x65, 0x23, 0x69, 0x65, 0xed, - 0x06, 0xa1, 0xd7, 0x79, 0x6b, 0x6f, 0xe7, 0xc8, 0x0f, 0xa1, 0x28, 0x5e, 0x4b, 0xf1, 0x87, 0x86, - 0x92, 0xc5, 0xec, 0x0b, 0x5a, 0x49, 0x56, 0x42, 0xf9, 0x65, 0x8e, 0x7c, 0x05, 0xf0, 0x6a, 0x40, - 0x31, 0x64, 0x2f, 0x83, 0x2e, 0x92, 0x09, 0x43, 0x5b, 0x65, 0x52, 0x74, 0xf2, 0x14, 0x96, 0x8f, - 0xd0, 0xe7, 0x3b, 0xc4, 0xd1, 0x9b, 0x46, 0xd6, 0x15, 0xf6, 0xf8, 0x2b, 0x37, 0x0a, 0x2a, 0xcd, - 0x6a, 0x70, 0x5b, 0x71, 0x23, 0xb9, 0xab, 0x14, 0x3a, 0x33, 0x57, 0x6e, 0xcc, 0xc0, 0xe4, 0x31, - 0x2c, 0x24, 0x7c, 0x4a, 0xd6, 0x74, 0x27, 0x3a, 0xd5, 0xeb, 0xcb, 0x1c, 0x39, 0xe1, 0x79, 0xb2, - 0x31, 0x0e, 0x7d, 0x30, 0x85, 0x2b, 0xd5, 0xd8, 0x90, 0x24, 0x35, 0xe6, 0x75, 0x0c, 0x2b, 0xf2, - 0xba, 0x69, 0x72, 0x32, 0x9d, 0x78, 0xa7, 0x1d, 0x11, 0x79, 0x06, 0x2b, 0xb2, 0x4b, 0x75, 0xa4, - 0x64, 0x54, 0x1b, 0xd1, 0xd1, 0x54, 0x80, 0xaf, 0xe1, 0x6e, 0x63, 0x6c, 0x57, 0x92, 0x03, 0xef, - 0xe9, 0x10, 0x19, 0xbe, 0x9d, 0x8a, 0xb5, 0x0f, 0x4b, 0x47, 0xc8, 0x52, 0xbe, 0x26, 0x95, 0x49, - 0x24, 0xaf, 0x4a, 0xb3, 0xaa, 0xf0, 0xf5, 0xa7, 0xe2, 0x14, 0xca, 0xaf, 0x06, 0x5d, 0x87, 0x61, - 0x06, 0x65, 0x6b, 0x12, 0x8a, 0xb2, 0x72, 0x42, 0xa7, 0x4f, 0xa7, 0x26, 0xb4, 0x0b, 0xb3, 0x9c, - 0x41, 0x08, 0x49, 0x62, 0xa5, 0xec, 0x52, 0x59, 0xd1, 0x64, 0x8a, 0x62, 0xde, 0x81, 0xf9, 0x2d, - 0xe4, 0x4c, 0x7e, 0x94, 0xd4, 0xe5, 0x93, 0x48, 0xbc, 0xf2, 0x99, 0xf6, 0x97, 0x7d, 0xb2, 0x6d, - 0x6b, 0x8f, 0xfc, 0x0e, 0xee, 0x4e, 0xa4, 0x71, 0xf2, 0x30, 0xed, 0xd0, 0xa9, 0xdc, 0x5c, 0xd9, - 0xfc, 0x58, 0x90, 0xd6, 0x1e, 0x39, 0x07, 0x63, 0x1a, 0xbd, 0x93, 0xcf, 0x93, 0x0b, 0xf5, 0x71, - 0xfe, 0xff, 0xd6, 0x18, 0x8f, 0x01, 0x24, 0x84, 0xb8, 0x8d, 0x37, 0x2e, 0xd0, 0xd4, 0x33, 0x7a, - 0xcc, 0x59, 0xa3, 0xfb, 0xdd, 0xfd, 0x7e, 0x01, 0x90, 0xf2, 0x33, 0x31, 0xd4, 0x2e, 0x6e, 0x50, - 0xf6, 0x54, 0xff, 0x6f, 0xa0, 0xbc, 0xdf, 0x79, 0x3b, 0x74, 0x43, 0x1c, 0x91, 0x2d, 0xf9, 0x41, - 0xf6, 0x02, 0xea, 0xba, 0x04, 0xce, 0x98, 0xc0, 0xe0, 0xa7, 0xe8, 0x50, 0x24, 0x2f, 0x60, 0x7d, - 0x44, 0x6a, 0x63, 0xaa, 0xa9, 0x4e, 0x53, 0xb3, 0x3b, 0x86, 0xd5, 0x03, 0xc7, 0xef, 0xa0, 0xf7, - 0xbd, 0x91, 0x7e, 0x2e, 0x2e, 0x65, 0xe6, 0x5d, 0xba, 0x37, 0x01, 0x42, 0xdd, 0xc9, 0x65, 0x55, - 0xc5, 0x8c, 0xf5, 0x21, 0xdc, 0x91, 0x35, 0x4d, 0xab, 0xf4, 0x11, 0x80, 0x29, 0x49, 0x58, 0xab, - 0xef, 0xff, 0xbd, 0x99, 0x7b, 0xff, 0x61, 0x33, 0xf7, 0x8f, 0x0f, 0x9b, 0xb9, 0x7f, 0x7d, 0xd8, - 0xcc, 0xfd, 0xf5, 0x3f, 0x9b, 0xb7, 0xce, 0xe7, 0x85, 0xd5, 0xde, 0xff, 0x03, 0x00, 0x00, 0xff, - 0xff, 0x55, 0x42, 0x2d, 0x6e, 0xd6, 0x13, 0x00, 0x00, +func init() { proto.RegisterFile("auth.proto", fileDescriptor_auth_4edeb1cba1dbd286) } + +var fileDescriptor_auth_4edeb1cba1dbd286 = []byte{ + // 2087 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x5f, 0x6f, 0x1b, 0x4b, + 0x15, 0xcf, 0x3a, 0x7f, 0x7d, 0x92, 0xb8, 0xce, 0x24, 0x4d, 0xb6, 0xbe, 0x69, 0xb6, 0xb8, 0xea, + 0x55, 0x74, 0x29, 0x49, 
0x71, 0x74, 0x7b, 0xdb, 0x0a, 0xa8, 0xb2, 0x89, 0xdb, 0xe4, 0x36, 0xb4, + 0x61, 0xed, 0xba, 0x08, 0x90, 0xac, 0x8d, 0x3d, 0x4d, 0x56, 0x5d, 0xef, 0xba, 0x3b, 0xb3, 0x29, + 0x91, 0x78, 0x42, 0x7c, 0x00, 0x1e, 0x41, 0xe2, 0xbb, 0xf0, 0xda, 0x17, 0x24, 0x3e, 0xc1, 0x02, + 0xe5, 0xcd, 0x1f, 0x01, 0xf1, 0x80, 0xe6, 0xcf, 0x7a, 0x67, 0xd6, 0x76, 0xe8, 0x85, 0xa7, 0x78, + 0xce, 0x9f, 0xdf, 0x39, 0x73, 0xe6, 0xcc, 0xfc, 0xce, 0x06, 0xc0, 0x8d, 0xe9, 0xc5, 0x4e, 0x3f, + 0x0a, 0x69, 0x88, 0x66, 0xf9, 0x9f, 0xca, 0xda, 0x79, 0x78, 0x1e, 0xf2, 0x9f, 0xbb, 0xec, 0x97, + 0x50, 0x56, 0xbe, 0x38, 0x0f, 0xc3, 0x73, 0x1f, 0xef, 0xf2, 0xd5, 0x59, 0xfc, 0x76, 0x17, 0xf7, + 0xfa, 0xf4, 0x4a, 0x2a, 0xad, 0xbc, 0x92, 0x7a, 0x3d, 0x4c, 0xa8, 0xdb, 0xeb, 0x4b, 0x83, 0x27, + 0xe7, 0x1e, 0xbd, 0x88, 0xcf, 0x76, 0x3a, 0x61, 0x6f, 0xf7, 0x3c, 0x72, 0x2f, 0x3d, 0xea, 0x52, + 0x2f, 0x0c, 0x5c, 0x7f, 0x97, 0x62, 0x1f, 0xf7, 0xc3, 0x88, 0xee, 0xfa, 0xde, 0xd9, 0x2e, 0xc1, + 0xd1, 0xa5, 0xd7, 0xc1, 0x64, 0x97, 0x5e, 0xf5, 0x31, 0x91, 0xbe, 0x8f, 0x3f, 0xcf, 0x17, 0x5f, + 0xe2, 0x80, 0x12, 0xf9, 0x47, 0xb8, 0x56, 0xff, 0x5c, 0x84, 0xd9, 0x3a, 0x13, 0xa0, 0x47, 0x30, + 0xd3, 0xbc, 0xea, 0x63, 0xd3, 0xb8, 0x63, 0x6c, 0x97, 0x6a, 0x65, 0xa1, 0xdf, 0x79, 0xd5, 0xc7, + 0x11, 0x07, 0xb3, 0xd1, 0x20, 0xb1, 0x4a, 0x2c, 0xea, 0xfd, 0xb0, 0xe7, 0x51, 0xbe, 0x37, 0x87, + 0x7b, 0xa0, 0x5f, 0x40, 0xc9, 0xc1, 0x24, 0x8c, 0xa3, 0x0e, 0x3e, 0xc2, 0x6e, 0x17, 0x47, 0x66, + 0xe1, 0x8e, 0xb1, 0xbd, 0x58, 0x33, 0x77, 0xd2, 0x6c, 0x77, 0x74, 0xbd, 0xbd, 0x3e, 0x48, 0x2c, + 0x14, 0x49, 0x59, 0x86, 0x77, 0x34, 0xe5, 0xe4, 0x90, 0x50, 0x1b, 0x96, 0x0f, 0x70, 0x44, 0xf7, + 0x63, 0x7a, 0x11, 0x46, 0x1e, 0xbd, 0x32, 0xa7, 0x39, 0xf4, 0xad, 0x0c, 0x5a, 0x53, 0xb7, 0x6a, + 0xf6, 0xe6, 0x20, 0xb1, 0xcc, 0x0e, 0x8e, 0x68, 0xdb, 0x4d, 0xa5, 0x5a, 0x04, 0x1d, 0x0f, 0xfd, + 0x12, 0x96, 0x1a, 0xac, 0x5c, 0x9d, 0x66, 0xf8, 0x0e, 0x07, 0xc4, 0x9c, 0xc9, 0xa7, 0xae, 0x6a, + 0x5b, 0x35, 0xfb, 0x8b, 0x41, 0x62, 0x6d, 0x10, 0x2e, 0x6b, 0x53, 0x2e, 0xd4, 0xd0, 0x35, 0x30, + 0xd4, 0x81, 0xd2, 0x69, 0x14, 0x5e, 0x7a, 0xc4, 0x0b, 0x03, 0x2e, 0x32, 0x67, 0x39, 0x7c, 0x25, + 0x83, 0xd7, 0xf5, 0xad, 0x9a, 0x7d, 0x7b, 0x90, 0x58, 0xb7, 0xfa, 0xa9, 0x54, 0xc4, 0xd0, 0x4b, + 0xa4, 0xbb, 0xa0, 0x37, 0xb0, 0x78, 0xe0, 0xc7, 0x84, 0xe2, 0xe8, 0xa5, 0xdb, 0xc3, 0xe6, 0x1c, + 0x8f, 0xb0, 0xa1, 0x14, 0x28, 0x53, 0xb6, 0x6a, 0x76, 0x65, 0x90, 0x58, 0xeb, 0x1d, 0x21, 0x6a, + 0x07, 0x6e, 0x4f, 0x2f, 0xbf, 0x8a, 0xc4, 0x6b, 0x2f, 0x96, 0x07, 0x61, 0xf0, 0xd6, 0x3b, 0x37, + 0xe7, 0x47, 0x6a, 0xaf, 0xaa, 0x5b, 0x7b, 0xb2, 0xf6, 0x12, 0xbc, 0xc3, 0xa5, 0xb9, 0xda, 0xab, + 0x0e, 0xe8, 0x09, 0xcc, 0xbc, 0x26, 0x38, 0x32, 0x17, 0x38, 0x6e, 0x39, 0xc3, 0x65, 0xd2, 0x56, + 0x4d, 0xb4, 0x5c, 0x4c, 0x70, 0xa4, 0x81, 0x70, 0x1f, 0xe6, 0xeb, 0x84, 0x3e, 0x36, 0x8b, 0x79, + 0x5f, 0x26, 0x6d, 0xed, 0x09, 0xdf, 0x28, 0xf4, 0xf5, 0xfd, 0x71, 0x1f, 0x74, 0x02, 0x45, 0xb6, + 0x41, 0xd2, 0x77, 0x3b, 0xd8, 0x04, 0x0e, 0xb0, 0x9a, 0x01, 0x0c, 0x55, 0xf6, 0xc6, 0x20, 0xb1, + 0x56, 0x83, 0x74, 0xa9, 0x01, 0x65, 0x00, 0xc8, 0x86, 0xb9, 0x06, 0x8e, 0x2e, 0x71, 0x64, 0x2e, + 0x72, 0x28, 0xa4, 0xf4, 0x0e, 0x97, 0xb7, 0x6a, 0xf6, 0xda, 0x20, 0xb1, 0xca, 0x84, 0xaf, 0x34, + 0x18, 0xe9, 0xc9, 0x4a, 0xed, 0xe0, 0x4b, 0x1c, 0x11, 0xdc, 0x8c, 0x83, 0x00, 0xfb, 0xe6, 0x52, + 0xbe, 0xd4, 0x9a, 0x3a, 0x6d, 0xf3, 0x48, 0x08, 0xdb, 0x94, 0x4b, 0xf5, 0x52, 0x6b, 0x0e, 0xe8, + 0x1d, 0x94, 0xc5, 0xaf, 0x83, 0x30, 0x08, 0x70, 0x87, 0xdd, 0x68, 0x73, 0x99, 0xc7, 0xd8, 0xcc, + 0x62, 0xe4, 0x2d, 0x5a, 0x35, 0xdb, 0x1a, 0x24, 
0xd6, 0x17, 0x02, 0x9e, 0x1d, 0xa8, 0x54, 0x68, + 0x91, 0x46, 0x80, 0xd9, 0x6e, 0xf6, 0x3b, 0x1d, 0x4c, 0x88, 0x83, 0xdf, 0xc7, 0x98, 0x50, 0xb3, + 0x94, 0xdf, 0x8d, 0xa6, 0x4e, 0x1b, 0xc7, 0xe5, 0xc2, 0x76, 0x24, 0xa4, 0xfa, 0x6e, 0x34, 0x07, + 0x1b, 0x60, 0x21, 0x7d, 0x27, 0xaa, 0x47, 0x30, 0xfb, 0xc6, 0xa5, 0x9d, 0x0b, 0xf4, 0x14, 0x66, + 0x5f, 0x78, 0x41, 0x97, 0x98, 0xc6, 0x9d, 0x69, 0xde, 0x12, 0xe2, 0x05, 0xe3, 0x4a, 0xa6, 0xb0, + 0x37, 0x3e, 0x26, 0xd6, 0xd4, 0x20, 0xb1, 0x6e, 0xbc, 0x63, 0x66, 0xca, 0x33, 0x26, 0xfc, 0xaa, + 0xbf, 0x2d, 0x40, 0x71, 0x68, 0x8d, 0x36, 0x61, 0x86, 0xfd, 0xe5, 0xef, 0x61, 0xd1, 0x5e, 0x18, + 0x24, 0xd6, 0x0c, 0xf3, 0x73, 0xb8, 0x14, 0xd5, 0x60, 0xf1, 0x24, 0x74, 0xbb, 0x0d, 0xdc, 0x89, + 0x30, 0x25, 0xfc, 0xc1, 0x5b, 0xb0, 0xcb, 0x83, 0xc4, 0x5a, 0xf2, 0x43, 0xb7, 0xdb, 0x26, 0x42, + 0xee, 0xa8, 0x46, 0x0c, 0x91, 0xdf, 0xd0, 0xe9, 0x0c, 0x91, 0x35, 0x97, 0xc3, 0xa5, 0xe8, 0x5b, + 0x98, 0x7b, 0xe6, 0xf9, 0x14, 0x47, 0xe6, 0x0c, 0xcf, 0x7f, 0x33, 0x9f, 0xff, 0x8e, 0x50, 0xd7, + 0x03, 0x1a, 0x5d, 0x89, 0x86, 0x7a, 0xcb, 0x05, 0xca, 0x46, 0x24, 0x42, 0xe5, 0x31, 0x2c, 0x2a, + 0xc6, 0xa8, 0x0c, 0xd3, 0xef, 0xf0, 0x95, 0xd8, 0x89, 0xc3, 0x7e, 0xa2, 0x35, 0x98, 0xbd, 0x74, + 0xfd, 0x18, 0xf3, 0xc4, 0x8b, 0x8e, 0x58, 0x3c, 0x29, 0x3c, 0x32, 0xaa, 0x3f, 0x83, 0x59, 0xf6, + 0x40, 0x12, 0x74, 0x17, 0xa6, 0x1b, 0x8d, 0x23, 0xee, 0xb4, 0x64, 0xaf, 0x0c, 0x12, 0x6b, 0x99, + 0x90, 0x0b, 0x25, 0x16, 0xd3, 0x32, 0xa3, 0xe6, 0x49, 0x83, 0xa3, 0x48, 0x23, 0xea, 0xab, 0x95, + 0x65, 0xda, 0xea, 0xbf, 0x0b, 0x50, 0x66, 0x77, 0x96, 0xe3, 0xca, 0x23, 0x44, 0xf7, 0xa1, 0x78, + 0x1a, 0x9f, 0xf9, 0x5e, 0xe7, 0x85, 0xcc, 0x6c, 0xc9, 0x2e, 0x0d, 0x12, 0x0b, 0xfa, 0x5c, 0xd8, + 0x7e, 0x87, 0xaf, 0x9c, 0xcc, 0x00, 0x6d, 0xc3, 0x02, 0x43, 0x60, 0xe5, 0x12, 0x29, 0xdb, 0x4b, + 0x83, 0xc4, 0x5a, 0x88, 0xa5, 0xcc, 0x19, 0x6a, 0x51, 0x03, 0xe6, 0xeb, 0xbf, 0xee, 0x7b, 0x11, + 0x26, 0x92, 0x2a, 0x2a, 0x3b, 0x82, 0x7a, 0x77, 0x52, 0xea, 0xdd, 0x69, 0xa6, 0xd4, 0x6b, 0xdf, + 0x96, 0x1d, 0xb1, 0x82, 0x85, 0x4b, 0x96, 0xf9, 0xef, 0xff, 0x66, 0x19, 0x4e, 0x8a, 0x84, 0xee, + 0xc3, 0xdc, 0xb3, 0x30, 0xea, 0xb9, 0x94, 0xd3, 0x43, 0x51, 0x56, 0x9f, 0x4b, 0xb4, 0xea, 0x73, + 0x09, 0x7a, 0x06, 0x25, 0x27, 0x8c, 0x29, 0x6e, 0x86, 0xf2, 0xb9, 0xe3, 0xaf, 0x7e, 0xd1, 0xde, + 0x1a, 0x24, 0x56, 0x25, 0x62, 0x9a, 0x36, 0x0d, 0xdb, 0xf2, 0x99, 0x54, 0xfc, 0x73, 0x5e, 0xa8, + 0x0e, 0x25, 0xad, 0xed, 0x89, 0x39, 0x77, 0x67, 0x7a, 0xbb, 0x28, 0x18, 0x42, 0xbf, 0x2c, 0x6a, + 0xcd, 0x73, 0x4e, 0x55, 0x1f, 0x4a, 0xcf, 0x31, 0x65, 0x05, 0x4a, 0x6b, 0x9f, 0x36, 0xa2, 0x31, + 0xb6, 0x11, 0x7f, 0x04, 0x8b, 0x6f, 0x3c, 0x7a, 0xa1, 0xb7, 0x36, 0xa7, 0x8d, 0x0f, 0x1e, 0xbd, + 0x48, 0x5b, 0x5b, 0x09, 0xa8, 0x9a, 0x57, 0xeb, 0x70, 0x43, 0x46, 0x1b, 0x1e, 0x75, 0x4d, 0x07, + 0x34, 0xb2, 0xbb, 0xa2, 0x02, 0xea, 0x30, 0x17, 0xf9, 0xbd, 0xa3, 0xd6, 0x48, 0x35, 0xc4, 0x3d, + 0xbf, 0xe6, 0x55, 0x59, 0x65, 0x97, 0x3d, 0x57, 0xa8, 0x91, 0xf2, 0xfc, 0x1c, 0x96, 0x4f, 0xfd, + 0xf8, 0xdc, 0x0b, 0x0e, 0x5d, 0xea, 0x36, 0xf0, 0x7b, 0xf4, 0x1c, 0x20, 0x13, 0xc8, 0x20, 0xeb, + 0x0a, 0x61, 0x0f, 0x75, 0xad, 0x3d, 0xfb, 0xc6, 0x20, 0xb1, 0x16, 0xfb, 0x5c, 0xd2, 0xee, 0xba, + 0xd4, 0x75, 0x14, 0xd7, 0xea, 0x9f, 0x0c, 0x40, 0x32, 0x0c, 0x9b, 0x0a, 0x70, 0x03, 0x53, 0x76, + 0xac, 0xeb, 0x50, 0x38, 0x3e, 0x94, 0xb5, 0x9f, 0x1b, 0x24, 0x56, 0xc1, 0xeb, 0x3a, 0x85, 0xe3, + 0x43, 0xf4, 0x0d, 0xcc, 0x72, 0x33, 0x5e, 0xf1, 0x92, 0x1a, 0x52, 0x05, 0xb1, 0x8b, 0x83, 0xc4, + 0x9a, 0x65, 0x03, 0x08, 0x76, 0x84, 0x3d, 0xfa, 0x1a, 0x8a, 0x87, 0xd8, 
0xc7, 0xe7, 0x2e, 0x0d, + 0x23, 0xf9, 0xb8, 0x70, 0xe6, 0xea, 0xa6, 0x42, 0xe5, 0xac, 0x32, 0xcb, 0xea, 0x5d, 0x28, 0x4a, + 0xe0, 0xe3, 0xc3, 0x49, 0x49, 0x55, 0x7f, 0x0a, 0x5f, 0x3a, 0x21, 0x0f, 0x86, 0x09, 0xa6, 0xa7, + 0x2e, 0x21, 0x1f, 0xc2, 0xa8, 0xcb, 0x07, 0x0f, 0x79, 0x54, 0xe9, 0x29, 0xdf, 0x85, 0x79, 0x2e, + 0x1e, 0xc2, 0xf0, 0x44, 0xf9, 0xf8, 0xe2, 0xa4, 0x9a, 0xea, 0x01, 0x6c, 0x3e, 0xc7, 0x74, 0x14, + 0xeb, 0x3b, 0x81, 0xfc, 0xce, 0x00, 0xeb, 0x20, 0xc2, 0x63, 0x93, 0xfa, 0xbc, 0x16, 0xdf, 0x94, + 0xb3, 0x6e, 0x21, 0xd3, 0xb2, 0xc9, 0x56, 0xce, 0xb3, 0xf7, 0x60, 0xba, 0xd9, 0x3c, 0xe1, 0x95, + 0x9c, 0xe6, 0x3d, 0x34, 0x4d, 0xa9, 0xff, 0xaf, 0xc4, 0x5a, 0x38, 0x8c, 0xc5, 0x2c, 0xec, 0x30, + 0x7d, 0x75, 0x19, 0x16, 0x4f, 0xbd, 0xe0, 0x5c, 0x46, 0xac, 0xfe, 0x06, 0x96, 0xc4, 0x92, 0xf4, + 0xc3, 0x80, 0x60, 0xd6, 0xf5, 0xea, 0x58, 0x26, 0x12, 0xe1, 0x5d, 0xaf, 0x4e, 0x5f, 0xfa, 0xc4, + 0xf5, 0x08, 0x96, 0xe5, 0xc8, 0x80, 0x23, 0x36, 0xe0, 0xc9, 0x04, 0xf9, 0x2c, 0x23, 0xa6, 0x87, + 0xf6, 0xa5, 0xd0, 0x38, 0xba, 0x61, 0xf5, 0x87, 0xb0, 0xc2, 0x4e, 0x96, 0xe2, 0xcf, 0xbe, 0xe7, + 0xd5, 0x06, 0x40, 0x03, 0xf7, 0xdc, 0xfe, 0x45, 0xc8, 0x9e, 0xb8, 0xba, 0xba, 0x92, 0x5d, 0x7f, + 0x53, 0x9d, 0x64, 0xa4, 0xae, 0xb5, 0x27, 0xde, 0x69, 0x32, 0x34, 0x76, 0x14, 0xc7, 0xea, 0x5f, + 0x0a, 0x80, 0xf6, 0xe3, 0xae, 0x47, 0x1b, 0x34, 0xc2, 0x6e, 0x2f, 0xcd, 0xe4, 0x31, 0x2c, 0x89, + 0x13, 0x13, 0x62, 0x9e, 0x11, 0x1b, 0xba, 0x04, 0xc5, 0xa9, 0x2a, 0x36, 0x43, 0xab, 0x6b, 0xe6, + 0xea, 0x60, 0x12, 0xf7, 0x52, 0xd7, 0x82, 0xe6, 0xaa, 0xaa, 0x98, 0xab, 0xba, 0x46, 0x4f, 0xa1, + 0x74, 0x10, 0xf6, 0xfa, 0xac, 0x2c, 0xd2, 0x59, 0x50, 0xc2, 0xcd, 0x34, 0xae, 0xa6, 0x64, 0xa3, + 0xb5, 0x2e, 0x41, 0x2f, 0x61, 0xf5, 0x99, 0x1f, 0x93, 0x8b, 0xfd, 0xa0, 0x7b, 0xe0, 0x87, 0x24, + 0x45, 0x99, 0x91, 0xc4, 0x22, 0x50, 0xc6, 0x58, 0x1c, 0x4d, 0x39, 0xe3, 0x1c, 0xd1, 0x3d, 0xf9, + 0xb1, 0x25, 0x3f, 0x03, 0x96, 0x77, 0xe4, 0xb7, 0xd8, 0xab, 0x00, 0xbf, 0x7a, 0x7b, 0x34, 0xe5, + 0x08, 0xad, 0x5d, 0x84, 0xf9, 0xb4, 0xab, 0x76, 0x61, 0x45, 0x29, 0x27, 0xbb, 0xef, 0x31, 0x41, + 0x15, 0x58, 0x78, 0xdd, 0x67, 0x73, 0x46, 0x7a, 0x4d, 0x9c, 0xe1, 0xba, 0x7a, 0x5f, 0xaf, 0x34, + 0xda, 0x84, 0x62, 0x03, 0x13, 0xd6, 0x23, 0x43, 0xe3, 0x4c, 0x50, 0x3d, 0xd2, 0x8b, 0x7b, 0xbd, + 0xb5, 0x16, 0xb7, 0x90, 0x8b, 0x5b, 0xce, 0xd7, 0xba, 0x7a, 0x73, 0x6c, 0xf1, 0xbe, 0xfa, 0x0a, + 0x8a, 0xc3, 0x8f, 0x4a, 0xb4, 0x00, 0x33, 0xc7, 0x2f, 0x8f, 0x9b, 0xe5, 0x29, 0x34, 0x0f, 0xd3, + 0xa7, 0xaf, 0x9b, 0x65, 0x03, 0x01, 0xcc, 0x1d, 0xd6, 0x4f, 0xea, 0xcd, 0x7a, 0xb9, 0x50, 0xfb, + 0x63, 0x09, 0x16, 0xd9, 0xa7, 0x5a, 0x43, 0xb4, 0x21, 0x3b, 0xd0, 0x06, 0x0e, 0xba, 0x2f, 0x30, + 0xee, 0xef, 0xfb, 0xde, 0x25, 0x26, 0x48, 0x99, 0xdb, 0x87, 0xd2, 0xca, 0xfa, 0x08, 0xe5, 0xd7, + 0xd9, 0xe3, 0xb7, 0x6d, 0xa0, 0xef, 0xc3, 0x22, 0x9f, 0xa7, 0x78, 0x9d, 0x09, 0x5a, 0x52, 0x67, + 0xac, 0x4a, 0xba, 0xe2, 0xca, 0x07, 0x06, 0xfa, 0x1a, 0xe0, 0x75, 0x9f, 0xe0, 0x88, 0xbe, 0x0c, + 0xbb, 0x18, 0x8d, 0x19, 0xeb, 0x2b, 0xe3, 0xa2, 0xa3, 0x27, 0xb0, 0xf2, 0x1c, 0x07, 0x6c, 0x87, + 0x78, 0x38, 0xf5, 0xa0, 0x0d, 0x89, 0x9d, 0x9f, 0x83, 0x86, 0x41, 0x85, 0x59, 0x0d, 0xe6, 0x25, + 0x7b, 0xa2, 0xb4, 0x49, 0x75, 0xee, 0xae, 0x8c, 0x7c, 0x25, 0xa1, 0x87, 0xb0, 0x90, 0x32, 0x2e, + 0x5a, 0xd7, 0x9d, 0xc8, 0x44, 0xaf, 0x07, 0x06, 0x3a, 0x66, 0x79, 0xd2, 0x1c, 0xcb, 0xde, 0x9e, + 0xc0, 0xa6, 0x72, 0xb0, 0x4c, 0x93, 0xca, 0x79, 0x1d, 0xc1, 0xaa, 0x68, 0x3a, 0x4d, 0x8e, 0x26, + 0x53, 0xf3, 0xa4, 0x23, 0x42, 0x4f, 0x61, 0x55, 0xbc, 0x63, 0x3a, 0x52, 0x79, 0x78, 0xdd, 0x25, + 
0x61, 0x4d, 0x04, 0xf8, 0x16, 0x6e, 0x36, 0x72, 0xbb, 0x12, 0x2c, 0x79, 0x4b, 0x87, 0x50, 0x18, + 0x79, 0x22, 0xd6, 0x3e, 0x2c, 0x3f, 0xc7, 0x34, 0x63, 0x74, 0x54, 0x19, 0x37, 0x06, 0xc8, 0xd2, + 0xac, 0x49, 0x7c, 0x7d, 0x98, 0x38, 0x81, 0xf2, 0xeb, 0x7e, 0xd7, 0xa5, 0x58, 0x41, 0xb9, 0x33, + 0x0e, 0x45, 0x5a, 0xb9, 0x91, 0xdb, 0x23, 0x13, 0x13, 0xda, 0x85, 0x19, 0xc6, 0x31, 0x08, 0xa5, + 0xb1, 0x32, 0xfe, 0xa9, 0xac, 0x6a, 0x32, 0x49, 0x42, 0x1f, 0xc0, 0xfa, 0x2f, 0xf4, 0x8d, 0x7e, + 0x90, 0xd6, 0xe5, 0xb3, 0x68, 0xbe, 0x72, 0x4f, 0xfb, 0xa7, 0xce, 0x78, 0xdb, 0xd6, 0x1e, 0xfa, + 0x15, 0xdc, 0x1c, 0x4b, 0xf4, 0xe8, 0x6e, 0xd6, 0xa1, 0x13, 0xd9, 0xbb, 0xb2, 0x75, 0x5d, 0x90, + 0xd6, 0x1e, 0x3a, 0x03, 0x73, 0xd2, 0x00, 0x80, 0xbe, 0xd4, 0x48, 0xe5, 0x7f, 0x8f, 0xf1, 0x10, + 0x40, 0x40, 0xf0, 0xdb, 0x38, 0x72, 0x81, 0x26, 0x9e, 0xd1, 0x43, 0xf6, 0x6a, 0x74, 0xbf, 0xbb, + 0xdf, 0x4f, 0x00, 0x32, 0x06, 0x47, 0xa6, 0xdc, 0xc5, 0x08, 0xa9, 0x4f, 0xf4, 0x7f, 0x05, 0xe5, + 0xfd, 0xce, 0xfb, 0xd8, 0x8b, 0xf0, 0x90, 0x8e, 0xd1, 0xf7, 0xd4, 0x0b, 0xa8, 0xeb, 0x52, 0x38, + 0x73, 0x0c, 0xc7, 0x9f, 0x60, 0x97, 0x60, 0xf4, 0x02, 0x36, 0x86, 0x8f, 0x5a, 0x4e, 0x35, 0xd1, + 0x69, 0x62, 0x76, 0x47, 0xb0, 0x76, 0xe0, 0x06, 0x1d, 0xec, 0xff, 0xdf, 0x48, 0x3f, 0xe6, 0x97, + 0x52, 0x99, 0x5c, 0x6e, 0x8d, 0x81, 0x90, 0x77, 0x72, 0x45, 0x56, 0x51, 0xb1, 0x3e, 0x84, 0x1b, + 0xa2, 0xa6, 0x59, 0x95, 0xae, 0x01, 0x98, 0x94, 0xc4, 0x37, 0x50, 0xaa, 0xf7, 0x3c, 0xca, 0xa9, + 0x59, 0xfc, 0xfb, 0x54, 0xe7, 0xf2, 0x6b, 0xea, 0xb0, 0x22, 0x5f, 0xca, 0x8c, 0xd5, 0x87, 0x4f, + 0xd3, 0xe8, 0xe0, 0x54, 0x59, 0x4b, 0x61, 0xd5, 0x01, 0x60, 0xdb, 0x78, 0x60, 0xd8, 0x6b, 0x1f, + 0xff, 0xb1, 0x65, 0x7c, 0xfc, 0xb4, 0x65, 0xfc, 0xf5, 0xd3, 0x96, 0xf1, 0xf7, 0x4f, 0x5b, 0xc6, + 0x1f, 0xfe, 0xb9, 0x35, 0x75, 0x36, 0xc7, 0x91, 0xf6, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x6f, + 0xa4, 0xc2, 0xff, 0xb6, 0x16, 0x00, 0x00, } diff --git a/lib/auth/proto/auth.proto b/lib/auth/proto/auth.proto index 0d8a0ed46cc92..8c9da4d351ad7 100644 --- a/lib/auth/proto/auth.proto +++ b/lib/auth/proto/auth.proto @@ -1,11 +1,11 @@ syntax = "proto3"; package proto; -import "github.com/gravitational/teleport/lib/services/types.proto"; - import "gogoproto/gogo.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; +import "github.com/gravitational/teleport/lib/services/types.proto"; +import "github.com/gravitational/teleport/lib/events/events.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; @@ -208,6 +208,52 @@ message Semaphores { repeated services.SemaphoreV3 Semaphores = 1 [ (gogoproto.jsontag) = "semaphores" ]; } +// AuditStreamRequest contains stream request - event or stream control request +message AuditStreamRequest { + // Request is either stream request - create, resume or complete stream + // or event submitted as a part of the stream + oneof Request { + // CreateStream creates the stream for session ID + // should be the first message sent to the stream + CreateStream CreateStream = 1; + // ResumeStream resumes existing stream, should be the + // first message sent to the stream + ResumeStream ResumeStream = 2; + // CompleteStream completes the stream + CompleteStream CompleteStream = 3; + // FlushAndClose flushes and closes the stream + FlushAndCloseStream FlushAndCloseStream = 4; + // Event contains the stream event + events.OneOf Event = 5; + } +} + +// AuditStreamStatus returns audit stream status +// with corresponding upload ID +message AuditStreamStatus { + // UploadID 
is upload ID associated with the stream, + // can be used to resume the stream + string UploadID = 1; +} + +// CreateStream creates stream for a new session ID +message CreateStream { string SessionID = 1; } + +// ResumeStream resumes stream that was previously created +message ResumeStream { + // SessionID is a session ID of the stream + string SessionID = 1; + // UploadID is upload ID to resume + string UploadID = 2; +} + +// CompleteStream completes the stream +// and uploads it to the session server +message CompleteStream {} + +// FlushAndCloseStream flushes the stream data and closes the stream +message FlushAndCloseStream {} + // AuthService is authentication/authorization service implementation service AuthService { // SendKeepAlives allows node to send a stream of keep alive requests @@ -253,6 +299,7 @@ service AuthService { rpc UpdateUser(services.UserV2) returns (google.protobuf.Empty); // DeleteUser deletes an existing user in a backend by username. rpc DeleteUser(DeleteUserRequest) returns (google.protobuf.Empty); + // AcquireSemaphore acquires lease with requested resources from semaphore. rpc AcquireSemaphore(services.AcquireSemaphoreRequest) returns (services.SemaphoreLease); // KeepAliveSemaphoreLease updates semaphore lease. @@ -263,4 +310,9 @@ service AuthService { rpc GetSemaphores(services.SemaphoreFilter) returns (Semaphores); // DeleteSemaphore deletes a semaphore matching the supplied filter. rpc DeleteSemaphore(services.SemaphoreFilter) returns (google.protobuf.Empty); + + // EmitAuditEvent emits audit event + rpc EmitAuditEvent(events.OneOf) returns (google.protobuf.Empty); + // CreateAuditStream creates or resumes audit events streams + rpc CreateAuditStream(stream AuditStreamRequest) returns (stream events.StreamStatus); } diff --git a/lib/auth/resetpasswordtoken.go b/lib/auth/resetpasswordtoken.go index 1c77faa9c3441..eea8967592cc5 100644 --- a/lib/auth/resetpasswordtoken.go +++ b/lib/auth/resetpasswordtoken.go @@ -127,12 +127,21 @@ func (s *AuthServer) CreateResetPasswordToken(ctx context.Context, req CreateRes return nil, trace.Wrap(err) } - if err := s.EmitAuditEvent(events.ResetPasswordTokenCreated, events.EventFields{ - events.FieldName: req.Name, - events.ResetPasswordTokenTTL: req.TTL.String(), - events.EventUser: clientUsername(ctx), + if err := s.emitter.EmitAuditEvent(ctx, &events.ResetPasswordTokenCreate{ + Metadata: events.Metadata{ + Type: events.ResetPasswordTokenCreateEvent, + Code: events.ResetPasswordTokenCreateCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: req.Name, + TTL: req.TTL.String(), + Expires: s.GetClock().Now().UTC().Add(req.TTL), + }, }); err != nil { - log.Warnf("Failed to emit create reset password token event: %v", err) + log.WithError(err).Warn("Failed to emit create reset password token event.") } return s.GetResetPasswordToken(ctx, token.GetName()) diff --git a/lib/auth/resetpasswordtoken_test.go b/lib/auth/resetpasswordtoken_test.go index db0cab6249e8c..c2f9a7b53b9c9 100644 --- a/lib/auth/resetpasswordtoken_test.go +++ b/lib/auth/resetpasswordtoken_test.go @@ -34,9 +34,9 @@ import ( ) type ResetPasswordTokenTest struct { - bk backend.Backend - a *AuthServer - mockedAuditLog *events.MockAuditLog + bk backend.Backend + a *AuthServer + mockEmitter *events.MockEmitter } var _ = check.Suite(&ResetPasswordTokenTest{}) @@ -77,8 +77,8 @@ func (s *ResetPasswordTokenTest) SetUpTest(c *check.C) { err = s.a.SetClusterConfig(clusterConfig) c.Assert(err, 
check.IsNil) - s.mockedAuditLog = events.NewMockAuditLog(0) - s.a.IAuditLog = s.mockedAuditLog + s.mockEmitter = &events.MockEmitter{} + s.a.emitter = s.mockEmitter } func (s *ResetPasswordTokenTest) TestCreateResetPasswordToken(c *check.C) { @@ -96,9 +96,11 @@ func (s *ResetPasswordTokenTest) TestCreateResetPasswordToken(c *check.C) { c.Assert(err, check.IsNil) c.Assert(token.GetUser(), check.Equals, username) c.Assert(token.GetURL(), check.Equals, "https://:3080/web/reset/"+token.GetName()) - c.Assert(s.mockedAuditLog.EmittedEvent.EventType, check.DeepEquals, events.ResetPasswordTokenCreated) - c.Assert(s.mockedAuditLog.EmittedEvent.Fields[events.FieldName], check.Equals, "joe@example.com") - c.Assert(s.mockedAuditLog.EmittedEvent.Fields[events.EventUser], check.Equals, teleport.UserSystem) + event := s.mockEmitter.LastEvent() + + c.Assert(event.GetType(), check.DeepEquals, events.ResetPasswordTokenCreateEvent) + c.Assert(event.(*events.ResetPasswordTokenCreate).Name, check.Equals, "joe@example.com") + c.Assert(event.(*events.ResetPasswordTokenCreate).User, check.Equals, teleport.UserSystem) // verify that password was reset err = s.a.CheckPasswordWOToken(username, []byte(pass)) diff --git a/lib/auth/saml.go b/lib/auth/saml.go index a7deb43767224..f5c76fac9ecac 100644 --- a/lib/auth/saml.go +++ b/lib/auth/saml.go @@ -39,12 +39,19 @@ func (s *AuthServer) UpsertSAMLConnector(ctx context.Context, connector services if err := s.Identity.UpsertSAMLConnector(connector); err != nil { return trace.Wrap(err) } - - if err := s.EmitAuditEvent(events.SAMLConnectorCreated, events.EventFields{ - events.FieldName: connector.GetName(), - events.EventUser: clientUsername(ctx), + if err := s.emitter.EmitAuditEvent(ctx, &events.OIDCConnectorCreate{ + Metadata: events.Metadata{ + Type: events.SAMLConnectorCreatedEvent, + Code: events.SAMLConnectorCreatedCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: connector.GetName(), + }, }); err != nil { - log.Warnf("Failed to emit SAML connector create event: %v", err) + log.WithError(err).Warn("Failed to emit SAML connector create event.") } return nil @@ -55,12 +62,19 @@ func (s *AuthServer) DeleteSAMLConnector(ctx context.Context, connectorName stri if err := s.Identity.DeleteSAMLConnector(connectorName); err != nil { return trace.Wrap(err) } - - if err := s.EmitAuditEvent(events.SAMLConnectorDeleted, events.EventFields{ - events.FieldName: connectorName, - events.EventUser: clientUsername(ctx), + if err := s.emitter.EmitAuditEvent(ctx, &events.OIDCConnectorDelete{ + Metadata: events.Metadata{ + Type: events.SAMLConnectorDeletedEvent, + Code: events.SAMLConnectorDeletedCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: connectorName, + }, }); err != nil { - log.Warnf("Failed to emit SAML connector delete event: %v", err) + log.WithError(err).Warn("Failed to emit SAML connector delete event.") } return nil @@ -304,31 +318,36 @@ type SAMLAuthResponse struct { // ValidateSAMLResponse consumes attribute statements from SAML identity provider func (a *AuthServer) ValidateSAMLResponse(samlResponse string) (*SAMLAuthResponse, error) { + event := &events.UserLogin{ + Metadata: events.Metadata{ + Type: events.UserLoginEvent, + }, + Method: events.LoginMethodSAML, + } re, err := a.validateSAMLResponse(samlResponse) - if err != nil { - fields := events.EventFields{ - events.LoginMethod: 
events.LoginMethodSAML, - events.AuthAttemptSuccess: false, - events.AuthAttemptErr: err.Error(), - } - if re != nil && re.attributeStatements != nil { - fields[events.IdentityAttributes] = re.attributeStatements + if re != nil && re.attributeStatements != nil { + attributes, err := events.EncodeMapStrings(re.attributeStatements) + if err != nil { + log.WithError(err).Warn("Failed to encode identity attributes.") + } else { + event.IdentityAttributes = attributes } - if err := a.EmitAuditEvent(events.UserSSOLoginFailure, fields); err != nil { - log.Warnf("Failed to emit SAML login failure event: %v", err) + } + if err != nil { + event.Code = events.UserSSOLoginFailureCode + event.Status.Success = false + event.Status.Error = trace.Unwrap(err).Error() + event.Status.UserMessage = err.Error() + if err := a.emitter.EmitAuditEvent(a.closeCtx, event); err != nil { + log.WithError(err).Warn("Failed to emit SAML login failure event.") + } return nil, trace.Wrap(err) } - fields := events.EventFields{ - events.EventUser: re.auth.Username, - events.AuthAttemptSuccess: true, - events.LoginMethod: events.LoginMethodSAML, - } - if re.attributeStatements != nil { - fields[events.IdentityAttributes] = re.attributeStatements - } - if err := a.EmitAuditEvent(events.UserSSOLogin, fields); err != nil { - log.Warnf("Failed to emit SAML user login event: %v", err) + event.Status.Success = true + event.User = re.auth.Username + event.Code = events.UserSSOLoginCode + if err := a.emitter.EmitAuditEvent(a.closeCtx, event); err != nil { + log.WithError(err).Warn("Failed to emit SAML login success event.") } return &re.auth, nil } diff --git a/lib/auth/tls_test.go b/lib/auth/tls_test.go index 4821012e870cb..5fe03aaf387d5 100644 --- a/lib/auth/tls_test.go +++ b/lib/auth/tls_test.go @@ -1166,7 +1166,7 @@ func (s *TLSSuite) TestSharedSessions(c *check.C) { c.Assert(forwarder.Close(), check.IsNil) // start uploader process - eventsC := make(chan *events.UploadEvent, 100) + eventsC := make(chan events.UploadEvent, 100) uploader, err := events.NewUploader(events.UploaderConfig{ ServerID: "upload", DataDir: uploadDir, @@ -1202,7 +1202,8 @@ func (s *TLSSuite) TestSharedSessions(c *check.C) { history, err := clt.SearchEvents(from, to, "", 0) c.Assert(err, check.IsNil) c.Assert(history, check.NotNil) - c.Assert(len(history), check.Equals, 4) + // Extra event is the upload event + c.Assert(len(history), check.Equals, 5) // try searching for only "session.end" events (real query) history, err = clt.SearchEvents(from, to, diff --git a/lib/auth/trustedcluster.go b/lib/auth/trustedcluster.go index 8ae3a5e8f85e3..243891bdb745d 100644 --- a/lib/auth/trustedcluster.go +++ b/lib/auth/trustedcluster.go @@ -141,10 +141,19 @@ func (a *AuthServer) UpsertTrustedCluster(ctx context.Context, trustedCluster se return nil, trace.Wrap(err) } - if err := a.EmitAuditEvent(events.TrustedClusterCreate, events.EventFields{ - events.EventUser: clientUsername(ctx), + if err := a.emitter.EmitAuditEvent(ctx, &events.TrustedClusterCreate{ + Metadata: events.Metadata{ + Type: events.TrustedClusterCreateEvent, + Code: events.TrustedClusterCreateCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: trustedCluster.GetName(), + }, }); err != nil { - log.Warnf("Failed to emit trusted cluster create event: %v", err) + log.WithError(err).Warn("Failed to emit trusted cluster create event.") } return tc, nil @@ -205,10 +214,19 @@ func (a *AuthServer) DeleteTrustedCluster(ctx
context.Context, name string) erro return trace.Wrap(err) } - if err := a.EmitAuditEvent(events.TrustedClusterDelete, events.EventFields{ - events.EventUser: clientUsername(ctx), + if err := a.emitter.EmitAuditEvent(ctx, &events.TrustedClusterDelete{ + Metadata: events.Metadata{ + Type: events.TrustedClusterDeleteEvent, + Code: events.TrustedClusterDeleteCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: name, + }, }); err != nil { - log.Warnf("Failed to emit trusted cluster delete event: %v", err) + log.WithError(err).Warn("Failed to emit trusted cluster delete event.") } return nil diff --git a/lib/auth/user.go b/lib/auth/user.go index 860545ff11ccc..0452c7d26537b 100644 --- a/lib/auth/user.go +++ b/lib/auth/user.go @@ -56,14 +56,22 @@ func (s *AuthServer) CreateUser(ctx context.Context, user services.User) error { connectorName = user.GetCreatedBy().Connector.ID } - if err := s.EmitAuditEvent(events.UserCreate, events.EventFields{ - events.EventUser: user.GetCreatedBy().User.Name, - events.UserExpires: user.Expiry(), - events.UserRoles: user.GetRoles(), - events.FieldName: user.GetName(), - events.UserConnector: connectorName, + if err := s.emitter.EmitAuditEvent(ctx, &events.UserCreate{ + Metadata: events.Metadata{ + Type: events.UserCreateEvent, + Code: events.UserCreateCode, + }, + UserMetadata: events.UserMetadata{ + User: user.GetCreatedBy().User.Name, + }, + ResourceMetadata: events.ResourceMetadata{ + Name: user.GetName(), + Expires: user.Expiry(), + }, + Connector: connectorName, + Roles: user.GetRoles(), }); err != nil { - log.Warnf("Failed to emit user create event: %v", err) + log.WithError(err).Warn("Failed to emit user create event.") } return nil @@ -82,14 +90,22 @@ func (s *AuthServer) UpdateUser(ctx context.Context, user services.User) error { connectorName = user.GetCreatedBy().Connector.ID } - if err := s.EmitAuditEvent(events.UserUpdate, events.EventFields{ - events.EventUser: clientUsername(ctx), - events.FieldName: user.GetName(), - events.UserExpires: user.Expiry(), - events.UserRoles: user.GetRoles(), - events.UserConnector: connectorName, + if err := s.emitter.EmitAuditEvent(ctx, &events.UserCreate{ + Metadata: events.Metadata{ + Type: events.UserUpdatedEvent, + Code: events.UserUpdateCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: user.GetName(), + Expires: user.Expiry(), + }, + Connector: connectorName, + Roles: user.GetRoles(), }); err != nil { - log.Warnf("Failed to emit user update event: %v", err) + log.WithError(err).Warn("Failed to emit user update event.") } return nil @@ -109,13 +125,22 @@ func (s *AuthServer) UpsertUser(user services.User) error { connectorName = user.GetCreatedBy().Connector.ID } - if err := s.EmitAuditEvent(events.UserUpdate, events.EventFields{ - events.EventUser: user.GetName(), - events.UserExpires: user.Expiry(), - events.UserRoles: user.GetRoles(), - events.UserConnector: connectorName, + if err := s.emitter.EmitAuditEvent(s.closeCtx, &events.UserCreate{ + Metadata: events.Metadata{ + Type: events.UserCreateEvent, + Code: events.UserCreateCode, + }, + UserMetadata: events.UserMetadata{ + User: user.GetName(), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: user.GetName(), + Expires: user.Expiry(), + }, + Connector: connectorName, + Roles: user.GetRoles(), }); err != nil { - log.Warnf("Failed to emit user update event: %v", err) + 
log.WithError(err).Warn("Failed to emit user upsert event.") } return nil @@ -142,11 +167,19 @@ func (s *AuthServer) DeleteUser(ctx context.Context, user string) error { } // If the user was successfully deleted, emit an event. - if err := s.EmitAuditEvent(events.UserDelete, events.EventFields{ - events.FieldName: user, - events.EventUser: clientUsername(ctx), + if err := s.emitter.EmitAuditEvent(s.closeCtx, &events.UserDelete{ + Metadata: events.Metadata{ + Type: events.UserDeleteEvent, + Code: events.UserDeleteCode, + }, + UserMetadata: events.UserMetadata{ + User: clientUsername(ctx), + }, + ResourceMetadata: events.ResourceMetadata{ + Name: user, + }, }); err != nil { - log.Warnf("Failed to emit user delete event: %v", err) + log.WithError(err).Warn("Failed to emit user delete event.") } return nil diff --git a/lib/bpf/bpf.go b/lib/bpf/bpf.go index a2b584f25154d..b8471ca566bf2 100644 --- a/lib/bpf/bpf.go +++ b/lib/bpf/bpf.go @@ -286,23 +286,35 @@ func (s *Service) emitCommandEvent(eventBytes []byte) { argv := args.([]string) // Emit "command" event. - eventFields := events.EventFields{ - // Common fields. - events.EventNamespace: ctx.Namespace, - events.SessionEventID: ctx.SessionID, - events.SessionServerID: ctx.ServerID, - events.EventLogin: ctx.Login, - events.EventUser: ctx.User, - // Command fields. - events.PID: event.PID, - events.PPID: event.PPID, - events.CgroupID: event.CgroupID, - events.Program: convertString(unsafe.Pointer(&event.Command)), - events.Path: argv[0], - events.Argv: argv[1:], - events.ReturnCode: event.ReturnCode, + sessionCommandEvent := &events.SessionCommand{ + Metadata: events.Metadata{ + Type: events.SessionCommandEvent, + Code: events.SessionCommandCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: ctx.ServerID, + ServerNamespace: ctx.Namespace, + }, + SessionMetadata: events.SessionMetadata{ + SessionID: ctx.SessionID, + }, + UserMetadata: events.UserMetadata{ + User: ctx.User, + Login: ctx.Login, + }, + BPFMetadata: events.BPFMetadata{ + CgroupID: event.CgroupID, + Program: convertString(unsafe.Pointer(&event.Command)), + PID: event.PID, + }, + PPID: event.PPID, + ReturnCode: event.ReturnCode, + Path: argv[0], + Argv: argv[1:], + } + if err := ctx.Emitter.EmitAuditEvent(ctx.Context, sessionCommandEvent); err != nil { + log.WithError(err).Warn("Failed to emit command event.") } - ctx.AuditLog.EmitAuditEvent(events.SessionCommand, eventFields) // Now that the event has been processed, remove from cache. s.argsCache.Remove(strconv.FormatUint(event.PID, 10)) @@ -331,22 +343,33 @@ func (s *Service) emitDiskEvent(eventBytes []byte) { return } - eventFields := events.EventFields{ - // Common fields. - events.EventNamespace: ctx.Namespace, - events.SessionEventID: ctx.SessionID, - events.SessionServerID: ctx.ServerID, - events.EventLogin: ctx.Login, - events.EventUser: ctx.User, - // Disk fields. 
- events.PID: event.PID, - events.CgroupID: event.CgroupID, - events.Program: convertString(unsafe.Pointer(&event.Command)), - events.Path: convertString(unsafe.Pointer(&event.Path)), - events.Flags: event.Flags, - events.ReturnCode: event.ReturnCode, - } - ctx.AuditLog.EmitAuditEvent(events.SessionDisk, eventFields) + sessionDiskEvent := &events.SessionDisk{ + Metadata: events.Metadata{ + Type: events.SessionDiskEvent, + Code: events.SessionDiskCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: ctx.ServerID, + ServerNamespace: ctx.Namespace, + }, + SessionMetadata: events.SessionMetadata{ + SessionID: ctx.SessionID, + }, + UserMetadata: events.UserMetadata{ + User: ctx.User, + Login: ctx.Login, + }, + BPFMetadata: events.BPFMetadata{ + CgroupID: event.CgroupID, + Program: convertString(unsafe.Pointer(&event.Command)), + PID: event.PID, + }, + Flags: event.Flags, + Path: convertString(unsafe.Pointer(&event.Path)), + ReturnCode: event.ReturnCode, + } + // Logs can be DoS by event failures here + _ = ctx.Emitter.EmitAuditEvent(ctx.Context, sessionDiskEvent) } // emit4NetworkEvent will parse and emit IPv4 events to the Audit Log. @@ -381,23 +404,35 @@ func (s *Service) emit4NetworkEvent(eventBytes []byte) { binary.LittleEndian.PutUint32(dst, uint32(event.DstAddr)) dstAddr := net.IP(dst) - eventFields := events.EventFields{ - // Common fields. - events.EventNamespace: ctx.Namespace, - events.SessionEventID: ctx.SessionID, - events.SessionServerID: ctx.ServerID, - events.EventLogin: ctx.Login, - events.EventUser: ctx.User, - // Network fields. - events.PID: event.PID, - events.CgroupID: event.CgroupID, - events.Program: convertString(unsafe.Pointer(&event.Command)), - events.SrcAddr: srcAddr, - events.DstAddr: dstAddr, - events.DstPort: event.DstPort, - events.TCPVersion: 4, - } - ctx.AuditLog.EmitAuditEvent(events.SessionNetwork, eventFields) + sessionNetworkEvent := &events.SessionNetwork{ + Metadata: events.Metadata{ + Type: events.SessionNetworkEvent, + Code: events.SessionNetworkCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: ctx.ServerID, + ServerNamespace: ctx.Namespace, + }, + SessionMetadata: events.SessionMetadata{ + SessionID: ctx.SessionID, + }, + UserMetadata: events.UserMetadata{ + User: ctx.User, + Login: ctx.Login, + }, + BPFMetadata: events.BPFMetadata{ + CgroupID: event.CgroupID, + Program: convertString(unsafe.Pointer(&event.Command)), + PID: uint64(event.PID), + }, + DstPort: int32(event.DstPort), + DstAddr: dstAddr.String(), + SrcAddr: srcAddr.String(), + TCPVersion: 4, + } + if err := ctx.Emitter.EmitAuditEvent(ctx.Context, sessionNetworkEvent); err != nil { + log.WithError(err).Warn("Failed to emit network event.") + } } // emit6NetworkEvent will parse and emit IPv6 events to the Audit Log. @@ -438,23 +473,35 @@ func (s *Service) emit6NetworkEvent(eventBytes []byte) { binary.LittleEndian.PutUint32(dst[12:], event.DstAddr[3]) dstAddr := net.IP(dst) - eventFields := events.EventFields{ - // Common fields. - events.EventNamespace: ctx.Namespace, - events.SessionEventID: ctx.SessionID, - events.SessionServerID: ctx.ServerID, - events.EventLogin: ctx.Login, - events.EventUser: ctx.User, - // Connect fields. 
- events.PID: event.PID, - events.CgroupID: event.CgroupID, - events.Program: convertString(unsafe.Pointer(&event.Command)), - events.SrcAddr: srcAddr, - events.DstAddr: dstAddr, - events.DstPort: event.DstPort, - events.TCPVersion: 6, - } - ctx.AuditLog.EmitAuditEvent(events.SessionNetwork, eventFields) + sessionNetworkEvent := &events.SessionNetwork{ + Metadata: events.Metadata{ + Type: events.SessionNetworkEvent, + Code: events.SessionNetworkCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: ctx.ServerID, + ServerNamespace: ctx.Namespace, + }, + SessionMetadata: events.SessionMetadata{ + SessionID: ctx.SessionID, + }, + UserMetadata: events.UserMetadata{ + User: ctx.User, + Login: ctx.Login, + }, + BPFMetadata: events.BPFMetadata{ + CgroupID: event.CgroupID, + Program: convertString(unsafe.Pointer(&event.Command)), + PID: uint64(event.PID), + }, + DstPort: int32(event.DstPort), + DstAddr: dstAddr.String(), + SrcAddr: srcAddr.String(), + TCPVersion: 6, + } + if err := ctx.Emitter.EmitAuditEvent(ctx.Context, sessionNetworkEvent); err != nil { + log.WithError(err).Warn("Failed to emit network event.") + } } func (s *Service) getWatch(cgoupID uint64) (ctx *SessionContext, ok bool) { diff --git a/lib/bpf/bpf_test.go b/lib/bpf/bpf_test.go index c0979b275a0a5..01d2046e8896c 100644 --- a/lib/bpf/bpf_test.go +++ b/lib/bpf/bpf_test.go @@ -26,17 +26,13 @@ import ( "net/http/httptest" "os" os_exec "os/exec" - "sync" "testing" "time" "unsafe" - "github.com/gravitational/trace" - "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/events" - "github.com/gravitational/teleport/lib/session" "github.com/gravitational/teleport/lib/utils" "github.com/pborman/uuid" @@ -80,7 +76,7 @@ func (s *Suite) TestWatch(c *check.C) { }) // Create a fake audit log that can be used to capture the events emitted. - auditLog := newFakeLog() + emitter := &events.MockEmitter{} // Create and start a program that does nothing. Since sleep will run longer // than we wait below, nothing should be emit to the Audit Log. @@ -97,7 +93,7 @@ func (s *Suite) TestWatch(c *check.C) { Login: "foo", User: "foo@example.com", PID: cmd.Process.Pid, - AuditLog: auditLog, + Emitter: emitter, Events: map[string]bool{ teleport.EnhancedRecordingCommand: true, teleport.EnhancedRecordingDisk: true, @@ -131,7 +127,7 @@ func (s *Suite) TestWatch(c *check.C) { for { select { case <-time.Tick(250 * time.Millisecond): - c.Assert(auditLog.events, check.HasLen, 0) + c.Assert(emitter.LastEvent(), check.IsNil) case <-timer.C: return } @@ -447,58 +443,6 @@ func executeHTTP(c *check.C, doneContext context.Context, endpoint string) { } } -// fakeLog is used in tests to obtain events emitted to the Audit Log. 
-type fakeLog struct { - mu sync.Mutex - events []events.EventFields -} - -func newFakeLog() *fakeLog { - return &fakeLog{ - events: make([]events.EventFields, 0), - } -} - -func (a *fakeLog) EmitAuditEvent(e events.Event, f events.EventFields) error { - a.mu.Lock() - defer a.mu.Unlock() - - a.events = append(a.events, f) - return nil -} - -func (a *fakeLog) PostSessionSlice(s events.SessionSlice) error { - return trace.NotImplemented("not implemented") -} - -func (a *fakeLog) UploadSessionRecording(r events.SessionRecording) error { - return trace.NotImplemented("not implemented") -} - -func (a *fakeLog) GetSessionChunk(namespace string, sid session.ID, offsetBytes int, maxBytes int) ([]byte, error) { - return nil, trace.NotFound("") -} - -func (a *fakeLog) GetSessionEvents(namespace string, sid session.ID, after int, includePrintEvents bool) ([]events.EventFields, error) { - return nil, trace.NotFound("") -} - -func (a *fakeLog) SearchEvents(fromUTC, toUTC time.Time, query string, limit int) ([]events.EventFields, error) { - return nil, trace.NotFound("") -} - -func (a *fakeLog) SearchSessionEvents(fromUTC time.Time, toUTC time.Time, limit int) ([]events.EventFields, error) { - return nil, trace.NotFound("") -} - -func (a *fakeLog) WaitForDelivery(context.Context) error { - return trace.NotImplemented("not implemented") -} - -func (a *fakeLog) Close() error { - return trace.NotFound("") -} - // isRoot returns a boolean if the test is being run as root or not. Tests // for this package must be run as root. func isRoot() bool { diff --git a/lib/bpf/common.go b/lib/bpf/common.go index 9dd6e3e1d333b..b6e1ebfd4f528 100644 --- a/lib/bpf/common.go +++ b/lib/bpf/common.go @@ -22,6 +22,7 @@ package bpf import "C" import ( + "context" "unsafe" "github.com/gravitational/teleport" @@ -52,6 +53,9 @@ type BPF interface { // srv.ServerContext, unfortunately due to circular imports with lib/srv and // lib/bpf, part of that structure is reproduced in SessionContext. type SessionContext struct { + // Context is a cancel context, scoped to a server, and not a session. + Context context.Context + // Namespace is the namespace within which this session occurs. Namespace string @@ -71,8 +75,8 @@ type SessionContext struct { // used by Teleport to find itself by cgroup. PID int - // AuditLog is used to store events for a particular sessionl - AuditLog events.IAuditLog + // Emitter is used to record events for a particular session + Emitter events.Emitter // Events is the set of events (command, disk, or network) to record for // this session. diff --git a/lib/config/configuration.go b/lib/config/configuration.go index b236d0cfcf8b4..4c736eff3dee1 100644 --- a/lib/config/configuration.go +++ b/lib/config/configuration.go @@ -901,7 +901,7 @@ func Configure(clf *CommandLineFlags, cfg *service.Config) error { // If sessions are being recorded at the proxy host key checking must be // enabled. This make sure the host certificate key algorithm is FIPS // compliant. 
- if cfg.Auth.ClusterConfig.GetSessionRecording() == services.RecordAtProxy && + if services.IsRecordAtProxy(cfg.Auth.ClusterConfig.GetSessionRecording()) && cfg.Auth.ClusterConfig.GetProxyChecksHostKeys() == services.HostKeyCheckNo { return trace.BadParameter("non-FIPS compliant proxy settings: \"proxy_checks_host_keys\" must be true") } diff --git a/lib/defaults/defaults.go b/lib/defaults/defaults.go index 7476ab394455a..5a6d4725d3086 100644 --- a/lib/defaults/defaults.go +++ b/lib/defaults/defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2016 Gravitational, Inc. +Copyright 2016-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -249,6 +249,18 @@ const ( // CallbackTimeout is how long to wait for a response from SSO provider // before timeout. CallbackTimeout = 180 * time.Second + + // ConcurrentUploadsPerStream limits the number of concurrent uploads + // per stream + ConcurrentUploadsPerStream = 1 + + // UploadGracePeriod is a period after which non-completed + // upload is considered abandoned and will be completed by the reconciler + UploadGracePeriod = 24 * time.Hour + + // InactivityFlushPeriod is a period of inactivity + // that triggers a flush (upload) of the buffered data. + InactivityFlushPeriod = 5 * time.Minute ) var ( diff --git a/lib/events/api.go b/lib/events/api.go index a1afaca1b96e1..bf0b5b7c0dd59 100644 --- a/lib/events/api.go +++ b/lib/events/api.go @@ -1,5 +1,5 @@ /* -Copyright 2015 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,6 +24,8 @@ import ( "time" "github.com/gravitational/teleport/lib/session" + + "github.com/gravitational/trace" ) const ( @@ -362,6 +364,188 @@ const ( V3 = 3 ) +// AuditEvent represents an audit event +type AuditEvent interface { + // ProtoMarshaler implements efficient + // protobuf marshaling methods + ProtoMarshaler + + // GetID returns unique event ID + GetID() string + // SetID sets unique event ID + SetID(id string) + + // GetCode returns event short diagnostic code + GetCode() string + // SetCode sets unique event diagnostic code + SetCode(string) + + // GetType returns event type + GetType() string + // SetType sets event type + SetType(string) + + // GetTime returns event time + GetTime() time.Time + // SetTime sets event time + SetTime(time.Time) + + // GetIndex gets event index - a non-unique + // monotonically incremented number + // in the event sequence + GetIndex() int64 + // SetIndex sets event index + SetIndex(idx int64) +} + +// ProtoMarshaler implements marshaler interface +type ProtoMarshaler interface { + // Size returns size of the object when marshaled + Size() (n int) + + // MarshalTo marshals the object to sized buffer + MarshalTo(dAtA []byte) (int, error) +} + +// ServerMetadataGetter represents interface +// that provides information about its server id +type ServerMetadataGetter interface { + // GetServerID returns event server ID + GetServerID() string + + // GetServerNamespace returns event server namespace + GetServerNamespace() string +} + +// ServerMetadataSetter represents interface +// that sets event server metadata +type ServerMetadataSetter interface { + // SetServerID sets server ID of the event + SetServerID(string) + + // SetServerNamespace sets event server namespace + SetServerNamespace(string) +} + +// SessionMetadataGetter represents interface +//
that provides information about events' session metadata +type SessionMetadataGetter interface { + // GetSessionID returns event session ID + GetSessionID() string +} + +// SessionMetadataSetter represents interface +// that sets session metadata +type SessionMetadataSetter interface { + // SetSessionID sets event session ID + SetSessionID(string) +} + +// SetCode is a shortcut that sets code for the audit event +func SetCode(event AuditEvent, code string) AuditEvent { + event.SetCode(code) + return event +} + +// Emitter emits audit events +type Emitter interface { + // EmitAuditEvent emits a single audit event + EmitAuditEvent(context.Context, AuditEvent) error +} + +// Streamer creates and resumes event streams for session IDs +type Streamer interface { + // CreateAuditStream creates event stream + CreateAuditStream(context.Context, session.ID) (Stream, error) + // ResumeAuditStream resumes the stream for session upload that + // has not been completed yet. + ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (Stream, error) +} + +// StreamPart represents uploaded stream part +type StreamPart struct { + // Number is a part number + Number int64 + // ETag is a part e-tag + ETag string +} + +// StreamUpload represents stream multipart upload +type StreamUpload struct { + // ID is unique upload ID + ID string + // SessionID is a session ID of the upload + SessionID session.ID + // Initiated contains the timestamp of when the upload + // was initiated, not always initialized + Initiated time.Time +} + +// String returns user-friendly representation of the upload +func (u StreamUpload) String() string { + return fmt.Sprintf("Upload(session=%v, id=%v, initiated=%v)", u.SessionID, u.ID, u.Initiated) +} + +// CheckAndSetDefaults checks and sets default values +func (u *StreamUpload) CheckAndSetDefaults() error { + if u.ID == "" { + return trace.BadParameter("missing parameter ID") + } + if u.SessionID == "" { + return trace.BadParameter("missing parameter SessionID") + } + return nil +} + +// MultipartUploader handles multipart uploads and downloads for session streams +type MultipartUploader interface { + // CreateUpload creates a multipart upload + CreateUpload(ctx context.Context, sessionID session.ID) (*StreamUpload, error) + // CompleteUpload completes the upload + CompleteUpload(ctx context.Context, upload StreamUpload, parts []StreamPart) error + // UploadPart uploads part and returns the part + UploadPart(ctx context.Context, upload StreamUpload, partNumber int64, partBody io.ReadSeeker) (*StreamPart, error) + // ListParts returns all uploaded parts for the completed upload in sorted order + ListParts(ctx context.Context, upload StreamUpload) ([]StreamPart, error) + // ListUploads lists uploads that have been initiated but not completed, with + // earlier uploads returned first + ListUploads(ctx context.Context) ([]StreamUpload, error) +} + +// Stream is used to create continuous ordered sequence of events +// associated with a session.
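+//
+// A minimal usage sketch, illustrative only: streamer, sid and event below
+// are assumed to be supplied by the caller (any Streamer implementation,
+// a session ID, and an AuditEvent):
+//
+//	stream, err := streamer.CreateAuditStream(ctx, sid)
+//	if err != nil {
+//		return trace.Wrap(err)
+//	}
+//	// emit events in the context of the session stream
+//	if err := stream.EmitAuditEvent(ctx, event); err != nil {
+//		return trace.Wrap(err)
+//	}
+//	// mark the stream as complete once the session has ended
+//	if err := stream.Complete(ctx); err != nil {
+//		return trace.Wrap(err)
+//	}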
+type Stream interface { + // Emitter allows stream to emit audit event in the context of the event stream + Emitter + // Status returns channel broadcasting updates about the stream state: + // last event index that was uploaded and the upload ID + Status() <-chan StreamStatus + // Done returns channel closed when streamer is closed + // should be used to detect sending errors + Done() <-chan struct{} + // Complete closes the stream and marks it finalized, + // releases associated resources, in case of failure, + // closes this stream on the client side + Complete(ctx context.Context) error + // Close flushes non-uploaded flight stream data without marking + // the stream completed and closes the stream instance + Close(ctx context.Context) error +} + +// StreamWriter implements io.Writer to be plugged into the multi-writer +// associated with every session. It forwards session stream to the audit log +type StreamWriter interface { + io.Writer + Stream +} + +// StreamEmitter supports submitting single events and streaming +// session events +type StreamEmitter interface { + Emitter + Streamer +} + // IAuditLog is the primary (and the only external-facing) interface for AuditLogger. // If you wish to implement a different kind of logger (not filesystem-based), you // have to implement this interface @@ -369,8 +553,9 @@ type IAuditLog interface { // Closer releases connection and resources associated with log if any io.Closer - // EmitAuditEvent emits audit event - EmitAuditEvent(Event, EventFields) error + // EmitAuditEventLegacy emits audit in legacy format + // DELETE IN: 5.0.0 + EmitAuditEventLegacy(Event, EventFields) error // DELETE IN: 2.7.0 // This method is no longer necessary as nodes and proxies >= 2.7.0 diff --git a/lib/events/auditlog.go b/lib/events/auditlog.go index 7558bb8d58b68..a5f384dc98a9f 100644 --- a/lib/events/auditlog.go +++ b/lib/events/auditlog.go @@ -1,5 +1,5 @@ /* -Copyright 2015-2019 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -48,7 +48,15 @@ const ( // in /var/lib/teleport/logs/sessions SessionLogsDir = "sessions" - // PlaybacksDir is a directory for playbacks + // StreamingLogsDir is a subdirectory of sessions /var/lib/teleport/logs/streaming + // is used in new versions of the uploader + StreamingLogsDir = "streaming" + + // RecordsDir is a subdirectory with default records /var/lib/teleport/logs/records + // is used in new versions of the uploader + RecordsDir = "records" + + // PlaybackDir is a directory for playbacks PlaybackDir = "playbacks" // LogfileExt defines the ending of the daily event log file @@ -192,6 +200,9 @@ func (a *AuditLogConfig) CheckAndSetDefaults() error { if a.ServerID == "" { return trace.BadParameter("missing parameter ServerID") } + if a.UploadHandler == nil { + return trace.BadParameter("missing parameter UploadHandler") + } if a.Clock == nil { a.Clock = clockwork.NewRealClock() } @@ -325,13 +336,6 @@ func (l *AuditLog) UploadSessionRecording(r SessionRecording) error { return trace.Wrap(err) } - // This function runs on the Auth Server. If no upload handler is defined - // (for example, not going to S3) then unarchive it to Auth Server disk. - if l.UploadHandler == nil { - err := utils.Extract(r.Recording, filepath.Join(l.DataDir, l.ServerID, SessionLogsDir, r.Namespace)) - return trace.Wrap(err) - } - // Upload session recording to endpoint defined in file configuration. Like S3. 
start := time.Now() url, err := l.UploadHandler.Upload(context.TODO(), r.SessionID, r.Recording) @@ -340,7 +344,7 @@ func (l *AuditLog) UploadSessionRecording(r SessionRecording) error { return trace.Wrap(err) } l.WithFields(log.Fields{"duration": time.Since(start), "session-id": r.SessionID}).Debugf("Session upload completed.") - return l.EmitAuditEvent(SessionUpload, EventFields{ + return l.EmitAuditEventLegacy(SessionUploadE, EventFields{ SessionEventID: string(r.SessionID), URL: url, EventIndex: SessionUploadIndex, @@ -376,16 +380,16 @@ func (l *AuditLog) processSlice(sl SessionLogger, slice *SessionSlice) error { if err != nil { return trace.Wrap(err) } - if err := l.EmitAuditEvent(Event{Name: chunk.EventType}, fields); err != nil { + if err := l.EmitAuditEventLegacy(Event{Name: chunk.EventType}, fields); err != nil { return trace.Wrap(err) } } return nil } -func (l *AuditLog) getAuthServers() ([]string, error) { +func getAuthServers(dataDir string) ([]string, error) { // scan the log directory: - df, err := os.Open(l.DataDir) + df, err := os.Open(dataDir) if err != nil { return nil, trace.Wrap(err) } @@ -403,7 +407,7 @@ func (l *AuditLog) getAuthServers() ([]string, error) { // can be colliding with customer-picked names, so consider // moving the folders to a folder level up and keep the servers // one small - if fileName != PlaybackDir && fileName != teleport.ComponentUpload { + if fileName != PlaybackDir && fileName != teleport.ComponentUpload && fileName != RecordsDir { authServers = append(authServers, fileName) } } @@ -504,14 +508,20 @@ func (idx *sessionIndex) chunksFileName(index int) string { } func (l *AuditLog) readSessionIndex(namespace string, sid session.ID) (*sessionIndex, error) { - authServers, err := l.getAuthServers() - if err != nil { + index, err := readSessionIndex(l.DataDir, []string{PlaybackDir}, namespace, sid) + if err == nil { + return index, nil + } + if !trace.IsNotFound(err) { return nil, trace.Wrap(err) } - if l.UploadHandler == nil { - return readSessionIndex(l.DataDir, authServers, namespace, sid) + // some legacy records may be stored unpacked in the JSON format + // in the data dir, under server format + authServers, err := getAuthServers(l.DataDir) + if err != nil { + return nil, trace.Wrap(err) } - return readSessionIndex(l.DataDir, []string{PlaybackDir}, namespace, sid) + return readSessionIndex(l.DataDir, authServers, namespace, sid) } func readSessionIndex(dataDir string, authServers []string, namespace string, sid session.ID) (*sessionIndex, error) { @@ -646,14 +656,40 @@ func (l *AuditLog) downloadSession(namespace string, sid session.ID) error { } l.WithFields(log.Fields{"duration": time.Since(start)}).Debugf("Downloaded %v to %v.", sid, tarballPath) - start = time.Now() _, err = tarball.Seek(0, 0) if err != nil { return trace.ConvertSystemError(err) } - if err := utils.Extract(tarball, l.playbackDir); err != nil { + format, err := DetectFormat(tarball) + if err != nil { + l.WithError(err).Debugf("Failed to detect playback %v format.", tarballPath) return trace.Wrap(err) } + _, err = tarball.Seek(0, 0) + if err != nil { + return trace.ConvertSystemError(err) + } + switch { + case format.Proto == true: + start = time.Now() + l.Debugf("Converting %v to playback format.", tarballPath) + protoReader := NewProtoReader(tarball) + err = WriteForPlayback(l.Context, sid, protoReader, l.playbackDir) + if err != nil { + l.WithError(err).Error("Failed to convert.") + return trace.Wrap(err) + } + stats := protoReader.GetStats().ToFields() + 
stats["duration"] = time.Since(start) + l.WithFields(stats).Debugf("Converted %v to %v.", tarballPath, l.playbackDir) + case format.Tar == true: + if err := utils.Extract(tarball, l.playbackDir); err != nil { + return trace.Wrap(err) + } + default: + return trace.BadParameter("Unexpected format %v.", format) + } + // Extract every chunks file on disk while holding the context, // otherwise parallel downloads will try to unpack the file at the same time. idx, err := l.readSessionIndex(namespace, sid) @@ -677,10 +713,8 @@ func (l *AuditLog) downloadSession(namespace string, sid session.ID) error { // to receive a live stream of a given session. The reader allows access to a // session stream range from offsetBytes to offsetBytes+maxBytes func (l *AuditLog) GetSessionChunk(namespace string, sid session.ID, offsetBytes, maxBytes int) ([]byte, error) { - if l.UploadHandler != nil { - if err := l.downloadSession(namespace, sid); err != nil { - return nil, trace.Wrap(err) - } + if err := l.downloadSession(namespace, sid); err != nil { + return nil, trace.Wrap(err) } var data []byte for { @@ -847,10 +881,8 @@ func (l *AuditLog) GetSessionEvents(namespace string, sid session.ID, afterN int } // If code has to fetch print events (for playback) it has to download // the playback from external storage first - if l.UploadHandler != nil { - if err := l.downloadSession(namespace, sid); err != nil { - return nil, trace.Wrap(err) - } + if err := l.downloadSession(namespace, sid); err != nil { + return nil, trace.Wrap(err) } idx, err := l.readSessionIndex(namespace, sid) if err != nil { @@ -909,16 +941,26 @@ func (l *AuditLog) fetchSessionEvents(fileName string, afterN int) ([]EventField return retval, nil } -// EmitAuditEvent adds a new event to the log. If emitting fails, a Prometheus +// EmitAuditEvent adds a new event to the local file log +func (l *AuditLog) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + err := l.localLog.EmitAuditEvent(ctx, event) + if err != nil { + auditFailedEmit.Inc() + return trace.Wrap(err) + } + return nil +} + +// EmitAuditEventLegacy adds a new event to the log. If emitting fails, a Prometheus // counter is incremented. -func (l *AuditLog) EmitAuditEvent(event Event, fields EventFields) error { +func (l *AuditLog) EmitAuditEventLegacy(event Event, fields EventFields) error { // If an external logger has been set, use it as the emitter, otherwise // fallback to the local disk based emitter. var emitAuditEvent func(event Event, fields EventFields) error if l.ExternalLog != nil { - emitAuditEvent = l.ExternalLog.EmitAuditEvent + emitAuditEvent = l.ExternalLog.EmitAuditEventLegacy } else { - emitAuditEvent = l.localLog.EmitAuditEvent + emitAuditEvent = l.localLog.EmitAuditEventLegacy } // Emit the event. 
If it fails for any reason a Prometheus counter is @@ -934,7 +976,7 @@ func (l *AuditLog) EmitAuditEvent(event Event, fields EventFields) error { // auditDirs returns directories used for audit log storage func (l *AuditLog) auditDirs() ([]string, error) { - authServers, err := l.getAuthServers() + authServers, err := getAuthServers(l.DataDir) if err != nil { return nil, trace.Wrap(err) } @@ -1039,3 +1081,58 @@ func (l *AuditLog) periodicSpaceMonitor() { } } } + +// LegacyHandlerConfig configures +// legacy local handler adapter +type LegacyHandlerConfig struct { + // Handler is a handler that local handler wraps + Handler MultipartHandler + // Dir is a root directory with unpacked session records + // stored in legacy format + Dir string +} + +// CheckAndSetDefaults checks and sets default values +func (cfg *LegacyHandlerConfig) CheckAndSetDefaults() error { + if cfg.Handler == nil { + return trace.BadParameter("missing parameter Handler") + } + if cfg.Dir == "" { + return trace.BadParameter("missing parameter Dir") + } + return nil +} + +// NewLegacyHandler returns new legacy handler +func NewLegacyHandler(cfg LegacyHandlerConfig) (*LegacyHandler, error) { + if err := cfg.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + return &LegacyHandler{ + MultipartHandler: cfg.Handler, + cfg: cfg, + }, nil +} + +// LegacyHandler wraps local file uploader and handles +// old style uploads stored directly on disk +type LegacyHandler struct { + MultipartHandler + cfg LegacyHandlerConfig +} + +// Download downloads session tarball and writes it to writer +func (l *LegacyHandler) Download(ctx context.Context, sessionID session.ID, writer io.WriterAt) error { + // legacy format stores unpacked records in the directory + // in one of the sub-folders set up for the auth server ID + // if the file is present there, there no need to unpack and convert it + authServers, err := getAuthServers(l.cfg.Dir) + if err != nil { + return trace.Wrap(err) + } + _, err = readSessionIndex(l.cfg.Dir, authServers, defaults.Namespace, sessionID) + if err == nil { + return nil + } + return l.cfg.Handler.Download(ctx, sessionID, writer) +} diff --git a/lib/events/auditlog_test.go b/lib/events/auditlog_test.go index eb98ccbf10c16..52a8f99fc0b9f 100644 --- a/lib/events/auditlog_test.go +++ b/lib/events/auditlog_test.go @@ -30,7 +30,6 @@ import ( "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/defaults" - "github.com/gravitational/teleport/lib/events/filesessions" "github.com/gravitational/teleport/lib/fixtures" "github.com/gravitational/teleport/lib/session" "github.com/gravitational/teleport/lib/utils" @@ -60,12 +59,20 @@ func (a *AuditTestSuite) makeLog(c *check.C, dataDir string, recordSessions bool // creates a file-based audit log and returns a proper *AuditLog pointer // instead of the usual IAuditLog interface func (a *AuditTestSuite) makeLogWithClock(c *check.C, dataDir string, recordSessions bool, clock clockwork.Clock) (*AuditLog, error) { + handler, err := NewLegacyHandler(LegacyHandlerConfig{ + Handler: NewMemoryUploader(), + Dir: dataDir, + }) + if err != nil { + return nil, trace.Wrap(err) + } alog, err := NewAuditLog(AuditLogConfig{ DataDir: dataDir, RecordSessions: recordSessions, ServerID: "server1", Clock: clock, UIDGenerator: utils.NewFakeUID(), + UploadHandler: handler, }) if err != nil { return nil, trace.Wrap(err) @@ -94,18 +101,14 @@ func (a *AuditTestSuite) TestNew(c *check.C) { func (a *AuditTestSuite) TestSessionsOnOneAuthServer(c *check.C) { fakeClock 
:= clockwork.NewFakeClock() - storageDir := c.MkDir() - fileHandler, err := filesessions.NewHandler(filesessions.Config{ - Directory: storageDir, - }) - c.Assert(err, check.IsNil) + uploader := NewMemoryUploader() alog, err := NewAuditLog(AuditLogConfig{ Clock: fakeClock, DataDir: a.dataDir, RecordSessions: true, ServerID: "server1", - UploadHandler: fileHandler, + UploadHandler: uploader, }) c.Assert(err, check.IsNil) @@ -114,7 +117,7 @@ func (a *AuditTestSuite) TestSessionsOnOneAuthServer(c *check.C) { DataDir: a.dataDir, RecordSessions: true, ServerID: "server2", - UploadHandler: fileHandler, + UploadHandler: uploader, }) c.Assert(err, check.IsNil) @@ -139,7 +142,7 @@ func (a *AuditTestSuite) TestSessionsOnOneAuthServer(c *check.C) { Namespace: defaults.Namespace, SessionID: sessionID, Chunks: []*SessionChunk{ - // start the seession + // start the session &SessionChunk{ Time: fakeClock.Now().UTC().UnixNano(), EventIndex: 0, @@ -195,7 +198,7 @@ func (a *AuditTestSuite) TestSessionsOnOneAuthServer(c *check.C) { func upload(c *check.C, uploadDir string, clock clockwork.Clock, auditLog IAuditLog) { // start uploader process - eventsC := make(chan *UploadEvent, 100) + eventsC := make(chan UploadEvent, 100) uploader, err := NewUploader(UploaderConfig{ ServerID: "upload", DataDir: uploadDir, @@ -222,12 +225,6 @@ func upload(c *check.C, uploadDir string, clock clockwork.Clock, auditLog IAudit } func (a *AuditTestSuite) TestSessionRecordingOff(c *check.C) { - storageDir := c.MkDir() - fileHandler, err := filesessions.NewHandler(filesessions.Config{ - Directory: storageDir, - }) - c.Assert(err, check.IsNil) - now := time.Now().In(time.UTC).Round(time.Second) // create audit log with session recording disabled @@ -238,7 +235,7 @@ func (a *AuditTestSuite) TestSessionRecordingOff(c *check.C) { DataDir: a.dataDir, RecordSessions: true, ServerID: "server1", - UploadHandler: fileHandler, + UploadHandler: NewMemoryUploader(), }) c.Assert(err, check.IsNil) @@ -321,7 +318,7 @@ func (a *AuditTestSuite) TestBasicLogging(c *check.C) { alog.Clock = clockwork.NewFakeClockAt(now) // emit regular event: - err = alog.EmitAuditEvent(Event{Name: "user.joined"}, EventFields{"apples?": "yes"}) + err = alog.EmitAuditEventLegacy(Event{Name: "user.joined"}, EventFields{"apples?": "yes"}) c.Assert(err, check.IsNil) logfile := alog.localLog.file.Name() c.Assert(alog.Close(), check.IsNil) @@ -352,7 +349,7 @@ func (a *AuditTestSuite) TestLogRotation(c *check.C) { clock.Advance(duration) // emit regular event: - err = alog.EmitAuditEvent(Event{Name: "user.joined"}, EventFields{"apples?": "yes"}) + err = alog.EmitAuditEventLegacy(Event{Name: "user.joined"}, EventFields{"apples?": "yes"}) c.Assert(err, check.IsNil) logfile := alog.localLog.file.Name() @@ -381,19 +378,13 @@ func (a *AuditTestSuite) TestLogRotation(c *check.C) { // TestForwardAndUpload tests forwarding server and upload // server case func (a *AuditTestSuite) TestForwardAndUpload(c *check.C) { - storageDir := c.MkDir() - fileHandler, err := filesessions.NewHandler(filesessions.Config{ - Directory: storageDir, - }) - c.Assert(err, check.IsNil) - fakeClock := clockwork.NewFakeClock() alog, err := NewAuditLog(AuditLogConfig{ DataDir: a.dataDir, RecordSessions: true, Clock: fakeClock, ServerID: "remote", - UploadHandler: fileHandler, + UploadHandler: NewMemoryUploader(), }) c.Assert(err, check.IsNil) defer alog.Close() @@ -404,12 +395,6 @@ func (a *AuditTestSuite) TestForwardAndUpload(c *check.C) { // TestExternalLog tests forwarding server and upload // server 
case func (a *AuditTestSuite) TestExternalLog(c *check.C) { - storageDir := c.MkDir() - fileHandler, err := filesessions.NewHandler(filesessions.Config{ - Directory: storageDir, - }) - c.Assert(err, check.IsNil) - fileLog, err := NewFileLog(FileLogConfig{ Dir: c.MkDir(), }) @@ -421,7 +406,7 @@ func (a *AuditTestSuite) TestExternalLog(c *check.C) { RecordSessions: true, Clock: fakeClock, ServerID: "remote", - UploadHandler: fileHandler, + UploadHandler: NewMemoryUploader(), ExternalLog: fileLog, }) c.Assert(err, check.IsNil) diff --git a/lib/events/auditwriter.go b/lib/events/auditwriter.go new file mode 100644 index 0000000000000..88f46a223a682 --- /dev/null +++ b/lib/events/auditwriter.go @@ -0,0 +1,406 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "context" + "sync" + "time" + + "github.com/gravitational/teleport/lib/defaults" + "github.com/gravitational/teleport/lib/session" + "github.com/gravitational/teleport/lib/utils" + + "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" + + logrus "github.com/sirupsen/logrus" +) + +// NewAuditWriter returns a new instance of session writer +func NewAuditWriter(cfg AuditWriterConfig) (*AuditWriter, error) { + if err := cfg.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + + stream, err := cfg.Streamer.CreateAuditStream(cfg.Context, cfg.SessionID) + if err != nil { + return nil, trace.Wrap(err) + } + + ctx, cancel := context.WithCancel(cfg.Context) + writer := &AuditWriter{ + mtx: sync.Mutex{}, + cfg: cfg, + stream: NewCheckingStream(stream, cfg.Clock), + log: logrus.WithFields(logrus.Fields{ + trace.Component: cfg.Component, + }), + cancel: cancel, + closeCtx: ctx, + eventsCh: make(chan AuditEvent), + } + go writer.processEvents() + return writer, nil +} + +// AuditWriterConfig configures audit writer +type AuditWriterConfig struct { + // SessionID defines the session to record. + SessionID session.ID + + // ServerID is a server ID to write + ServerID string + + // Namespace is the session namespace. 
+ Namespace string + + // RecordOutput stores info on whether to record session output + RecordOutput bool + + // Component is a component used for logging + Component string + + // Streamer is used to create and resume audit streams + Streamer Streamer + + // Context is a context to cancel the writes + // or any other operations + Context context.Context + + // Clock is used to override time in tests + Clock clockwork.Clock + + // UID is UID generator + UID utils.UID +} + +// CheckAndSetDefaults checks and sets defaults +func (cfg *AuditWriterConfig) CheckAndSetDefaults() error { + if cfg.SessionID.IsZero() { + return trace.BadParameter("missing parameter SessionID") + } + if cfg.Streamer == nil { + return trace.BadParameter("missing parameter Streamer") + } + if cfg.Context == nil { + return trace.BadParameter("missing parameter Context") + } + if cfg.Namespace == "" { + cfg.Namespace = defaults.Namespace + } + if cfg.Clock == nil { + cfg.Clock = clockwork.NewRealClock() + } + if cfg.UID == nil { + cfg.UID = utils.NewRealUID() + } + return nil +} + +// AuditWriter wraps session stream +// and writes audit events to it +type AuditWriter struct { + mtx sync.Mutex + cfg AuditWriterConfig + log *logrus.Entry + lastPrintEvent *SessionPrint + eventIndex int64 + buffer []AuditEvent + eventsCh chan AuditEvent + lastStatus *StreamStatus + stream Stream + cancel context.CancelFunc + closeCtx context.Context +} + +// Status returns channel receiving updates about stream status +// last event index that was uploaded and upload ID +func (a *AuditWriter) Status() <-chan StreamStatus { + return nil +} + +// Done returns channel closed when streamer is closed +// should be used to detect sending errors +func (a *AuditWriter) Done() <-chan struct{} { + return a.closeCtx.Done() +} + +// Write takes a chunk and writes it into the audit log +func (a *AuditWriter) Write(data []byte) (int, error) { + if !a.cfg.RecordOutput { + return len(data), nil + } + // buffer is copied here to prevent data corruption: + // io.Copy allocates single buffer and calls multiple writes in a loop + // Write is async, this can lead to cases when the buffer is re-used + // and data is corrupted unless we copy the data buffer in the first place + dataCopy := make([]byte, len(data)) + copy(dataCopy, data) + + start := time.Now().UTC().Round(time.Millisecond) + for len(dataCopy) != 0 { + printEvent := &SessionPrint{ + Metadata: Metadata{ + Type: SessionPrintEvent, + Time: start, + }, + Data: dataCopy, + } + if printEvent.Size() > MaxProtoMessageSizeBytes { + extraBytes := printEvent.Size() - MaxProtoMessageSizeBytes + printEvent.Data = dataCopy[:extraBytes] + printEvent.Bytes = int64(len(printEvent.Data)) + dataCopy = dataCopy[extraBytes:] + } else { + printEvent.Bytes = int64(len(printEvent.Data)) + dataCopy = nil + } + if err := a.EmitAuditEvent(a.cfg.Context, printEvent); err != nil { + a.log.WithError(err).Error("Failed to emit session print event.") + return 0, trace.Wrap(err) + } + } + return len(data), nil +} + +// EmitAuditEvent emits audit event +func (a *AuditWriter) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + // Event modification is done under lock and in the same goroutine + // as the caller to avoid data races and event copying + if err := a.setupEvent(event); err != nil { + return trace.Wrap(err) + } + + // Without serialization, EmitAuditEvent will call grpc's method directly. 
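// --- Editor's sketch, not part of the original commit ---
// A minimal wiring example for the AuditWriter and AuditWriterConfig shown
// above, following the same pattern as the test harness later in this diff
// (newAuditWriterTest). Only identifiers introduced in this diff are used;
// error handling is kept short for brevity.
func exampleAuditWriter(ctx context.Context) error {
	uploader := NewMemoryUploader()
	streamer, err := NewProtoStreamer(ProtoStreamerConfig{Uploader: uploader})
	if err != nil {
		return trace.Wrap(err)
	}
	writer, err := NewAuditWriter(AuditWriterConfig{
		SessionID:    session.NewID(),
		Namespace:    defaults.Namespace,
		RecordOutput: true,
		Streamer:     streamer,
		Context:      ctx,
	})
	if err != nil {
		return trace.Wrap(err)
	}
	// The writer doubles as an io.Writer: bytes written here are chunked
	// into SessionPrint events and emitted on the stream asynchronously.
	if _, err := writer.Write([]byte("hello audit stream")); err != nil {
		return trace.Wrap(err)
	}
	// Complete flushes and finalizes the underlying stream.
	return writer.Complete(ctx)
}
// --- end of editor's sketch ---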
+ // When BPF callback is emitting events concurrently with session data to the grpc stream, + // it becomes deadlocked (not just blocked temporarily, but permanently) + // in flowcontrol.go, trying to get quota: + // https://github.com/grpc/grpc-go/blob/a906ca0441ceb1f7cd4f5c7de30b8e81ce2ff5e8/internal/transport/flowcontrol.go#L60 + select { + case a.eventsCh <- event: + return nil + case <-ctx.Done(): + return trace.ConnectionProblem(ctx.Err(), "context done") + case <-a.closeCtx.Done(): + return trace.ConnectionProblem(a.closeCtx.Err(), "writer is closed") + } +} + +// Close closes the stream and completes it, +// note that this behavior is different from Stream.Close, +// that aborts it, because of the way the writer is usually used +// the interface - io.WriteCloser has only close method +func (a *AuditWriter) Close(ctx context.Context) error { + a.cancel() + return nil +} + +// Complete closes the stream and marks it finalized, +// releases associated resources, in case of failure, +// closes this stream on the client side +func (a *AuditWriter) Complete(ctx context.Context) error { + a.cancel() + return nil +} + +func (a *AuditWriter) processEvents() { + for { + // From the spec: + // + // https://golang.org/ref/spec#Select_statements + // + // If one or more of the communications can proceed, a single one that + // can proceed is chosen via a uniform pseudo-random selection. + // + // This first drain is necessary to give status updates a priority + // in the event processing loop. The loop could receive + // a status update too late in cases with many events. + // Internal buffer then grows too large and applies + // backpressure without a need. + // + select { + case status := <-a.stream.Status(): + a.updateStatus(status) + default: + } + select { + case status := <-a.stream.Status(): + a.updateStatus(status) + case event := <-a.eventsCh: + a.buffer = append(a.buffer, event) + err := a.stream.EmitAuditEvent(a.cfg.Context, event) + if err == nil { + continue + } + a.log.WithError(err).Debugf("Failed to emit audit event, attempting to recover stream.") + start := time.Now() + if err := a.recoverStream(); err != nil { + a.log.WithError(err).Warningf("Failed to recover stream.") + a.cancel() + return + } + a.log.Debugf("Recovered stream in %v.", time.Since(start)) + case <-a.stream.Done(): + a.log.Debugf("Stream was closed by the server, attempting to recover.") + if err := a.recoverStream(); err != nil { + a.log.WithError(err).Warningf("Failed to recover stream.") + a.cancel() + return + } + case <-a.closeCtx.Done(): + if err := a.stream.Complete(a.cfg.Context); err != nil { + a.log.WithError(err).Warningf("Failed to complete stream") + return + } + return + } + } +} + +func (a *AuditWriter) recoverStream() error { + // if there is a previous stream, close it + if err := a.stream.Close(a.cfg.Context); err != nil { + a.log.WithError(err).Debugf("Failed to close stream.") + } + stream, err := a.tryResumeStream() + if err != nil { + return trace.Wrap(err) + } + a.stream = stream + // replay all non-confirmed audit events to the resumed stream + start := time.Now() + for i := range a.buffer { + err := a.stream.EmitAuditEvent(a.cfg.Context, a.buffer[i]) + if err != nil { + if err := a.stream.Close(a.cfg.Context); err != nil { + a.log.WithError(err).Debugf("Failed to close stream.") + } + return trace.Wrap(err) + } + } + a.log.Debugf("Replayed buffer of %v events to stream in %v", len(a.buffer), time.Since(start)) + return nil +} + +func (a *AuditWriter) tryResumeStream() (Stream, error) 
{ + retry, err := utils.NewLinear(utils.LinearConfig{ + Step: defaults.NetworkRetryDuration, + Max: defaults.NetworkBackoffDuration, + }) + if err != nil { + return nil, trace.Wrap(err) + } + var resumedStream Stream + start := time.Now() + for i := 0; i < defaults.FastAttempts; i++ { + var streamType string + if a.lastStatus == nil { + // The stream was either never created or has failed to receive the + // initial status update + resumedStream, err = a.cfg.Streamer.CreateAuditStream(a.cfg.Context, a.cfg.SessionID) + streamType = "new" + } else { + resumedStream, err = a.cfg.Streamer.ResumeAuditStream( + a.cfg.Context, a.cfg.SessionID, a.lastStatus.UploadID) + streamType = "existing" + } + retry.Inc() + if err == nil { + // The call to CreateAuditStream is async. To learn + // if it was successful get the first status update + // sent by the server after create. + select { + case status := <-resumedStream.Status(): + a.log.Debugf("Resumed %v stream on %v attempt in %v, upload %v.", + streamType, i+1, time.Since(start), status.UploadID) + return resumedStream, nil + case <-retry.After(): + err := resumedStream.Close(a.closeCtx) + if err != nil { + a.log.WithError(err).Debugf("Timed out waiting for stream status update, will retry.") + } else { + a.log.Debugf("Timed out waiting for stream status update, will retry.") + } + case <-a.closeCtx.Done(): + return nil, trace.ConnectionProblem(a.closeCtx.Err(), "operation has been cancelled") + } + } + select { + case <-retry.After(): + a.log.WithError(err).Debugf("Retrying to resume stream after backoff.") + case <-a.closeCtx.Done(): + return nil, trace.ConnectionProblem(a.closeCtx.Err(), "operation has been cancelled") + } + } + return nil, trace.Wrap(err) +} + +func (a *AuditWriter) updateStatus(status StreamStatus) { + a.lastStatus = &status + if status.LastEventIndex < 0 { + return + } + lastIndex := -1 + for i := 0; i < len(a.buffer); i++ { + if status.LastEventIndex < a.buffer[i].GetIndex() { + break + } + lastIndex = i + } + if lastIndex > 0 { + before := len(a.buffer) + a.buffer = a.buffer[lastIndex+1:] + a.log.Debugf("Removed %v saved events, current buffer size: %v.", before-len(a.buffer), len(a.buffer)) + } +} + +func (a *AuditWriter) setupEvent(event AuditEvent) error { + a.mtx.Lock() + defer a.mtx.Unlock() + + if err := CheckAndSetEventFields(event, a.cfg.Clock, a.cfg.UID); err != nil { + return trace.Wrap(err) + } + + sess, ok := event.(SessionMetadataSetter) + if ok { + sess.SetSessionID(string(a.cfg.SessionID)) + } + + srv, ok := event.(ServerMetadataSetter) + if ok { + srv.SetServerNamespace(a.cfg.Namespace) + srv.SetServerID(a.cfg.ServerID) + } + + event.SetIndex(a.eventIndex) + a.eventIndex++ + + printEvent, ok := event.(*SessionPrint) + if !ok { + return nil + } + + if a.lastPrintEvent != nil { + printEvent.Offset = a.lastPrintEvent.Offset + int64(len(a.lastPrintEvent.Data)) + printEvent.DelayMilliseconds = diff(a.lastPrintEvent.Time, printEvent.Time) + a.lastPrintEvent.DelayMilliseconds + printEvent.ChunkIndex = a.lastPrintEvent.ChunkIndex + 1 + } + a.lastPrintEvent = printEvent + return nil +} diff --git a/lib/events/auditwriter_test.go b/lib/events/auditwriter_test.go new file mode 100644 index 0000000000000..7b93c0211e79c --- /dev/null +++ b/lib/events/auditwriter_test.go @@ -0,0 +1,280 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "bytes" + "context" + "io" + "testing" + "time" + + "github.com/gravitational/teleport/lib/defaults" + "github.com/gravitational/teleport/lib/session" + "github.com/gravitational/teleport/lib/utils" + + "github.com/stretchr/testify/assert" + + "github.com/gravitational/trace" + log "github.com/sirupsen/logrus" + "go.uber.org/atomic" +) + +// TestAuditWriter tests audit writer - a component used for +// session recording +func TestAuditWriter(t *testing.T) { + utils.InitLoggerForTests(testing.Verbose()) + + // SessionTests multiple session + t.Run("Session", func(t *testing.T) { + test := newAuditWriterTest(t, nil) + defer test.cancel() + + inEvents := GenerateTestSession(SessionParams{ + PrintEvents: 1024, + SessionID: string(test.sid), + }) + + for _, event := range inEvents { + err := test.writer.EmitAuditEvent(test.ctx, event) + assert.NoError(t, err) + } + err := test.writer.Complete(test.ctx) + assert.NoError(t, err) + + select { + case event := <-test.eventsCh: + assert.Equal(t, string(test.sid), event.SessionID) + assert.Nil(t, event.Error) + case <-test.ctx.Done(): + t.Fatalf("Timeout waiting for async upload, try `go test -v` to get more logs for details") + } + + var outEvents []AuditEvent + uploads, err := test.uploader.ListUploads(test.ctx) + assert.NoError(t, err) + parts, err := test.uploader.GetParts(uploads[0].ID) + assert.NoError(t, err) + + for _, part := range parts { + reader := NewProtoReader(bytes.NewReader(part)) + out, err := reader.ReadAll(test.ctx) + assert.Nil(t, err, "part crash %#v", part) + outEvents = append(outEvents, out...) 
+ } + + assert.Equal(t, len(inEvents), len(outEvents)) + assert.Equal(t, inEvents, outEvents) + }) + + // ResumeStart resumes stream after it was broken at the start of trasmission + t.Run("ResumeStart", func(t *testing.T) { + streamCreated := atomic.NewUint64(0) + terminateConnection := atomic.NewUint64(1) + streamResumed := atomic.NewUint64(0) + + test := newAuditWriterTest(t, func(streamer Streamer) (*CallbackStreamer, error) { + return NewCallbackStreamer(CallbackStreamerConfig{ + Inner: streamer, + OnEmitAuditEvent: func(ctx context.Context, sid session.ID, event AuditEvent) error { + if event.GetIndex() > 1 && terminateConnection.CAS(1, 0) == true { + log.Debugf("Terminating connection at event %v", event.GetIndex()) + return trace.ConnectionProblem(nil, "connection terminated") + } + return nil + }, + OnCreateAuditStream: func(ctx context.Context, sid session.ID, streamer Streamer) (Stream, error) { + stream, err := streamer.CreateAuditStream(ctx, sid) + assert.NoError(t, err) + if streamCreated.Inc() == 1 { + // simulate status update loss + select { + case <-stream.Status(): + log.Debugf("Stealing status update.") + case <-time.After(time.Second): + return nil, trace.BadParameter("timeout") + } + } + return stream, nil + }, + OnResumeAuditStream: func(ctx context.Context, sid session.ID, uploadID string, streamer Streamer) (Stream, error) { + stream, err := streamer.ResumeAuditStream(ctx, sid, uploadID) + assert.NoError(t, err) + streamResumed.Inc() + return stream, nil + }, + }) + }) + + defer test.cancel() + + inEvents := GenerateTestSession(SessionParams{ + PrintEvents: 1024, + SessionID: string(test.sid), + }) + + start := time.Now() + for _, event := range inEvents { + err := test.writer.EmitAuditEvent(test.ctx, event) + assert.NoError(t, err) + } + log.Debugf("Emitted %v events in %v.", len(inEvents), time.Since(start)) + err := test.writer.Complete(test.ctx) + assert.NoError(t, err) + + outEvents := test.collectEvents(t) + + assert.Equal(t, len(inEvents), len(outEvents)) + assert.Equal(t, inEvents, outEvents) + assert.Equal(t, 0, int(streamResumed.Load()), "Stream not resumed.") + assert.Equal(t, 2, int(streamCreated.Load()), "Stream created twice.") + }) + + // ResumeMiddle resumes stream after it was broken in the middle of transmission + t.Run("ResumeMiddle", func(t *testing.T) { + streamCreated := atomic.NewUint64(0) + terminateConnection := atomic.NewUint64(1) + streamResumed := atomic.NewUint64(0) + + test := newAuditWriterTest(t, func(streamer Streamer) (*CallbackStreamer, error) { + return NewCallbackStreamer(CallbackStreamerConfig{ + Inner: streamer, + OnEmitAuditEvent: func(ctx context.Context, sid session.ID, event AuditEvent) error { + if event.GetIndex() > 600 && terminateConnection.CAS(1, 0) == true { + log.Debugf("Terminating connection at event %v", event.GetIndex()) + return trace.ConnectionProblem(nil, "connection terminated") + } + return nil + }, + OnCreateAuditStream: func(ctx context.Context, sid session.ID, streamer Streamer) (Stream, error) { + stream, err := streamer.CreateAuditStream(ctx, sid) + assert.NoError(t, err) + streamCreated.Inc() + return stream, nil + }, + OnResumeAuditStream: func(ctx context.Context, sid session.ID, uploadID string, streamer Streamer) (Stream, error) { + stream, err := streamer.ResumeAuditStream(ctx, sid, uploadID) + assert.NoError(t, err) + streamResumed.Inc() + return stream, nil + }, + }) + }) + + defer test.cancel() + + inEvents := GenerateTestSession(SessionParams{ + PrintEvents: 1024, + SessionID: 
string(test.sid), + }) + + start := time.Now() + for _, event := range inEvents { + err := test.writer.EmitAuditEvent(test.ctx, event) + assert.NoError(t, err) + } + log.Debugf("Emitted all events in %v.", time.Since(start)) + err := test.writer.Complete(test.ctx) + assert.NoError(t, err) + + outEvents := test.collectEvents(t) + + assert.Equal(t, len(inEvents), len(outEvents)) + assert.Equal(t, inEvents, outEvents) + assert.Equal(t, 1, int(streamResumed.Load()), "Stream resumed once.") + assert.Equal(t, 1, int(streamResumed.Load()), "Stream created once.") + }) + +} + +type auditWriterTest struct { + eventsCh chan UploadEvent + uploader *MemoryUploader + ctx context.Context + cancel context.CancelFunc + writer *AuditWriter + sid session.ID +} + +type newStreamerFn func(streamer Streamer) (*CallbackStreamer, error) + +func newAuditWriterTest(t *testing.T, newStreamer newStreamerFn) *auditWriterTest { + eventsCh := make(chan UploadEvent, 1) + uploader := NewMemoryUploader(eventsCh) + protoStreamer, err := NewProtoStreamer(ProtoStreamerConfig{ + Uploader: uploader, + }) + assert.NoError(t, err) + + var streamer Streamer + if newStreamer != nil { + callbackStreamer, err := newStreamer(protoStreamer) + assert.NoError(t, err) + streamer = callbackStreamer + } else { + streamer = protoStreamer + } + + ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) + + sid := session.NewID() + writer, err := NewAuditWriter(AuditWriterConfig{ + SessionID: sid, + Namespace: defaults.Namespace, + RecordOutput: true, + Streamer: streamer, + Context: ctx, + }) + assert.NoError(t, err) + + return &auditWriterTest{ + ctx: ctx, + cancel: cancel, + writer: writer, + uploader: uploader, + eventsCh: eventsCh, + sid: sid, + } +} + +func (a *auditWriterTest) collectEvents(t *testing.T) []AuditEvent { + start := time.Now() + var uploadID string + select { + case event := <-a.eventsCh: + log.Debugf("Got status update, upload %v in %v.", event.UploadID, time.Since(start)) + assert.Equal(t, string(a.sid), event.SessionID) + assert.Nil(t, event.Error) + uploadID = event.UploadID + case <-a.ctx.Done(): + t.Fatalf("Timeout waiting for async upload, try `go test -v` to get more logs for details") + } + + parts, err := a.uploader.GetParts(uploadID) + assert.NoError(t, err) + + var readers []io.Reader + for _, part := range parts { + readers = append(readers, bytes.NewReader(part)) + } + reader := NewProtoReader(io.MultiReader(readers...)) + outEvents, err := reader.ReadAll(a.ctx) + assert.Nil(t, err, "failed to read") + log.WithFields(reader.GetStats().ToFields()).Debugf("Reader stats.") + + return outEvents +} diff --git a/lib/events/codes.go b/lib/events/codes.go index c2e125833187b..650f267de51c9 100644 --- a/lib/events/codes.go +++ b/lib/events/codes.go @@ -25,240 +25,240 @@ type Event struct { } var ( - // UserLocalLogin is emitted when a local user successfully logs in. - UserLocalLogin = Event{ + // UserLocalLoginE is emitted when a local user successfully logs in. + UserLocalLoginE = Event{ Name: UserLoginEvent, Code: UserLocalLoginCode, } - // UserLocalLoginFailure is emitted when a local user login attempt fails. - UserLocalLoginFailure = Event{ + // UserLocalLoginFailureE is emitted when a local user login attempt fails. + UserLocalLoginFailureE = Event{ Name: UserLoginEvent, Code: UserLocalLoginFailureCode, } - // UserSSOLogin is emitted when an SSO user successfully logs in. - UserSSOLogin = Event{ + // UserSSOLoginE is emitted when an SSO user successfully logs in. 
+ UserSSOLoginE = Event{ Name: UserLoginEvent, Code: UserSSOLoginCode, } - // UserSSOLoginFailure is emitted when an SSO user login attempt fails. - UserSSOLoginFailure = Event{ + // UserSSOLoginFailureE is emitted when an SSO user login attempt fails. + UserSSOLoginFailureE = Event{ Name: UserLoginEvent, Code: UserSSOLoginFailureCode, } - // UserUpdate is emitted when a user is updated. - UserUpdate = Event{ + // UserUpdateE is emitted when a user is updated. + UserUpdateE = Event{ Name: UserUpdatedEvent, Code: UserUpdateCode, } - // UserDelete is emitted when a user is deleted. - UserDelete = Event{ + // UserDeleteE is emitted when a user is deleted. + UserDeleteE = Event{ Name: UserDeleteEvent, Code: UserDeleteCode, } - // UserCreate is emitted when a user is created. - UserCreate = Event{ + // UserCreateE is emitted when a user is created. + UserCreateE = Event{ Name: UserCreateEvent, Code: UserCreateCode, } - // UserPasswordChange is emitted when a user changes their own password. - UserPasswordChange = Event{ + // UserPasswordChangeE is emitted when a user changes their own password. + UserPasswordChangeE = Event{ Name: UserPasswordChangeEvent, Code: UserPasswordChangeCode, } - // SessionStart is emitted when a user starts a new session. - SessionStart = Event{ + // SessionStartE is emitted when a user starts a new session. + SessionStartE = Event{ Name: SessionStartEvent, Code: SessionStartCode, } - // SessionJoin is emitted when a user joins the session. - SessionJoin = Event{ + // SessionJoinE is emitted when a user joins the session. + SessionJoinE = Event{ Name: SessionJoinEvent, Code: SessionJoinCode, } - // TerminalResize is emitted when a user resizes the terminal. - TerminalResize = Event{ + // TerminalResizeE is emitted when a user resizes the terminal. + TerminalResizeE = Event{ Name: ResizeEvent, Code: TerminalResizeCode, } - // SessionLeave is emitted when a user leaves the session. - SessionLeave = Event{ + // SessionLeaveE is emitted when a user leaves the session. + SessionLeaveE = Event{ Name: SessionLeaveEvent, Code: SessionLeaveCode, } - // SessionEnd is emitted when a user ends the session. - SessionEnd = Event{ + // SessionEndE is emitted when a user ends the session. + SessionEndE = Event{ Name: SessionEndEvent, Code: SessionEndCode, } - // SessionUpload is emitted after a session recording has been uploaded. - SessionUpload = Event{ + // SessionUploadE is emitted after a session recording has been uploaded. + SessionUploadE = Event{ Name: SessionUploadEvent, Code: SessionUploadCode, } - // SessionData is emitted to report session data usage. - SessionData = Event{ + // SessionDataE is emitted to report session data usage. + SessionDataE = Event{ Name: SessionDataEvent, Code: SessionDataCode, } - // Subsystem is emitted when a user requests a new subsystem. - Subsystem = Event{ + // SubsystemE is emitted when a user requests a new subsystem. + SubsystemE = Event{ Name: SubsystemEvent, Code: SubsystemCode, } - // SubsystemFailure is emitted when a user subsystem request fails. - SubsystemFailure = Event{ + // SubsystemFailureE is emitted when a user subsystem request fails. + SubsystemFailureE = Event{ Name: SubsystemEvent, Code: SubsystemFailureCode, } - // Exec is emitted when a user executes a command on a node. - Exec = Event{ + // ExecE is emitted when a user executes a command on a node. + ExecE = Event{ Name: ExecEvent, Code: ExecCode, } - // ExecFailure is emitted when a user command execution fails. 
- ExecFailure = Event{ + // ExecFailureE is emitted when a user command execution fails. + ExecFailureE = Event{ Name: ExecEvent, Code: ExecFailureCode, } - // X11Forward is emitted when a user requests X11 forwarding. - X11Forward = Event{ + // X11ForwardE is emitted when a user requests X11 forwarding. + X11ForwardE = Event{ Name: X11ForwardEvent, Code: X11ForwardCode, } - // X11ForwardFailure is emitted when an X11 forwarding request fails. - X11ForwardFailure = Event{ + // X11ForwardFailureE is emitted when an X11 forwarding request fails. + X11ForwardFailureE = Event{ Name: X11ForwardEvent, Code: X11ForwardFailureCode, } - // PortForward is emitted when a user requests port forwarding. - PortForward = Event{ + // PortForwardE is emitted when a user requests port forwarding. + PortForwardE = Event{ Name: PortForwardEvent, Code: PortForwardCode, } - // PortForwardFailure is emitted when a port forward request fails. - PortForwardFailure = Event{ + // PortForwardFailureE is emitted when a port forward request fails. + PortForwardFailureE = Event{ Name: PortForwardEvent, Code: PortForwardFailureCode, } - // SCPDownload is emitted when a user downloads a file. - SCPDownload = Event{ + // SCPDownloadE is emitted when a user downloads a file. + SCPDownloadE = Event{ Name: SCPEvent, Code: SCPDownloadCode, } - // SCPDownloadFailure is emitted when a file download fails. - SCPDownloadFailure = Event{ + // SCPDownloadFailureE is emitted when a file download fails. + SCPDownloadFailureE = Event{ Name: SCPEvent, Code: SCPDownloadFailureCode, } - // SCPUpload is emitted when a user uploads a file. - SCPUpload = Event{ + // SCPUploadE is emitted when a user uploads a file. + SCPUploadE = Event{ Name: SCPEvent, Code: SCPUploadCode, } - // SCPUploadFailure is emitted when a file upload fails. - SCPUploadFailure = Event{ + // SCPUploadFailureE is emitted when a file upload fails. + SCPUploadFailureE = Event{ Name: SCPEvent, Code: SCPUploadFailureCode, } - // ClientDisconnect is emitted when a user session is disconnected. - ClientDisconnect = Event{ + // ClientDisconnectE is emitted when a user session is disconnected. + ClientDisconnectE = Event{ Name: ClientDisconnectEvent, Code: ClientDisconnectCode, } - // AuthAttemptFailure is emitted upon a failed authentication attempt. - AuthAttemptFailure = Event{ + // AuthAttemptFailureE is emitted upon a failed authentication attempt. + AuthAttemptFailureE = Event{ Name: AuthAttemptEvent, Code: AuthAttemptFailureCode, } - // AccessRequestCreated is emitted when an access request is created. - AccessRequestCreated = Event{ + // AccessRequestCreatedE is emitted when an access request is created. + AccessRequestCreatedE = Event{ Name: AccessRequestCreateEvent, Code: AccessRequestCreateCode, } - AccessRequestUpdated = Event{ + AccessRequestUpdatedE = Event{ Name: AccessRequestUpdateEvent, Code: AccessRequestUpdateCode, } - // SessionCommand is emitted upon execution of a command when using enhanced + // SessionCommandE is emitted upon execution of a command when using enhanced // session recording. - SessionCommand = Event{ + SessionCommandE = Event{ Name: SessionCommandEvent, Code: SessionCommandCode, } - // SessionDisk is emitted upon open of a file when using enhanced session recording. - SessionDisk = Event{ + // SessionDiskE is emitted upon open of a file when using enhanced session recording. 
+ SessionDiskE = Event{ Name: SessionDiskEvent, Code: SessionDiskCode, } - // SessionNetwork is emitted when a network requests is is issued when + // SessionNetworkE is emitted when a network request is issued when // using enhanced session recording. - SessionNetwork = Event{ + SessionNetworkE = Event{ Name: SessionNetworkEvent, Code: SessionNetworkCode, } - // ResetPasswordTokenCreated is emitted when token is created. - ResetPasswordTokenCreated = Event{ + // ResetPasswordTokenCreatedE is emitted when a password reset token is created. + ResetPasswordTokenCreatedE = Event{ Name: ResetPasswordTokenCreateEvent, Code: ResetPasswordTokenCreateCode, } - // RoleCreated is emitted when a role is created/updated. - RoleCreated = Event{ + // RoleCreatedE is emitted when a role is created/updated. + RoleCreatedE = Event{ Name: RoleCreatedEvent, Code: RoleCreatedCode, } - // RoleDeleted is emitted when a role is deleted. - RoleDeleted = Event{ + // RoleDeletedE is emitted when a role is deleted. + RoleDeletedE = Event{ Name: RoleDeletedEvent, Code: RoleDeletedCode, } - // TrustedClusterCreate is emitted when a trusted cluster relationship is created. - TrustedClusterCreate = Event{ + // TrustedClusterCreateE is emitted when a trusted cluster relationship is created. + TrustedClusterCreateE = Event{ Name: TrustedClusterCreateEvent, Code: TrustedClusterCreateCode, } - // TrustedClusterDelete is emitted when a trusted cluster is removed from the root cluster. - TrustedClusterDelete = Event{ + // TrustedClusterDeleteE is emitted when a trusted cluster is removed from the root cluster. + TrustedClusterDeleteE = Event{ Name: TrustedClusterDeleteEvent, Code: TrustedClusterDeleteCode, } - // TrustedClusterTokenCreate is emitted when a new join + // TrustedClusterTokenCreateE is emitted when a new join // token for trusted cluster is created. - TrustedClusterTokenCreate = Event{ + TrustedClusterTokenCreateE = Event{ Name: TrustedClusterTokenCreateEvent, Code: TrustedClusterTokenCreateCode, } - // GithubConnectorCreated is emitted when a Github connector is created/updated. - GithubConnectorCreated = Event{ + // GithubConnectorCreatedE is emitted when a Github connector is created/updated. + GithubConnectorCreatedE = Event{ Name: GithubConnectorCreatedEvent, Code: GithubConnectorCreatedCode, } - // GithubConnectorDeleted is emitted when a Github connector is deleted. - GithubConnectorDeleted = Event{ + // GithubConnectorDeletedE is emitted when a Github connector is deleted. + GithubConnectorDeletedE = Event{ Name: GithubConnectorDeletedEvent, Code: GithubConnectorDeletedCode, } - // OIDCConnectorCreated is emitted when an OIDC connector is created/updated. - OIDCConnectorCreated = Event{ + // OIDCConnectorCreatedE is emitted when an OIDC connector is created/updated. + OIDCConnectorCreatedE = Event{ Name: OIDCConnectorCreatedEvent, Code: OIDCConnectorCreatedCode, } - // OIDCConnectorDeleted is emitted when an OIDC connector is deleted. - OIDCConnectorDeleted = Event{ + // OIDCConnectorDeletedE is emitted when an OIDC connector is deleted. + OIDCConnectorDeletedE = Event{ Name: OIDCConnectorDeletedEvent, Code: OIDCConnectorDeletedCode, } - // SAMLConnectorCreated is emitted when a SAML connector is created/updated. - SAMLConnectorCreated = Event{ + // SAMLConnectorCreatedE is emitted when a SAML connector is created/updated. + SAMLConnectorCreatedE = Event{ Name: SAMLConnectorCreatedEvent, Code: SAMLConnectorCreatedCode, } - // SAMLConnectorDeleted is emitted when a SAML connector is deleted. 
- SAMLConnectorDeleted = Event{ + // SAMLConnectorDeletedE is emitted when a SAML connector is deleted. + SAMLConnectorDeletedE = Event{ Name: SAMLConnectorDeletedEvent, Code: SAMLConnectorDeletedCode, } - // SessionRejected is emitted when a user hits `max_connections`. - SessionRejected = Event{ + // SessionRejectedE is emitted when a user hits `max_connections`. + SessionRejectedE = Event{ Name: SessionRejectedEvent, Code: SessionRejectedCode, } diff --git a/lib/events/complete.go b/lib/events/complete.go new file mode 100644 index 0000000000000..b9da7b1a8d7bb --- /dev/null +++ b/lib/events/complete.go @@ -0,0 +1,147 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "context" + "time" + + "github.com/gravitational/teleport" + "github.com/gravitational/teleport/lib/defaults" + + "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" + log "github.com/sirupsen/logrus" +) + +// UploadCompleterConfig specifies configuration for the uploader +type UploadCompleterConfig struct { + // Uploader allows the completer to list and complete uploads + Uploader MultipartUploader + // GracePeriod is the period after which uploads are considered + // abandoned and will be completed + GracePeriod time.Duration + // Component is a component used in logging + Component string + // CheckPeriod is a period for checking the upload + CheckPeriod time.Duration + // Clock is used to override clock in tests + Clock clockwork.Clock + // Unstarted does not start automatic goroutine, + // is useful when completer is embedded in another function + Unstarted bool +} + +// CheckAndSetDefaults checks and sets default values +func (cfg *UploadCompleterConfig) CheckAndSetDefaults() error { + if cfg.Uploader == nil { + return trace.BadParameter("missing parameter Uploader") + } + if cfg.GracePeriod == 0 { + cfg.GracePeriod = defaults.UploadGracePeriod + } + if cfg.Component == "" { + cfg.Component = teleport.ComponentAuth + } + if cfg.CheckPeriod == 0 { + cfg.CheckPeriod = defaults.LowResPollingPeriod + } + if cfg.Clock == nil { + cfg.Clock = clockwork.NewRealClock() + } + return nil +} + +// NewUploadCompleter returns a new instance of the upload completer +// the completer has to be closed to release resources and goroutines +func NewUploadCompleter(cfg UploadCompleterConfig) (*UploadCompleter, error) { + if err := cfg.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + ctx, cancel := context.WithCancel(context.Background()) + u := &UploadCompleter{ + cfg: cfg, + log: log.WithFields(log.Fields{ + trace.Component: teleport.Component(cfg.Component, "completer"), + }), + cancel: cancel, + closeCtx: ctx, + } + if !cfg.Unstarted { + go u.run() + } + return u, nil +} + +// UploadCompleter periodically scans uploads that have not been completed +// and completes them +type UploadCompleter struct { + cfg UploadCompleterConfig + log *log.Entry + cancel context.CancelFunc + closeCtx context.Context +} + +func (u *UploadCompleter) run() { + ticker 
:= u.cfg.Clock.NewTicker(u.cfg.CheckPeriod) + defer ticker.Stop() + for { + select { + case <-ticker.Chan(): + if err := u.CheckUploads(u.closeCtx); err != nil { + u.log.WithError(err).Warningf("Failed to check uploads.") + } + case <-u.closeCtx.Done(): + return + } + } +} + +// CheckUploads fetches uploads, checks if any uploads exceed grace period +// and completes unfinished uploads +func (u *UploadCompleter) CheckUploads(ctx context.Context) error { + uploads, err := u.cfg.Uploader.ListUploads(ctx) + if err != nil { + return trace.Wrap(err) + } + u.log.Debugf("Got %v active uploads.", len(uploads)) + for _, upload := range uploads { + gracePoint := upload.Initiated.Add(u.cfg.GracePeriod) + if !gracePoint.Before(u.cfg.Clock.Now()) { + return nil + } + parts, err := u.cfg.Uploader.ListParts(ctx, upload) + if err != nil { + return trace.Wrap(err) + } + if len(parts) == 0 { + continue + } + u.log.Debugf("Upload %v grace period is over. Trying complete.", upload) + if err := u.cfg.Uploader.CompleteUpload(ctx, upload, parts); err != nil { + return trace.Wrap(err) + } + u.log.Debugf("Completed upload %v.", upload) + } + return nil +} + +// Close closes all outstanding operations without waiting +func (u *UploadCompleter) Close() error { + u.cancel() + return nil +} diff --git a/lib/events/convert.go b/lib/events/convert.go new file mode 100644 index 0000000000000..17134fb59e570 --- /dev/null +++ b/lib/events/convert.go @@ -0,0 +1,449 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "bytes" + "encoding/json" + "time" + + "github.com/gravitational/teleport/lib/utils" + + "github.com/gogo/protobuf/jsonpb" + "github.com/gogo/protobuf/types" + "github.com/gravitational/trace" +) + +// EncodeMap encodes map[string]interface{} to map +func EncodeMap(msg map[string]interface{}) (*Struct, error) { + data, err := json.Marshal(msg) + if err != nil { + return nil, trace.Wrap(err) + } + pbs := types.Struct{} + if err = jsonpb.Unmarshal(bytes.NewReader(data), &pbs); err != nil { + return nil, trace.Wrap(err) + } + return &Struct{Struct: pbs}, nil +} + +// EncodeMapStrings encodes map[string][]string to map +func EncodeMapStrings(msg map[string][]string) (*Struct, error) { + data, err := json.Marshal(msg) + if err != nil { + return nil, trace.Wrap(err) + } + pbs := types.Struct{} + if err = jsonpb.Unmarshal(bytes.NewReader(data), &pbs); err != nil { + return nil, trace.Wrap(err) + } + return &Struct{Struct: pbs}, nil +} + +// MustEncodeMap panics if EncodeMap returns error +func MustEncodeMap(msg map[string]interface{}) *Struct { + m, err := EncodeMap(msg) + if err != nil { + panic(err) + } + return m +} + +// decodeToMap converts a pb.Struct to a map from strings to Go types. 
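// --- Editor's sketch, not part of the original commit ---
// Typical use of the EncodeMap helper above: packing free-form fields into
// the proto-friendly Struct wrapper and getting plain JSON back out via the
// wrapper's MarshalJSON. The field names are made up for illustration.
func exampleEncodeMap() ([]byte, error) {
	fields, err := EncodeMap(map[string]interface{}{
		"login":   "alice",
		"success": true,
		"attempt": 1,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Struct marshals itself back into regular JSON, so it can be embedded
	// in audit events and serialized alongside the rest of the event.
	return fields.MarshalJSON()
}
// --- end of editor's sketch ---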
+func decodeToMap(s *types.Struct) (map[string]interface{}, error) { + if s == nil { + return nil, nil + } + m := map[string]interface{}{} + for k, v := range s.Fields { + var err error + m[k], err = decodeValue(v) + if err != nil { + return nil, trace.Wrap(err) + } + } + return m, nil +} + +// decodeValue decodes proto value to golang type +func decodeValue(v *types.Value) (interface{}, error) { + switch k := v.Kind.(type) { + case *types.Value_NullValue: + return nil, nil + case *types.Value_NumberValue: + return k.NumberValue, nil + case *types.Value_StringValue: + return k.StringValue, nil + case *types.Value_BoolValue: + return k.BoolValue, nil + case *types.Value_StructValue: + return decodeToMap(k.StructValue) + case *types.Value_ListValue: + s := make([]interface{}, len(k.ListValue.Values)) + for i, e := range k.ListValue.Values { + var err error + s[i], err = decodeValue(e) + if err != nil { + return nil, trace.Wrap(err) + } + } + return s, nil + default: + return nil, trace.BadParameter("protostruct: unknown kind %v", k) + } +} + +// Struct is a wrapper around types.Struct +// that marshals itself into json +type Struct struct { + types.Struct +} + +// MarshalJSON marshals boolean value. +func (s *Struct) MarshalJSON() ([]byte, error) { + m, err := decodeToMap(&s.Struct) + if err != nil { + return nil, trace.Wrap(err) + } + return utils.FastMarshal(m) +} + +// UnmarshalJSON unmarshals JSON from string or bool, +// in case if value is missing or not recognized, defaults to false +func (s *Struct) UnmarshalJSON(data []byte) error { + if len(data) == 0 { + return nil + } + err := jsonpb.Unmarshal(bytes.NewReader(data), &s.Struct) + if err != nil { + return trace.Wrap(err) + } + return nil +} + +// GetType returns event type +func (m *Metadata) GetType() string { + return m.Type +} + +// SetType sets unique type +func (m *Metadata) SetType(etype string) { + m.Type = etype +} + +// GetID returns event ID +func (m *Metadata) GetID() string { + return m.ID +} + +// GetCode returns event code +func (m *Metadata) GetCode() string { + return m.Code +} + +// SetCode sets event code +func (m *Metadata) SetCode(code string) { + m.Code = code +} + +// SetID sets event ID +func (m *Metadata) SetID(id string) { + m.ID = id +} + +// GetTime returns event time +func (m *Metadata) GetTime() time.Time { + return m.Time +} + +// SetTime sets event time +func (m *Metadata) SetTime(tm time.Time) { + m.Time = tm +} + +// SetIndex sets event index +func (m *Metadata) SetIndex(idx int64) { + m.Index = idx +} + +// GetIndex gets event index +func (m *Metadata) GetIndex() int64 { + return m.Index +} + +// GetServerID returns event server ID +func (m *ServerMetadata) GetServerID() string { + return m.ServerID +} + +// SetServerID sets event server ID +func (m *ServerMetadata) SetServerID(id string) { + m.ServerID = id +} + +// GetServerNamespace returns event server ID +func (m *ServerMetadata) GetServerNamespace() string { + return m.ServerNamespace +} + +// SetServerNamespace sets server namespace +func (m *ServerMetadata) SetServerNamespace(ns string) { + m.ServerNamespace = ns +} + +// GetSessionID returns event session ID +func (m *SessionMetadata) GetSessionID() string { + return m.SessionID +} + +// MustToOneOf converts audit event to OneOf +// or panics, used in tests +func MustToOneOf(in AuditEvent) *OneOf { + out, err := ToOneOf(in) + if err != nil { + panic(err) + } + return out +} + +// ToOneOf converts audit event to union type of the events +func ToOneOf(in AuditEvent) (*OneOf, error) { + out 
:= OneOf{} + + switch e := in.(type) { + case *UserLogin: + out.Event = &OneOf_UserLogin{ + UserLogin: e, + } + case *UserCreate: + out.Event = &OneOf_UserCreate{ + UserCreate: e, + } + case *UserDelete: + out.Event = &OneOf_UserDelete{ + UserDelete: e, + } + case *UserPasswordChange: + out.Event = &OneOf_UserPasswordChange{ + UserPasswordChange: e, + } + case *SessionStart: + out.Event = &OneOf_SessionStart{ + SessionStart: e, + } + case *SessionJoin: + out.Event = &OneOf_SessionJoin{ + SessionJoin: e, + } + case *SessionPrint: + out.Event = &OneOf_SessionPrint{ + SessionPrint: e, + } + case *SessionReject: + out.Event = &OneOf_SessionReject{ + SessionReject: e, + } + case *Resize: + out.Event = &OneOf_Resize{ + Resize: e, + } + case *SessionEnd: + out.Event = &OneOf_SessionEnd{ + SessionEnd: e, + } + case *SessionCommand: + out.Event = &OneOf_SessionCommand{ + SessionCommand: e, + } + case *SessionDisk: + out.Event = &OneOf_SessionDisk{ + SessionDisk: e, + } + case *SessionNetwork: + out.Event = &OneOf_SessionNetwork{ + SessionNetwork: e, + } + case *SessionData: + out.Event = &OneOf_SessionData{ + SessionData: e, + } + case *SessionLeave: + out.Event = &OneOf_SessionLeave{ + SessionLeave: e, + } + case *PortForward: + out.Event = &OneOf_PortForward{ + PortForward: e, + } + case *X11Forward: + out.Event = &OneOf_X11Forward{ + X11Forward: e, + } + case *Subsystem: + out.Event = &OneOf_Subsystem{ + Subsystem: e, + } + case *SCP: + out.Event = &OneOf_SCP{ + SCP: e, + } + case *Exec: + out.Event = &OneOf_Exec{ + Exec: e, + } + case *ClientDisconnect: + out.Event = &OneOf_ClientDisconnect{ + ClientDisconnect: e, + } + case *AuthAttempt: + out.Event = &OneOf_AuthAttempt{ + AuthAttempt: e, + } + case *AccessRequestCreate: + out.Event = &OneOf_AccessRequestCreate{ + AccessRequestCreate: e, + } + case *RoleCreate: + out.Event = &OneOf_RoleCreate{ + RoleCreate: e, + } + case *RoleDelete: + out.Event = &OneOf_RoleDelete{ + RoleDelete: e, + } + case *ResetPasswordTokenCreate: + out.Event = &OneOf_ResetPasswordTokenCreate{ + ResetPasswordTokenCreate: e, + } + case *TrustedClusterCreate: + out.Event = &OneOf_TrustedClusterCreate{ + TrustedClusterCreate: e, + } + case *TrustedClusterDelete: + out.Event = &OneOf_TrustedClusterDelete{ + TrustedClusterDelete: e, + } + case *TrustedClusterTokenCreate: + out.Event = &OneOf_TrustedClusterTokenCreate{ + TrustedClusterTokenCreate: e, + } + case *GithubConnectorCreate: + out.Event = &OneOf_GithubConnectorCreate{ + GithubConnectorCreate: e, + } + case *GithubConnectorDelete: + out.Event = &OneOf_GithubConnectorDelete{ + GithubConnectorDelete: e, + } + case *OIDCConnectorCreate: + out.Event = &OneOf_OIDCConnectorCreate{ + OIDCConnectorCreate: e, + } + case *OIDCConnectorDelete: + out.Event = &OneOf_OIDCConnectorDelete{ + OIDCConnectorDelete: e, + } + case *SAMLConnectorCreate: + out.Event = &OneOf_SAMLConnectorCreate{ + SAMLConnectorCreate: e, + } + case *SAMLConnectorDelete: + out.Event = &OneOf_SAMLConnectorDelete{ + SAMLConnectorDelete: e, + } + default: + return nil, trace.BadParameter("event type %T is not supported", in) + } + return &out, nil +} + +// FromOneOf converts audit event from one of wrapper to interface +func FromOneOf(in OneOf) (AuditEvent, error) { + if e := in.GetUserLogin(); e != nil { + return e, nil + } else if e := in.GetUserCreate(); e != nil { + return e, nil + } else if e := in.GetUserDelete(); e != nil { + return e, nil + } else if e := in.GetUserPasswordChange(); e != nil { + return e, nil + } else if e := in.GetSessionStart(); e != 
nil { + return e, nil + } else if e := in.GetSessionJoin(); e != nil { + return e, nil + } else if e := in.GetSessionPrint(); e != nil { + return e, nil + } else if e := in.GetSessionReject(); e != nil { + return e, nil + } else if e := in.GetResize(); e != nil { + return e, nil + } else if e := in.GetSessionEnd(); e != nil { + return e, nil + } else if e := in.GetSessionCommand(); e != nil { + return e, nil + } else if e := in.GetSessionDisk(); e != nil { + return e, nil + } else if e := in.GetSessionNetwork(); e != nil { + return e, nil + } else if e := in.GetSessionData(); e != nil { + return e, nil + } else if e := in.GetSessionLeave(); e != nil { + return e, nil + } else if e := in.GetPortForward(); e != nil { + return e, nil + } else if e := in.GetX11Forward(); e != nil { + return e, nil + } else if e := in.GetSCP(); e != nil { + return e, nil + } else if e := in.GetExec(); e != nil { + return e, nil + } else if e := in.GetSubsystem(); e != nil { + return e, nil + } else if e := in.GetClientDisconnect(); e != nil { + return e, nil + } else if e := in.GetAuthAttempt(); e != nil { + return e, nil + } else if e := in.GetAccessRequestCreate(); e != nil { + return e, nil + } else if e := in.GetResetPasswordTokenCreate(); e != nil { + return e, nil + } else if e := in.GetRoleCreate(); e != nil { + return e, nil + } else if e := in.GetRoleDelete(); e != nil { + return e, nil + } else if e := in.GetTrustedClusterCreate(); e != nil { + return e, nil + } else if e := in.GetTrustedClusterDelete(); e != nil { + return e, nil + } else if e := in.GetTrustedClusterTokenCreate(); e != nil { + return e, nil + } else if e := in.GetGithubConnectorCreate(); e != nil { + return e, nil + } else if e := in.GetGithubConnectorDelete(); e != nil { + return e, nil + } else if e := in.GetOIDCConnectorCreate(); e != nil { + return e, nil + } else if e := in.GetOIDCConnectorDelete(); e != nil { + return e, nil + } else if e := in.GetSAMLConnectorCreate(); e != nil { + return e, nil + } else if e := in.GetSAMLConnectorDelete(); e != nil { + return e, nil + } else { + return nil, trace.BadParameter("received unsupported event %T", in.Event) + } +} diff --git a/lib/events/discard.go b/lib/events/discard.go index be2aa70a40e5b..5d766689cdc6c 100644 --- a/lib/events/discard.go +++ b/lib/events/discard.go @@ -39,8 +39,7 @@ func (d *DiscardAuditLog) WaitForDelivery(context.Context) error { func (d *DiscardAuditLog) Close() error { return nil } - -func (d *DiscardAuditLog) EmitAuditEvent(event Event, fields EventFields) error { +func (d *DiscardAuditLog) EmitAuditEventLegacy(event Event, fields EventFields) error { return nil } func (d *DiscardAuditLog) PostSessionSlice(SessionSlice) error { diff --git a/lib/events/dynamoevents/dynamoevents.go b/lib/events/dynamoevents/dynamoevents.go index 22636f4d06815..3a80a89d13a18 100644 --- a/lib/events/dynamoevents/dynamoevents.go +++ b/lib/events/dynamoevents/dynamoevents.go @@ -217,7 +217,49 @@ const ( ) // EmitAuditEvent emits audit event -func (l *Log) EmitAuditEvent(ev events.Event, fields events.EventFields) error { +func (l *Log) EmitAuditEvent(ctx context.Context, in events.AuditEvent) error { + data, err := utils.FastMarshal(in) + if err != nil { + return trace.Wrap(err) + } + + var sessionID string + getter, ok := in.(events.SessionMetadataGetter) + if ok && getter.GetSessionID() != "" { + sessionID = getter.GetSessionID() + } else { + // no session id - global event gets a random uuid to get a good partition + // key distribution + sessionID = uuid.New() + } + + e := 
event{ + SessionID: sessionID, + EventIndex: in.GetIndex(), + EventType: in.GetType(), + EventNamespace: defaults.Namespace, + CreatedAt: in.GetTime().Unix(), + Fields: string(data), + } + l.setExpiry(&e) + av, err := dynamodbattribute.MarshalMap(e) + if err != nil { + return trace.Wrap(err) + } + input := dynamodb.PutItemInput{ + Item: av, + TableName: aws.String(l.Tablename), + } + _, err = l.svc.PutItemWithContext(ctx, &input) + err = convertError(err) + if err != nil { + return trace.Wrap(err) + } + return nil +} + +// EmitAuditEventLegacy emits audit event +func (l *Log) EmitAuditEventLegacy(ev events.Event, fields events.EventFields) error { sessionID := fields.GetString(events.SessionEventID) eventIndex := fields.GetInt(events.EventIndex) // no session id - global event gets a random uuid to get a good partition @@ -588,6 +630,53 @@ func (b *Log) Close() error { return nil } +// deleteAllItems deletes all items from the database, used in tests +func (b *Log) deleteAllItems() error { + out, err := b.svc.Scan(&dynamodb.ScanInput{TableName: aws.String(b.Tablename)}) + if err != nil { + return trace.Wrap(err) + } + var requests []*dynamodb.WriteRequest + for _, item := range out.Items { + requests = append(requests, &dynamodb.WriteRequest{ + DeleteRequest: &dynamodb.DeleteRequest{ + Key: map[string]*dynamodb.AttributeValue{ + keySessionID: item[keySessionID], + keyEventIndex: item[keyEventIndex], + }, + }, + }) + } + if len(requests) == 0 { + return nil + } + req, _ := b.svc.BatchWriteItemRequest(&dynamodb.BatchWriteItemInput{ + RequestItems: map[string][]*dynamodb.WriteRequest{ + b.Tablename: requests, + }, + }) + err = req.Send() + err = convertError(err) + if err != nil { + return trace.Wrap(err) + } + return nil +} + +// deleteTable deletes DynamoDB table with a given name +func (b *Log) deleteTable(tableName string, wait bool) error { + tn := aws.String(tableName) + _, err := b.svc.DeleteTable(&dynamodb.DeleteTableInput{TableName: tn}) + if err != nil { + return trace.Wrap(err) + } + if wait { + return trace.Wrap( + b.svc.WaitUntilTableNotExists(&dynamodb.DescribeTableInput{TableName: tn})) + } + return nil +} + func convertError(err error) error { if err == nil { return nil diff --git a/lib/events/dynamoevents/dynamoevents_test.go b/lib/events/dynamoevents/dynamoevents_test.go index 15376f80b30ad..857b2548b45be 100644 --- a/lib/events/dynamoevents/dynamoevents_test.go +++ b/lib/events/dynamoevents/dynamoevents_test.go @@ -1,5 +1,3 @@ -// +build dynamodb - /* Copyright 2018 Gravitational, Inc. 
@@ -21,9 +19,12 @@ package dynamoevents import ( "fmt" + "os" + "strconv" "testing" "time" + "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/events/test" "github.com/gravitational/teleport/lib/utils" @@ -43,19 +44,26 @@ type DynamoeventsSuite struct { var _ = check.Suite(&DynamoeventsSuite{}) func (s *DynamoeventsSuite) SetUpSuite(c *check.C) { - utils.InitLoggerForTests() + utils.InitLoggerForTests(testing.Verbose()) + + testEnabled := os.Getenv(teleport.AWSRunTests) + if ok, _ := strconv.ParseBool(testEnabled); !ok { + c.Skip("Skipping AWS-dependent test suite.") + } + fakeClock := clockwork.NewFakeClock() log, err := New(Config{ - Region: "us-west-1", - Tablename: fmt.Sprintf("teleport-test-%v", uuid.New()), - Clock: fakeClock, - UID: utils.NewFakeUID(), + Region: "us-west-1", + Tablename: fmt.Sprintf("teleport-test-%v", uuid.New()), + Clock: fakeClock, + UIDGenerator: utils.NewFakeUID(), }) c.Assert(err, check.IsNil) s.log = log s.EventsSuite.Log = log s.EventsSuite.Clock = fakeClock s.EventsSuite.QueryDelay = time.Second + } func (s *DynamoeventsSuite) SetUpTest(c *check.C) { diff --git a/lib/events/emitter.go b/lib/events/emitter.go new file mode 100644 index 0000000000000..0fdcce62d22b1 --- /dev/null +++ b/lib/events/emitter.go @@ -0,0 +1,619 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package events + +import ( + "context" + "time" + + "github.com/gravitational/teleport" + "github.com/gravitational/teleport/lib/session" + "github.com/gravitational/teleport/lib/utils" + + "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" + log "github.com/sirupsen/logrus" +) + +// CheckingEmitterConfig provides parameters for emitter +type CheckingEmitterConfig struct { + // Inner emits events to the underlying store + Inner Emitter + // Clock is a clock interface, used in tests + Clock clockwork.Clock + // UIDGenerator is unique ID generator + UIDGenerator utils.UID +} + +// NewCheckingEmitter returns emitter that checks +// that all required fields are properly set +func NewCheckingEmitter(cfg CheckingEmitterConfig) (*CheckingEmitter, error) { + if err := cfg.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + return &CheckingEmitter{ + CheckingEmitterConfig: cfg, + }, nil +} + +// CheckingEmitter ensures that event fields have been set properly +// and reports statistics for every wrapper +type CheckingEmitter struct { + CheckingEmitterConfig +} + +// CheckAndSetDefaults checks and sets default values +func (w *CheckingEmitterConfig) CheckAndSetDefaults() error { + if w.Inner == nil { + return trace.BadParameter("missing parameter Inner") + } + if w.Clock == nil { + w.Clock = clockwork.NewRealClock() + } + if w.UIDGenerator == nil { + w.UIDGenerator = utils.NewRealUID() + } + return nil +} + +// EmitAuditEvent emits audit event +func (r *CheckingEmitter) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + if err := CheckAndSetEventFields(event, r.Clock, r.UIDGenerator); err != nil { + log.WithError(err).Errorf("Failed to emit audit event.") + auditFailedEmit.Inc() + return trace.Wrap(err) + } + if err := r.Inner.EmitAuditEvent(ctx, event); err != nil { + auditFailedEmit.Inc() + log.WithError(err).Errorf("Failed to emit audit event.") + return trace.Wrap(err) + } + return nil +} + +// CheckAndSetEventFields updates passed event fields with additional information +// common for all event types such as unique IDs, timestamps, codes, etc. +// +// This method is a "final stop" for various audit log implementations for +// updating event fields before it gets persisted in the backend. 
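// --- Editor's sketch, not part of the original commit ---
// Wrapping an emitter with the checking layer described above, so that IDs,
// codes and timestamps are validated and filled in before the event reaches
// the underlying store. The LoggingEmitter used as the inner emitter is
// defined further down in this file.
func exampleCheckingEmitter(ctx context.Context) error {
	emitter, err := NewCheckingEmitter(CheckingEmitterConfig{
		Inner: NewLoggingEmitter(),
	})
	if err != nil {
		return trace.Wrap(err)
	}
	event := &SessionStart{
		Metadata: Metadata{
			Type: SessionStartEvent,
			Code: SessionStartCode,
		},
	}
	// CheckAndSetEventFields (below) assigns the missing ID and timestamp
	// before the event is forwarded to the inner emitter.
	return emitter.EmitAuditEvent(ctx, event)
}
// --- end of editor's sketch ---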
+func CheckAndSetEventFields(event AuditEvent, clock clockwork.Clock, uid utils.UID) error { + if event.GetType() == "" { + return trace.BadParameter("missing mandatory event type field") + } + if event.GetCode() == "" && event.GetType() != SessionPrintEvent { + return trace.BadParameter("missing mandatory event code field for %v event", event.GetType()) + } + if event.GetID() == "" && event.GetType() != SessionPrintEvent { + event.SetID(uid.New()) + } + if event.GetTime().IsZero() { + event.SetTime(clock.Now().UTC().Round(time.Millisecond)) + } + return nil +} + +// DiscardStream returns a stream that discards all events +type DiscardStream struct { +} + +// Write discards data +func (*DiscardStream) Write(p []byte) (n int, err error) { + return len(p), nil +} + +// Status returns a channel that always blocks +func (*DiscardStream) Status() <-chan StreamStatus { + return nil +} + +// Done returns channel closed when streamer is closed +// should be used to detect sending errors +func (*DiscardStream) Done() <-chan struct{} { + return nil +} + +// Close flushes non-uploaded flight stream data without marking +// the stream completed and closes the stream instance +func (*DiscardStream) Close(ctx context.Context) error { + return nil +} + +// Complete does nothing +func (*DiscardStream) Complete(ctx context.Context) error { + return nil +} + +// EmitAuditEvent discards audit event +func (*DiscardStream) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + log.Debugf("Dicarding stream event: %v", event) + return nil +} + +// NewDiscardEmitter returns a no-op discard emitter +func NewDiscardEmitter() *DiscardEmitter { + return &DiscardEmitter{} +} + +// DiscardEmitter discards all events +type DiscardEmitter struct { +} + +// EmitAuditEvent discards audit event +func (*DiscardEmitter) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + log.Debugf("Dicarding event: %v", event) + return nil +} + +// CreateAuditStream creates a stream that discards all events +func (*DiscardEmitter) CreateAuditStream(ctx context.Context, sid session.ID) (Stream, error) { + return &DiscardStream{}, nil +} + +// ResumeAuditStream resumes a stream that discards all events +func (*DiscardEmitter) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (Stream, error) { + return &DiscardStream{}, nil +} + +// NewLoggingEmitter returns an emitter that logs all events to the console +// with the info level +func NewLoggingEmitter() *LoggingEmitter { + return &LoggingEmitter{} +} + +// LoggingEmitter logs all events with info level +type LoggingEmitter struct { +} + +// EmitAuditEvent logs audit event, skips session print events +// and session disk events, because they are very verbose +func (*LoggingEmitter) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + switch event.GetType() { + case ResizeEvent, SessionDiskEvent, SessionPrintEvent, "": + return nil + } + + data, err := utils.FastMarshal(event) + if err != nil { + return trace.Wrap(err) + } + + var fields log.Fields + err = utils.FastUnmarshal(data, &fields) + if err != nil { + return trace.Wrap(err) + } + fields[trace.Component] = teleport.Component(teleport.ComponentAuditLog) + + log.WithFields(fields).Infof(event.GetType()) + return nil +} + +// NewMultiEmitter returns emitter that writes +// events to all emitters +func NewMultiEmitter(emitters ...Emitter) *MultiEmitter { + return &MultiEmitter{ + emitters: emitters, + } +} + +// MultiEmitter writes audit events to multiple emitters +type MultiEmitter 
struct { + emitters []Emitter +} + +// EmitAuditEvent emits audit event to all emitters +func (m *MultiEmitter) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + var errors []error + for i := range m.emitters { + err := m.emitters[i].EmitAuditEvent(ctx, event) + if err != nil { + errors = append(errors, err) + } + } + return trace.NewAggregate(errors...) +} + +// StreamerAndEmitter combines streamer and emitter to create stream emitter +type StreamerAndEmitter struct { + Streamer + Emitter +} + +// CheckingStreamerConfig provides parameters for streamer +type CheckingStreamerConfig struct { + // Inner emits events to the underlying store + Inner Streamer + // Clock is a clock interface, used in tests + Clock clockwork.Clock + // UIDGenerator is unique ID generator + UIDGenerator utils.UID +} + +// NewCheckingStream wraps stream and makes sure event UIDs and timing are in place +func NewCheckingStream(stream Stream, clock clockwork.Clock) Stream { + return &CheckingStream{ + stream: stream, + clock: clock, + uidGenerator: utils.NewRealUID(), + } +} + +// NewCheckingStreamer returns streamer that checks +// that all required fields are properly set +func NewCheckingStreamer(cfg CheckingStreamerConfig) (*CheckingStreamer, error) { + if err := cfg.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + return &CheckingStreamer{ + CheckingStreamerConfig: cfg, + }, nil +} + +// CheckingStreamer ensures that event fields have been set properly +// and reports statistics for every wrapper +type CheckingStreamer struct { + CheckingStreamerConfig +} + +// CreateAuditStream creates audit event stream +func (s *CheckingStreamer) CreateAuditStream(ctx context.Context, sid session.ID) (Stream, error) { + stream, err := s.Inner.CreateAuditStream(ctx, sid) + if err != nil { + return nil, trace.Wrap(err) + } + return &CheckingStream{ + clock: s.CheckingStreamerConfig.Clock, + uidGenerator: s.CheckingStreamerConfig.UIDGenerator, + stream: stream, + }, nil +} + +// ResumeAuditStream resumes audit event stream +func (s *CheckingStreamer) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (Stream, error) { + stream, err := s.Inner.ResumeAuditStream(ctx, sid, uploadID) + if err != nil { + return nil, trace.Wrap(err) + } + return &CheckingStream{ + clock: s.CheckingStreamerConfig.Clock, + uidGenerator: s.CheckingStreamerConfig.UIDGenerator, + stream: stream, + }, nil +} + +// CheckAndSetDefaults checks and sets default values +func (w *CheckingStreamerConfig) CheckAndSetDefaults() error { + if w.Inner == nil { + return trace.BadParameter("missing parameter Inner") + } + if w.Clock == nil { + w.Clock = clockwork.NewRealClock() + } + if w.UIDGenerator == nil { + w.UIDGenerator = utils.NewRealUID() + } + return nil +} + +// CheckingStream verifies every event +type CheckingStream struct { + stream Stream + clock clockwork.Clock + uidGenerator utils.UID +} + +// Close flushes non-uploaded flight stream data without marking +// the stream completed and closes the stream instance +func (s *CheckingStream) Close(ctx context.Context) error { + return s.stream.Close(ctx) +} + +// Done returns channel closed when streamer is closed +// should be used to detect sending errors +func (s *CheckingStream) Done() <-chan struct{} { + return s.stream.Done() +} + +// Status returns channel receiving updates about stream status +// last event index that was uploaded and upload ID +func (s *CheckingStream) Status() <-chan StreamStatus { + return s.stream.Status() +} + +// Complete 
closes the stream and marks it finalized +func (s *CheckingStream) Complete(ctx context.Context) error { + return s.stream.Complete(ctx) +} + +// EmitAuditEvent emits audit event +func (s *CheckingStream) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + if err := CheckAndSetEventFields(event, s.clock, s.uidGenerator); err != nil { + log.WithError(err).Errorf("Failed to emit audit event %v(%v).", event.GetType(), event.GetCode()) + auditFailedEmit.Inc() + return trace.Wrap(err) + } + if err := s.stream.EmitAuditEvent(ctx, event); err != nil { + auditFailedEmit.Inc() + log.WithError(err).Errorf("Failed to emit audit event %v(%v).", event.GetType(), event.GetCode()) + return trace.Wrap(err) + } + return nil +} + +// NewTeeStreamer returns a streamer that forwards non print event +// to emitter in addition to sending them to the stream +func NewTeeStreamer(streamer Streamer, emitter Emitter) *TeeStreamer { + return &TeeStreamer{ + Emitter: emitter, + streamer: streamer, + } +} + +// CreateAuditStream creates audit event stream +func (t *TeeStreamer) CreateAuditStream(ctx context.Context, sid session.ID) (Stream, error) { + stream, err := t.streamer.CreateAuditStream(ctx, sid) + if err != nil { + return nil, trace.Wrap(err) + } + return &TeeStream{stream: stream, emitter: t.Emitter}, nil + +} + +// ResumeAuditStream resumes audit event stream +func (t *TeeStreamer) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (Stream, error) { + stream, err := t.streamer.ResumeAuditStream(ctx, sid, uploadID) + if err != nil { + return nil, trace.Wrap(err) + } + return &TeeStream{stream: stream, emitter: t.Emitter}, nil +} + +// TeeStreamer creates streams that forwards non print events +// to emitter +type TeeStreamer struct { + Emitter + streamer Streamer +} + +// TeeStream sends non print events to emitter +// in addition to the stream itself +type TeeStream struct { + emitter Emitter + stream Stream +} + +// Done returns channel closed when streamer is closed +// should be used to detect sending errors +func (t *TeeStream) Done() <-chan struct{} { + return t.stream.Done() +} + +// Status returns channel receiving updates about stream status +// last event index that was uploaded and upload ID +func (t *TeeStream) Status() <-chan StreamStatus { + return t.stream.Status() +} + +// Close flushes non-uploaded flight stream data without marking +// the stream completed and closes the stream instance +func (t *TeeStream) Close(ctx context.Context) error { + return t.stream.Close(ctx) +} + +// Complete closes the stream and marks it finalized +func (t *TeeStream) Complete(ctx context.Context) error { + return t.stream.Complete(ctx) +} + +// EmitAuditEvent emits audit events and forwards session control events +// to the audit log +func (t *TeeStream) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + var errors []error + if err := t.stream.EmitAuditEvent(ctx, event); err != nil { + errors = append(errors, err) + } + // Forward session events except the ones that pollute global logs + // terminal resize, print and disk access. + switch event.GetType() { + case ResizeEvent, SessionDiskEvent, SessionPrintEvent, "": + return trace.NewAggregate(errors...) + } + if err := t.emitter.EmitAuditEvent(ctx, event); err != nil { + errors = append(errors, err) + } + return trace.NewAggregate(errors...) 
+} + +// NewCallbackStreamer returns a streamer that invokes a callback on every +// action; it is used in tests to inject failures +func NewCallbackStreamer(cfg CallbackStreamerConfig) (*CallbackStreamer, error) { + if err := cfg.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + return &CallbackStreamer{ + CallbackStreamerConfig: cfg, + }, nil +} + +// CallbackStreamerConfig provides parameters for streamer +type CallbackStreamerConfig struct { + // Inner emits events to the underlying store + Inner Streamer + // OnCreateAuditStream is called when an audit stream is created + OnCreateAuditStream func(ctx context.Context, sid session.ID, inner Streamer) (Stream, error) + // OnResumeAuditStream is called when an audit stream is resumed + OnResumeAuditStream func(ctx context.Context, sid session.ID, uploadID string, inner Streamer) (Stream, error) + // OnEmitAuditEvent is called when an audit event is emitted on a stream + OnEmitAuditEvent func(ctx context.Context, sid session.ID, event AuditEvent) error +} + +// CheckAndSetDefaults checks and sets default values +func (c *CallbackStreamerConfig) CheckAndSetDefaults() error { + if c.Inner == nil { + return trace.BadParameter("missing parameter Inner") + } + return nil +} + +// CallbackStreamer invokes optional callbacks when a stream is created or +// resumed and when events are emitted; it is used in tests to inject failures +type CallbackStreamer struct { + CallbackStreamerConfig +} + +// CreateAuditStream creates audit event stream +func (s *CallbackStreamer) CreateAuditStream(ctx context.Context, sid session.ID) (Stream, error) { + var stream Stream + var err error + if s.OnCreateAuditStream != nil { + stream, err = s.OnCreateAuditStream(ctx, sid, s.Inner) + } else { + stream, err = s.Inner.CreateAuditStream(ctx, sid) + } + if err != nil { + return nil, trace.Wrap(err) + } + return &CallbackStream{ + stream: stream, + sessionID: sid, + streamer: s, + }, nil +} + +// ResumeAuditStream resumes audit event stream +func (s *CallbackStreamer) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (Stream, error) { + var stream Stream + var err error + if s.OnResumeAuditStream != nil { + stream, err = s.OnResumeAuditStream(ctx, sid, uploadID, s.Inner) + } else { + stream, err = s.Inner.ResumeAuditStream(ctx, sid, uploadID) + } + if err != nil { + return nil, trace.Wrap(err) + } + return &CallbackStream{ + stream: stream, + sessionID: sid, + streamer: s, + }, nil +} + +// CallbackStream invokes a callback on every emitted audit event +// before forwarding it to the underlying stream +type CallbackStream struct { + stream Stream + sessionID session.ID + streamer *CallbackStreamer +} + +// Close flushes non-uploaded flight stream data without marking +// the stream completed and closes the stream instance +func (s *CallbackStream) Close(ctx context.Context) error { + return s.stream.Close(ctx) +} + +// Done returns channel closed when streamer is closed +// should be used to detect sending errors +func (s *CallbackStream) Done() <-chan struct{} { + return s.stream.Done() +} + +// Status returns channel receiving updates about stream status +// last event index that was uploaded and upload ID +func (s *CallbackStream) Status() <-chan StreamStatus { + return s.stream.Status() +} + +// Complete closes the stream and marks it finalized +func (s *CallbackStream) Complete(ctx context.Context) error { + return s.stream.Complete(ctx) +} + +// EmitAuditEvent emits audit event +func (s *CallbackStream) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + if s.streamer.OnEmitAuditEvent != nil { + if err := s.streamer.OnEmitAuditEvent(ctx, s.sessionID, event); err != nil { + return
trace.Wrap(err) + } + } + return s.stream.EmitAuditEvent(ctx, event) +} + +// NewReportingStreamer reports upload events +// to the eventsC channel, if the channel is not nil. +func NewReportingStreamer(streamer Streamer, eventsC chan UploadEvent) *ReportingStreamer { + return &ReportingStreamer{ + streamer: streamer, + eventsC: eventsC, + } +} + +// ReportingStreamer reports upload events +// to the eventsC channel, if the channel is not nil. +type ReportingStreamer struct { + streamer Streamer + eventsC chan UploadEvent +} + +// CreateAuditStream creates audit event stream +func (s *ReportingStreamer) CreateAuditStream(ctx context.Context, sid session.ID) (Stream, error) { + stream, err := s.streamer.CreateAuditStream(ctx, sid) + if err != nil { + return nil, trace.Wrap(err) + } + return &ReportingStream{ + Stream: stream, + eventsC: s.eventsC, + sessionID: sid, + }, nil +} + +// ResumeAuditStream resumes audit event stream +func (s *ReportingStreamer) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (Stream, error) { + stream, err := s.streamer.ResumeAuditStream(ctx, sid, uploadID) + if err != nil { + return nil, trace.Wrap(err) + } + return &ReportingStream{ + Stream: stream, + sessionID: sid, + eventsC: s.eventsC, + }, nil +} + +// ReportingStream reports status of uploads to the events channel +type ReportingStream struct { + Stream + sessionID session.ID + eventsC chan UploadEvent +} + +// Complete closes the stream and marks it finalized +func (s *ReportingStream) Complete(ctx context.Context) error { + err := s.Stream.Complete(ctx) + if s.eventsC == nil { + return trace.Wrap(err) + } + select { + case s.eventsC <- UploadEvent{ + SessionID: string(s.sessionID), + Error: err, + }: + default: + log.Warningf("Skip send event on a blocked channel.") + } + return trace.Wrap(err) +} diff --git a/lib/events/emitter_test.go b/lib/events/emitter_test.go new file mode 100644 index 0000000000000..48b1f523b6e6d --- /dev/null +++ b/lib/events/emitter_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package events + +import ( + "bytes" + "context" + "fmt" + "testing" + + "github.com/gravitational/teleport/lib/session" + + "github.com/stretchr/testify/assert" +) + +// TestProtoStreamer tests edge cases of proto streamer implementation +func TestProtoStreamer(t *testing.T) { + type testCase struct { + name string + minUploadBytes int64 + events []AuditEvent + err error + } + testCases := []testCase{ + { + name: "5MB similar to S3 min size in bytes", + minUploadBytes: 1024 * 1024 * 5, + events: GenerateTestSession(SessionParams{PrintEvents: 1}), + }, + { + name: "get a part per message", + minUploadBytes: 1, + events: GenerateTestSession(SessionParams{PrintEvents: 1}), + }, + { + name: "small load test with some uneven numbers", + minUploadBytes: 1024, + events: GenerateTestSession(SessionParams{PrintEvents: 1000}), + }, + { + name: "no events", + minUploadBytes: 1024*1024*5 + 64*1024, + }, + { + name: "one event using the whole part", + minUploadBytes: 1, + events: GenerateTestSession(SessionParams{PrintEvents: 0})[:1], + }, + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + for i, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + uploader := NewMemoryUploader() + streamer, err := NewProtoStreamer(ProtoStreamerConfig{ + Uploader: uploader, + MinUploadBytes: tc.minUploadBytes, + }) + assert.Nil(t, err) + + sid := session.ID(fmt.Sprintf("test-%v", i)) + stream, err := streamer.CreateAuditStream(ctx, sid) + assert.Nil(t, err) + + events := tc.events + for _, event := range events { + err := stream.EmitAuditEvent(ctx, event) + if tc.err != nil { + assert.IsType(t, tc.err, err) + return + } + assert.Nil(t, err) + } + err = stream.Complete(ctx) + assert.Nil(t, err) + + var outEvents []AuditEvent + uploads, err := uploader.ListUploads(ctx) + assert.Nil(t, err) + parts, err := uploader.GetParts(uploads[0].ID) + assert.Nil(t, err) + + for _, part := range parts { + reader := NewProtoReader(bytes.NewReader(part)) + out, err := reader.ReadAll(ctx) + assert.Nil(t, err, "part crash %#v", part) + outEvents = append(outEvents, out...) + } + + assert.Equal(t, events, outEvents) + }) + } +} diff --git a/lib/events/events.pb.go b/lib/events/events.pb.go new file mode 100644 index 0000000000000..e75d0757c984a --- /dev/null +++ b/lib/events/events.pb.go @@ -0,0 +1,17522 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: events.proto + +package events + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/golang/protobuf/ptypes/struct" +import _ "github.com/golang/protobuf/ptypes/timestamp" + +import time "time" + +import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Metadata is a common event metadata +type Metadata struct { + // Index is a monotonicaly incremented index in the event sequence + Index int64 `protobuf:"varint,1,opt,name=Index,proto3" json:"ei"` + // Type is the event type + Type string `protobuf:"bytes,2,opt,name=Type,proto3" json:"event"` + // ID is a unique event identifier + ID string `protobuf:"bytes,3,opt,name=ID,proto3" json:"uid,omitempty"` + // Code is a unique event code + Code string `protobuf:"bytes,4,opt,name=Code,proto3" json:"code,omitempty"` + // Time is event time + Time time.Time `protobuf:"bytes,5,opt,name=Time,stdtime" json:"time"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{0} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(dst, src) +} +func (m *Metadata) XXX_Size() int { + return m.Size() +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +// SesssionMetadata is a common session event metadata +type SessionMetadata struct { + // SessionID is a unique UUID of the session. 
+ SessionID string `protobuf:"bytes,1,opt,name=SessionID,proto3" json:"sid"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionMetadata) Reset() { *m = SessionMetadata{} } +func (m *SessionMetadata) String() string { return proto.CompactTextString(m) } +func (*SessionMetadata) ProtoMessage() {} +func (*SessionMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{1} +} +func (m *SessionMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SessionMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SessionMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionMetadata.Merge(dst, src) +} +func (m *SessionMetadata) XXX_Size() int { + return m.Size() +} +func (m *SessionMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_SessionMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionMetadata proto.InternalMessageInfo + +// UserMetadata is a common user event metadata +type UserMetadata struct { + // User is teleport user name + User string `protobuf:"bytes,1,opt,name=User,proto3" json:"user"` + // Login is OS login + Login string `protobuf:"bytes,2,opt,name=Login,proto3" json:"login,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserMetadata) Reset() { *m = UserMetadata{} } +func (m *UserMetadata) String() string { return proto.CompactTextString(m) } +func (*UserMetadata) ProtoMessage() {} +func (*UserMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{2} +} +func (m *UserMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *UserMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserMetadata.Merge(dst, src) +} +func (m *UserMetadata) XXX_Size() int { + return m.Size() +} +func (m *UserMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_UserMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_UserMetadata proto.InternalMessageInfo + +// Server is a server metadata +type ServerMetadata struct { + // ServerNamespace is a namespace of the server event + ServerNamespace string `protobuf:"bytes,1,opt,name=ServerNamespace,proto3" json:"namespace,omitempty"` + // ServerID is the UUID of the server the session occurred on. + ServerID string `protobuf:"bytes,2,opt,name=ServerID,proto3" json:"server_id"` + // ServerHostname is the hostname of the server the session occurred on. + ServerHostname string `protobuf:"bytes,3,opt,name=ServerHostname,proto3" json:"server_hostname,omitempty"` + // ServerAddr is the address of the server the session occurred on. + ServerAddr string `protobuf:"bytes,4,opt,name=ServerAddr,proto3" json:"server_addr,omitempty"` + // ServerLabels are the labels (static and dynamic) of the server the + // session occurred on. 
+ ServerLabels map[string]string `protobuf:"bytes,5,rep,name=ServerLabels" json:"server_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerMetadata) Reset() { *m = ServerMetadata{} } +func (m *ServerMetadata) String() string { return proto.CompactTextString(m) } +func (*ServerMetadata) ProtoMessage() {} +func (*ServerMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{3} +} +func (m *ServerMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServerMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ServerMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerMetadata.Merge(dst, src) +} +func (m *ServerMetadata) XXX_Size() int { + return m.Size() +} +func (m *ServerMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_ServerMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerMetadata proto.InternalMessageInfo + +// Connection contains connection infro +type ConnectionMetadata struct { + // LocalAddr is a target address on the host + LocalAddr string `protobuf:"bytes,1,opt,name=LocalAddr,proto3" json:"addr.local,omitempty"` + // RemoteAddr is a client (user's) address + RemoteAddr string `protobuf:"bytes,2,opt,name=RemoteAddr,proto3" json:"addr.remote,omitempty"` + // Protocol specifies protocol that was captured + Protocol string `protobuf:"bytes,3,opt,name=Protocol,proto3" json:"proto,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectionMetadata) Reset() { *m = ConnectionMetadata{} } +func (m *ConnectionMetadata) String() string { return proto.CompactTextString(m) } +func (*ConnectionMetadata) ProtoMessage() {} +func (*ConnectionMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{4} +} +func (m *ConnectionMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectionMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ConnectionMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionMetadata.Merge(dst, src) +} +func (m *ConnectionMetadata) XXX_Size() int { + return m.Size() +} +func (m *ConnectionMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionMetadata proto.InternalMessageInfo + +// SessionStart is a session start event +type SessionStart struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // SessionMetadata is a common event session metadata + SessionMetadata `protobuf:"bytes,3,opt,name=Session,embedded=Session" json:""` + // ServerMetadata is a common server metadata + ServerMetadata 
`protobuf:"bytes,4,opt,name=Server,embedded=Server" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,5,opt,name=Connection,embedded=Connection" json:""` + // TerminalSize is expressed as 'W:H' + TerminalSize string `protobuf:"bytes,6,opt,name=TerminalSize,proto3" json:"size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionStart) Reset() { *m = SessionStart{} } +func (m *SessionStart) String() string { return proto.CompactTextString(m) } +func (*SessionStart) ProtoMessage() {} +func (*SessionStart) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{5} +} +func (m *SessionStart) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionStart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SessionStart.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SessionStart) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionStart.Merge(dst, src) +} +func (m *SessionStart) XXX_Size() int { + return m.Size() +} +func (m *SessionStart) XXX_DiscardUnknown() { + xxx_messageInfo_SessionStart.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionStart proto.InternalMessageInfo + +// SessionJoin emitted when another user joins a session +type SessionJoin struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // SessionMetadata is a common event session metadata + SessionMetadata `protobuf:"bytes,3,opt,name=Session,embedded=Session" json:""` + // ServerMetadata is a common server metadata + ServerMetadata `protobuf:"bytes,4,opt,name=Server,embedded=Server" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,5,opt,name=Connection,embedded=Connection" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionJoin) Reset() { *m = SessionJoin{} } +func (m *SessionJoin) String() string { return proto.CompactTextString(m) } +func (*SessionJoin) ProtoMessage() {} +func (*SessionJoin) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{6} +} +func (m *SessionJoin) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionJoin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SessionJoin.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SessionJoin) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionJoin.Merge(dst, src) +} +func (m *SessionJoin) XXX_Size() int { + return m.Size() +} +func (m *SessionJoin) XXX_DiscardUnknown() { + xxx_messageInfo_SessionJoin.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionJoin proto.InternalMessageInfo + +// SessionPrint event happens every time a write occurs to +// temirnal I/O during a session +type SessionPrint struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ChunkIndex is 
a monotonicaly incremented index for ordering print events + ChunkIndex int64 `protobuf:"varint,2,opt,name=ChunkIndex,proto3" json:"ci"` + // Data is data transferred, it is not marshaled to JSON format + Data []byte `protobuf:"bytes,3,opt,name=Data,proto3" json:"-"` + // Bytes says how many bytes have been written into the session + // during "print" event + Bytes int64 `protobuf:"varint,4,opt,name=Bytes,proto3" json:"bytes"` + // DelayMilliseconds is the delay in milliseconds from the start of the session + DelayMilliseconds int64 `protobuf:"varint,5,opt,name=DelayMilliseconds,proto3" json:"ms"` + // Offset is the offset in bytes in the session file + Offset int64 `protobuf:"varint,6,opt,name=Offset,proto3" json:"offset"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionPrint) Reset() { *m = SessionPrint{} } +func (m *SessionPrint) String() string { return proto.CompactTextString(m) } +func (*SessionPrint) ProtoMessage() {} +func (*SessionPrint) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{7} +} +func (m *SessionPrint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionPrint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SessionPrint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SessionPrint) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionPrint.Merge(dst, src) +} +func (m *SessionPrint) XXX_Size() int { + return m.Size() +} +func (m *SessionPrint) XXX_DiscardUnknown() { + xxx_messageInfo_SessionPrint.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionPrint proto.InternalMessageInfo + +// SessionReject event happens when a user hits the limit of maximum +// concurrent connections in the cluster `max_connections` +type SessionReject struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // ServerMetadata is a common server metadata + ServerMetadata `protobuf:"bytes,3,opt,name=Server,embedded=Server" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,4,opt,name=Connection,embedded=Connection" json:""` + // Reason is a field that specifies reason for event, e.g. in disconnect + // event it explains why server disconnected the client + Reason string `protobuf:"bytes,5,opt,name=Reason,proto3" json:"reason"` + // Maximum is an event field specifying a maximal value (e.g. the value + // of `max_connections` for a `session.rejected` event). 
+ Maximum int64 `protobuf:"varint,6,opt,name=Maximum,proto3" json:"max"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionReject) Reset() { *m = SessionReject{} } +func (m *SessionReject) String() string { return proto.CompactTextString(m) } +func (*SessionReject) ProtoMessage() {} +func (*SessionReject) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{8} +} +func (m *SessionReject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionReject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SessionReject.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SessionReject) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionReject.Merge(dst, src) +} +func (m *SessionReject) XXX_Size() int { + return m.Size() +} +func (m *SessionReject) XXX_DiscardUnknown() { + xxx_messageInfo_SessionReject.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionReject proto.InternalMessageInfo + +// Resize means that some user resized PTY on the client +type Resize struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // SessionMetadata is a common event session metadata + SessionMetadata `protobuf:"bytes,3,opt,name=Session,embedded=Session" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,4,opt,name=Connection,embedded=Connection" json:""` + // ServerMetadata is a common server metadata + ServerMetadata `protobuf:"bytes,5,opt,name=Server,embedded=Server" json:""` + // TerminalSize is expressed as 'W:H' + TerminalSize string `protobuf:"bytes,6,opt,name=TerminalSize,proto3" json:"size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resize) Reset() { *m = Resize{} } +func (m *Resize) String() string { return proto.CompactTextString(m) } +func (*Resize) ProtoMessage() {} +func (*Resize) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{9} +} +func (m *Resize) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Resize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Resize.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Resize) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resize.Merge(dst, src) +} +func (m *Resize) XXX_Size() int { + return m.Size() +} +func (m *Resize) XXX_DiscardUnknown() { + xxx_messageInfo_Resize.DiscardUnknown(m) +} + +var xxx_messageInfo_Resize proto.InternalMessageInfo + +// SessionEnd is a session end event +type SessionEnd struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // SessionMetadata is a common event session metadata + SessionMetadata `protobuf:"bytes,3,opt,name=Session,embedded=Session" json:""` + // ConnectionMetadata 
holds information about the connection + ConnectionMetadata `protobuf:"bytes,4,opt,name=Connection,embedded=Connection" json:""` + // ServerMetadata is a common server metadata + ServerMetadata `protobuf:"bytes,5,opt,name=Server,embedded=Server" json:""` + // EnhancedRecording is used to indicate if the recording was an + // enhanced recording or not. + EnhancedRecording bool `protobuf:"varint,6,opt,name=EnhancedRecording,proto3" json:"enhanced_recording"` + // Interactive is used to indicate if the session was interactive + // (has PTY attached) or not (exec session). + Interactive bool `protobuf:"varint,7,opt,name=Interactive,proto3" json:"interactive"` + // Participants is a list of participants in the session. + Participants []string `protobuf:"bytes,8,rep,name=Participants" json:"participants"` + // StartTime is the timestamp at which the session began. + StartTime time.Time `protobuf:"bytes,9,opt,name=StartTime,stdtime" json:"session_start,omitempty"` + // EndTime is the timestamp at which the session ended. + EndTime time.Time `protobuf:"bytes,10,opt,name=EndTime,stdtime" json:"session_stop,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionEnd) Reset() { *m = SessionEnd{} } +func (m *SessionEnd) String() string { return proto.CompactTextString(m) } +func (*SessionEnd) ProtoMessage() {} +func (*SessionEnd) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{10} +} +func (m *SessionEnd) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionEnd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SessionEnd.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SessionEnd) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionEnd.Merge(dst, src) +} +func (m *SessionEnd) XXX_Size() int { + return m.Size() +} +func (m *SessionEnd) XXX_DiscardUnknown() { + xxx_messageInfo_SessionEnd.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionEnd proto.InternalMessageInfo + +// BPFMetadata is a common BPF process metadata +type BPFMetadata struct { + // PID is the ID of the process. + PID uint64 `protobuf:"varint,1,opt,name=PID,proto3" json:"pid"` + // CgroupID is the internal cgroupv2 ID of the event. + CgroupID uint64 `protobuf:"varint,2,opt,name=CgroupID,proto3" json:"cgroup_id"` + // Program is name of the executable. 
+ Program string `protobuf:"bytes,3,opt,name=Program,proto3" json:"program"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BPFMetadata) Reset() { *m = BPFMetadata{} } +func (m *BPFMetadata) String() string { return proto.CompactTextString(m) } +func (*BPFMetadata) ProtoMessage() {} +func (*BPFMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{11} +} +func (m *BPFMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BPFMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BPFMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *BPFMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_BPFMetadata.Merge(dst, src) +} +func (m *BPFMetadata) XXX_Size() int { + return m.Size() +} +func (m *BPFMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_BPFMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_BPFMetadata proto.InternalMessageInfo + +// Status contains common command or operation status fields +type Status struct { + // Success indicates the success or failure of the operation + Success bool `protobuf:"varint,1,opt,name=Success,proto3" json:"success"` + // Error includes system error message for the failed attempt + Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"error,omitempty"` + // UserMessage is a user-friendly message for successfull or unsuccessfull auth attempt + UserMessage string `protobuf:"bytes,3,opt,name=UserMessage,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{12} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(dst, src) +} +func (m *Status) XXX_Size() int { + return m.Size() +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +// SessionCommand is a session command event +type SessionCommand struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // SessionMetadata is a common event session metadata + SessionMetadata `protobuf:"bytes,3,opt,name=Session,embedded=Session" json:""` + // ServerMetadata is a common server metadata + ServerMetadata `protobuf:"bytes,4,opt,name=Server,embedded=Server" json:""` + // BPFMetadata is a common BPF subsystem metadata + BPFMetadata `protobuf:"bytes,5,opt,name=BPF,embedded=BPF" json:""` + // PPID is the PID of the parent process. 
+ PPID uint64 `protobuf:"varint,6,opt,name=PPID,proto3" json:"ppid"` + // Path is the full path to the executable. + Path string `protobuf:"bytes,7,opt,name=Path,proto3" json:"path"` + // Argv is the list of arguments to the program. Note, the first element does + // not contain the name of the process. + Argv []string `protobuf:"bytes,8,rep,name=Argv" json:"argv"` + // ReturnCode is the return code of execve. + ReturnCode int32 `protobuf:"varint,9,opt,name=ReturnCode,proto3" json:"return_code"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionCommand) Reset() { *m = SessionCommand{} } +func (m *SessionCommand) String() string { return proto.CompactTextString(m) } +func (*SessionCommand) ProtoMessage() {} +func (*SessionCommand) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{13} +} +func (m *SessionCommand) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionCommand) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SessionCommand.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SessionCommand) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionCommand.Merge(dst, src) +} +func (m *SessionCommand) XXX_Size() int { + return m.Size() +} +func (m *SessionCommand) XXX_DiscardUnknown() { + xxx_messageInfo_SessionCommand.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionCommand proto.InternalMessageInfo + +// SessionDisk is a session disk access event +type SessionDisk struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // SessionMetadata is a common event session metadata + SessionMetadata `protobuf:"bytes,3,opt,name=Session,embedded=Session" json:""` + // ServerMetadata is a common server metadata + ServerMetadata `protobuf:"bytes,4,opt,name=Server,embedded=Server" json:""` + // BPFMetadata is a common BPF subsystem metadata + BPFMetadata `protobuf:"bytes,5,opt,name=BPF,embedded=BPF" json:""` + // Path is the full path to the executable. + Path string `protobuf:"bytes,6,opt,name=Path,proto3" json:"path"` + // Flags are the flags passed to open. 
+ Flags int32 `protobuf:"varint,7,opt,name=Flags,proto3" json:"flags"` + // ReturnCode is the return code of disk open + ReturnCode int32 `protobuf:"varint,8,opt,name=ReturnCode,proto3" json:"return_code"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionDisk) Reset() { *m = SessionDisk{} } +func (m *SessionDisk) String() string { return proto.CompactTextString(m) } +func (*SessionDisk) ProtoMessage() {} +func (*SessionDisk) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{14} +} +func (m *SessionDisk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionDisk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SessionDisk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SessionDisk) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionDisk.Merge(dst, src) +} +func (m *SessionDisk) XXX_Size() int { + return m.Size() +} +func (m *SessionDisk) XXX_DiscardUnknown() { + xxx_messageInfo_SessionDisk.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionDisk proto.InternalMessageInfo + +// SessionNetwork is a network event +type SessionNetwork struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // SessionMetadata is a common event session metadata + SessionMetadata `protobuf:"bytes,3,opt,name=Session,embedded=Session" json:""` + // ServerMetadata is a common server metadata + ServerMetadata `protobuf:"bytes,4,opt,name=Server,embedded=Server" json:""` + // BPFMetadata is a common BPF subsystem metadata + BPFMetadata `protobuf:"bytes,5,opt,name=BPF,embedded=BPF" json:""` + // SrcAddr is the source IP address of the connection. + SrcAddr string `protobuf:"bytes,6,opt,name=SrcAddr,proto3" json:"src_addr"` + // DstAddr is the destination IP address of the connection. + DstAddr string `protobuf:"bytes,7,opt,name=DstAddr,proto3" json:"dst_addr"` + // DstPort is the destination port of the connection. + DstPort int32 `protobuf:"varint,8,opt,name=DstPort,proto3" json:"dst_port"` + // TCPVersion is the version of TCP (4 or 6). 
+ TCPVersion int32 `protobuf:"varint,9,opt,name=TCPVersion,proto3" json:"version"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionNetwork) Reset() { *m = SessionNetwork{} } +func (m *SessionNetwork) String() string { return proto.CompactTextString(m) } +func (*SessionNetwork) ProtoMessage() {} +func (*SessionNetwork) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{15} +} +func (m *SessionNetwork) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionNetwork) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SessionNetwork.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SessionNetwork) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionNetwork.Merge(dst, src) +} +func (m *SessionNetwork) XXX_Size() int { + return m.Size() +} +func (m *SessionNetwork) XXX_DiscardUnknown() { + xxx_messageInfo_SessionNetwork.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionNetwork proto.InternalMessageInfo + +// SessionData is emitted to report session data usage. +type SessionData struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // SessionMetadata is a common event session metadata + SessionMetadata `protobuf:"bytes,3,opt,name=Session,embedded=Session" json:""` + // ServerMetadata is a common server metadata + ServerMetadata `protobuf:"bytes,4,opt,name=Server,embedded=Server" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,5,opt,name=Connection,embedded=Connection" json:""` + // BytesTransmitted is the amount of bytes transmitted + BytesTransmitted uint64 `protobuf:"varint,6,opt,name=BytesTransmitted,proto3" json:"tx"` + // BytesReceived is the amount of bytes received + BytesReceived uint64 `protobuf:"varint,7,opt,name=BytesReceived,proto3" json:"rx"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionData) Reset() { *m = SessionData{} } +func (m *SessionData) String() string { return proto.CompactTextString(m) } +func (*SessionData) ProtoMessage() {} +func (*SessionData) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{16} +} +func (m *SessionData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SessionData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SessionData) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionData.Merge(dst, src) +} +func (m *SessionData) XXX_Size() int { + return m.Size() +} +func (m *SessionData) XXX_DiscardUnknown() { + xxx_messageInfo_SessionData.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionData proto.InternalMessageInfo + +// SessionLeave is emitted to report that a user left the session +type SessionLeave struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" 
json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // SessionMetadata is a common event session metadata + SessionMetadata `protobuf:"bytes,3,opt,name=Session,embedded=Session" json:""` + // ServerMetadata is a common server metadata + ServerMetadata `protobuf:"bytes,4,opt,name=Server,embedded=Server" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,5,opt,name=Connection,embedded=Connection" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SessionLeave) Reset() { *m = SessionLeave{} } +func (m *SessionLeave) String() string { return proto.CompactTextString(m) } +func (*SessionLeave) ProtoMessage() {} +func (*SessionLeave) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{17} +} +func (m *SessionLeave) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionLeave) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SessionLeave.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SessionLeave) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionLeave.Merge(dst, src) +} +func (m *SessionLeave) XXX_Size() int { + return m.Size() +} +func (m *SessionLeave) XXX_DiscardUnknown() { + xxx_messageInfo_SessionLeave.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionLeave proto.InternalMessageInfo + +// UserLogin records a successfull or failed user login event +type UserLogin struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // Status contains common command or operation status fields + Status `protobuf:"bytes,3,opt,name=Status,embedded=Status" json:""` + // Method is the event field indicating how the login was performed + Method string `protobuf:"bytes,4,opt,name=Method,proto3" json:"method,omitempty"` + // IdentityAttributes is a map of user attributes received from identity provider + IdentityAttributes *Struct `protobuf:"bytes,5,opt,name=IdentityAttributes,casttype=Struct" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserLogin) Reset() { *m = UserLogin{} } +func (m *UserLogin) String() string { return proto.CompactTextString(m) } +func (*UserLogin) ProtoMessage() {} +func (*UserLogin) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{18} +} +func (m *UserLogin) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserLogin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserLogin.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *UserLogin) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserLogin.Merge(dst, src) +} +func (m *UserLogin) XXX_Size() int { + return m.Size() +} +func (m *UserLogin) XXX_DiscardUnknown() { + xxx_messageInfo_UserLogin.DiscardUnknown(m) +} + +var xxx_messageInfo_UserLogin proto.InternalMessageInfo + +// 
ResourceMetadata is a common resource metadata +type ResourceMetadata struct { + // ResourceName is a resource name + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"name,omitempty"` + // Expires is set if resource expires + Expires time.Time `protobuf:"bytes,2,opt,name=Expires,stdtime" json:"expires"` + // UpdatedBy if set indicates the user who modified the resource + UpdatedBy string `protobuf:"bytes,3,opt,name=UpdatedBy,proto3" json:"updated_by,omitempty"` + // TTL is a TTL of reset password token represented as duration, e.g. "10m" + // used for compatibility purposes for some events, Expires should be used instead + // as it's more useful (contains exact expiration date/time) + TTL string `protobuf:"bytes,4,opt,name=TTL,proto3" json:"ttl,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceMetadata) Reset() { *m = ResourceMetadata{} } +func (m *ResourceMetadata) String() string { return proto.CompactTextString(m) } +func (*ResourceMetadata) ProtoMessage() {} +func (*ResourceMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{19} +} +func (m *ResourceMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResourceMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetadata.Merge(dst, src) +} +func (m *ResourceMetadata) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetadata proto.InternalMessageInfo + +// UserCreate is emitted when the user is created or updated (upsert). +type UserCreate struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,3,opt,name=Resource,embedded=Resource" json:""` + // Roles is a list of roles for the user. + Roles []string `protobuf:"bytes,4,rep,name=Roles" json:"roles"` + // Connector is the connector used to create the user. 
+ Connector string `protobuf:"bytes,5,opt,name=Connector,proto3" json:"connector"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserCreate) Reset() { *m = UserCreate{} } +func (m *UserCreate) String() string { return proto.CompactTextString(m) } +func (*UserCreate) ProtoMessage() {} +func (*UserCreate) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{20} +} +func (m *UserCreate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserCreate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *UserCreate) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserCreate.Merge(dst, src) +} +func (m *UserCreate) XXX_Size() int { + return m.Size() +} +func (m *UserCreate) XXX_DiscardUnknown() { + xxx_messageInfo_UserCreate.DiscardUnknown(m) +} + +var xxx_messageInfo_UserCreate proto.InternalMessageInfo + +// UserDelete is emitted when a user gets deleted +type UserDelete struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,3,opt,name=Resource,embedded=Resource" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserDelete) Reset() { *m = UserDelete{} } +func (m *UserDelete) String() string { return proto.CompactTextString(m) } +func (*UserDelete) ProtoMessage() {} +func (*UserDelete) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{21} +} +func (m *UserDelete) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserDelete.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *UserDelete) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserDelete.Merge(dst, src) +} +func (m *UserDelete) XXX_Size() int { + return m.Size() +} +func (m *UserDelete) XXX_DiscardUnknown() { + xxx_messageInfo_UserDelete.DiscardUnknown(m) +} + +var xxx_messageInfo_UserDelete proto.InternalMessageInfo + +// UserPasswordChange is emitted when the user changes their own password. 
+type UserPasswordChange struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserPasswordChange) Reset() { *m = UserPasswordChange{} } +func (m *UserPasswordChange) String() string { return proto.CompactTextString(m) } +func (*UserPasswordChange) ProtoMessage() {} +func (*UserPasswordChange) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{22} +} +func (m *UserPasswordChange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserPasswordChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserPasswordChange.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *UserPasswordChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserPasswordChange.Merge(dst, src) +} +func (m *UserPasswordChange) XXX_Size() int { + return m.Size() +} +func (m *UserPasswordChange) XXX_DiscardUnknown() { + xxx_messageInfo_UserPasswordChange.DiscardUnknown(m) +} + +var xxx_messageInfo_UserPasswordChange proto.InternalMessageInfo + +// AccessRequestCreate is emitted when access request has been created or updated +type AccessRequestCreate struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,3,opt,name=Resource,embedded=Resource" json:""` + // Roles is a list of roles for the user. + Roles []string `protobuf:"bytes,4,rep,name=Roles" json:"roles"` + // RequestID is access request ID + RequestID string `protobuf:"bytes,5,opt,name=RequestID,proto3" json:"id"` + // RequestState is access request state + RequestState string `protobuf:"bytes,6,opt,name=RequestState,proto3" json:"state"` + // Delegator is used by teleport plugins to indicate the identity + // which caused them to update state. 
+ Delegator string `protobuf:"bytes,7,opt,name=Delegator,proto3" json:"delegator,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AccessRequestCreate) Reset() { *m = AccessRequestCreate{} } +func (m *AccessRequestCreate) String() string { return proto.CompactTextString(m) } +func (*AccessRequestCreate) ProtoMessage() {} +func (*AccessRequestCreate) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{23} +} +func (m *AccessRequestCreate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AccessRequestCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AccessRequestCreate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AccessRequestCreate) XXX_Merge(src proto.Message) { + xxx_messageInfo_AccessRequestCreate.Merge(dst, src) +} +func (m *AccessRequestCreate) XXX_Size() int { + return m.Size() +} +func (m *AccessRequestCreate) XXX_DiscardUnknown() { + xxx_messageInfo_AccessRequestCreate.DiscardUnknown(m) +} + +var xxx_messageInfo_AccessRequestCreate proto.InternalMessageInfo + +// PortForward is emitted when a user requests port forwarding. +type PortForward struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,3,opt,name=Connection,embedded=Connection" json:""` + // Status contains operation success or failure status + Status `protobuf:"bytes,4,opt,name=Status,embedded=Status" json:""` + // Addr is a target port forwarding address + Addr string `protobuf:"bytes,5,opt,name=Addr,proto3" json:"addr"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PortForward) Reset() { *m = PortForward{} } +func (m *PortForward) String() string { return proto.CompactTextString(m) } +func (*PortForward) ProtoMessage() {} +func (*PortForward) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{24} +} +func (m *PortForward) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortForward) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PortForward.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PortForward) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortForward.Merge(dst, src) +} +func (m *PortForward) XXX_Size() int { + return m.Size() +} +func (m *PortForward) XXX_DiscardUnknown() { + xxx_messageInfo_PortForward.DiscardUnknown(m) +} + +var xxx_messageInfo_PortForward proto.InternalMessageInfo + +// X11Forward is emitted when a user requests X11 protocol forwarding +type X11Forward struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata 
`protobuf:"bytes,3,opt,name=Connection,embedded=Connection" json:""` + // Status contains operation success or failure status + Status `protobuf:"bytes,4,opt,name=Status,embedded=Status" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *X11Forward) Reset() { *m = X11Forward{} } +func (m *X11Forward) String() string { return proto.CompactTextString(m) } +func (*X11Forward) ProtoMessage() {} +func (*X11Forward) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{25} +} +func (m *X11Forward) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *X11Forward) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_X11Forward.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *X11Forward) XXX_Merge(src proto.Message) { + xxx_messageInfo_X11Forward.Merge(dst, src) +} +func (m *X11Forward) XXX_Size() int { + return m.Size() +} +func (m *X11Forward) XXX_DiscardUnknown() { + xxx_messageInfo_X11Forward.DiscardUnknown(m) +} + +var xxx_messageInfo_X11Forward proto.InternalMessageInfo + +// CommandMetadata specifies common command fields +type CommandMetadata struct { + // Command is the executed command name + Command string `protobuf:"bytes,1,opt,name=Command,proto3" json:"command"` + // ExitCode specifies command exit code + ExitCode string `protobuf:"bytes,2,opt,name=ExitCode,proto3" json:"exitCode,omitempty"` + // Error is an optional exit error, set if command has failed + Error string `protobuf:"bytes,3,opt,name=Error,proto3" json:"exitError,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CommandMetadata) Reset() { *m = CommandMetadata{} } +func (m *CommandMetadata) String() string { return proto.CompactTextString(m) } +func (*CommandMetadata) ProtoMessage() {} +func (*CommandMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{26} +} +func (m *CommandMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommandMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommandMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *CommandMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommandMetadata.Merge(dst, src) +} +func (m *CommandMetadata) XXX_Size() int { + return m.Size() +} +func (m *CommandMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_CommandMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_CommandMetadata proto.InternalMessageInfo + +// Exec specifies command exec event +type Exec struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,3,opt,name=Connection,embedded=Connection" json:""` + // SessionMetadata is a common event session metadata + SessionMetadata `protobuf:"bytes,4,opt,name=Session,embedded=Session" json:""` + // ServerMetadata is a common server metadata + 
ServerMetadata `protobuf:"bytes,5,opt,name=Server,embedded=Server" json:""` + // CommandMetadata is a common command metadata + CommandMetadata `protobuf:"bytes,6,opt,name=Command,embedded=Command" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exec) Reset() { *m = Exec{} } +func (m *Exec) String() string { return proto.CompactTextString(m) } +func (*Exec) ProtoMessage() {} +func (*Exec) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{27} +} +func (m *Exec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exec.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Exec) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exec.Merge(dst, src) +} +func (m *Exec) XXX_Size() int { + return m.Size() +} +func (m *Exec) XXX_DiscardUnknown() { + xxx_messageInfo_Exec.DiscardUnknown(m) +} + +var xxx_messageInfo_Exec proto.InternalMessageInfo + +// SCP is emitted when data transfer has occurred between server and client +type SCP struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,3,opt,name=Connection,embedded=Connection" json:""` + // SessionMetadata is a common event session metadata + SessionMetadata `protobuf:"bytes,4,opt,name=Session,embedded=Session" json:""` + // ServerMetadata is a common server metadata + ServerMetadata `protobuf:"bytes,5,opt,name=Server,embedded=Server" json:""` + // CommandMetadata is a common command metadata + CommandMetadata `protobuf:"bytes,6,opt,name=Command,embedded=Command" json:""` + // Path is a copy path + Path string `protobuf:"bytes,7,opt,name=Path,proto3" json:"path"` + // Action is upload or download + Action string `protobuf:"bytes,8,opt,name=Action,proto3" json:"action"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SCP) Reset() { *m = SCP{} } +func (m *SCP) String() string { return proto.CompactTextString(m) } +func (*SCP) ProtoMessage() {} +func (*SCP) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{28} +} +func (m *SCP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SCP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SCP.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SCP) XXX_Merge(src proto.Message) { + xxx_messageInfo_SCP.Merge(dst, src) +} +func (m *SCP) XXX_Size() int { + return m.Size() +} +func (m *SCP) XXX_DiscardUnknown() { + xxx_messageInfo_SCP.DiscardUnknown(m) +} + +var xxx_messageInfo_SCP proto.InternalMessageInfo + +// Subsystem is emitted when a user requests a new subsystem. 
+type Subsystem struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,3,opt,name=Connection,embedded=Connection" json:""` + // Name is a subsystem name + Name string `protobuf:"bytes,4,opt,name=Name,proto3" json:"name"` + // Error contains error in case of unsuccessful attempt + Error string `protobuf:"bytes,5,opt,name=Error,proto3" json:"exitError"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Subsystem) Reset() { *m = Subsystem{} } +func (m *Subsystem) String() string { return proto.CompactTextString(m) } +func (*Subsystem) ProtoMessage() {} +func (*Subsystem) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{29} +} +func (m *Subsystem) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subsystem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Subsystem.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Subsystem) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subsystem.Merge(dst, src) +} +func (m *Subsystem) XXX_Size() int { + return m.Size() +} +func (m *Subsystem) XXX_DiscardUnknown() { + xxx_messageInfo_Subsystem.DiscardUnknown(m) +} + +var xxx_messageInfo_Subsystem proto.InternalMessageInfo + +// ClientDisconnect is emitted when client is disconnected +// by the server due to inactivity or any other reason +type ClientDisconnect struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,3,opt,name=Connection,embedded=Connection" json:""` + // ServerMetadata is a common server metadata + ServerMetadata `protobuf:"bytes,4,opt,name=Server,embedded=Server" json:""` + // Reason is a field that specifies reason for event, e.g.
in disconnect + // event it explains why server disconnected the client + Reason string `protobuf:"bytes,5,opt,name=Reason,proto3" json:"reason"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientDisconnect) Reset() { *m = ClientDisconnect{} } +func (m *ClientDisconnect) String() string { return proto.CompactTextString(m) } +func (*ClientDisconnect) ProtoMessage() {} +func (*ClientDisconnect) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{30} +} +func (m *ClientDisconnect) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientDisconnect) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientDisconnect.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ClientDisconnect) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientDisconnect.Merge(dst, src) +} +func (m *ClientDisconnect) XXX_Size() int { + return m.Size() +} +func (m *ClientDisconnect) XXX_DiscardUnknown() { + xxx_messageInfo_ClientDisconnect.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientDisconnect proto.InternalMessageInfo + +// AuthAttempt is emitted upon a failed or successful authentication attempt. +type AuthAttempt struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,2,opt,name=User,embedded=User" json:""` + // ConnectionMetadata holds information about the connection + ConnectionMetadata `protobuf:"bytes,3,opt,name=Connection,embedded=Connection" json:""` + // Status contains common command or operation status fields + Status `protobuf:"bytes,4,opt,name=Status,embedded=Status" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthAttempt) Reset() { *m = AuthAttempt{} } +func (m *AuthAttempt) String() string { return proto.CompactTextString(m) } +func (*AuthAttempt) ProtoMessage() {} +func (*AuthAttempt) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{31} +} +func (m *AuthAttempt) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthAttempt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthAttempt.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AuthAttempt) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthAttempt.Merge(dst, src) +} +func (m *AuthAttempt) XXX_Size() int { + return m.Size() +} +func (m *AuthAttempt) XXX_DiscardUnknown() { + xxx_messageInfo_AuthAttempt.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthAttempt proto.InternalMessageInfo + +// ResetPasswordTokenCreate is emitted when token is created.
+type ResetPasswordTokenCreate struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResetPasswordTokenCreate) Reset() { *m = ResetPasswordTokenCreate{} } +func (m *ResetPasswordTokenCreate) String() string { return proto.CompactTextString(m) } +func (*ResetPasswordTokenCreate) ProtoMessage() {} +func (*ResetPasswordTokenCreate) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{32} +} +func (m *ResetPasswordTokenCreate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResetPasswordTokenCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResetPasswordTokenCreate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResetPasswordTokenCreate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetPasswordTokenCreate.Merge(dst, src) +} +func (m *ResetPasswordTokenCreate) XXX_Size() int { + return m.Size() +} +func (m *ResetPasswordTokenCreate) XXX_DiscardUnknown() { + xxx_messageInfo_ResetPasswordTokenCreate.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetPasswordTokenCreate proto.InternalMessageInfo + +// RoleCreate is emitted when a role is created/updated. +type RoleCreate struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RoleCreate) Reset() { *m = RoleCreate{} } +func (m *RoleCreate) String() string { return proto.CompactTextString(m) } +func (*RoleCreate) ProtoMessage() {} +func (*RoleCreate) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{33} +} +func (m *RoleCreate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RoleCreate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RoleCreate) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleCreate.Merge(dst, src) +} +func (m *RoleCreate) XXX_Size() int { + return m.Size() +} +func (m *RoleCreate) XXX_DiscardUnknown() { + xxx_messageInfo_RoleCreate.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleCreate proto.InternalMessageInfo + +// RoleDelete is emitted when a role is deleted +type RoleDelete struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata 
`protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RoleDelete) Reset() { *m = RoleDelete{} } +func (m *RoleDelete) String() string { return proto.CompactTextString(m) } +func (*RoleDelete) ProtoMessage() {} +func (*RoleDelete) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{34} +} +func (m *RoleDelete) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RoleDelete.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RoleDelete) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleDelete.Merge(dst, src) +} +func (m *RoleDelete) XXX_Size() int { + return m.Size() +} +func (m *RoleDelete) XXX_DiscardUnknown() { + xxx_messageInfo_RoleDelete.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleDelete proto.InternalMessageInfo + +// TrustedClusterCreate is the event for creating a trusted cluster. +type TrustedClusterCreate struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TrustedClusterCreate) Reset() { *m = TrustedClusterCreate{} } +func (m *TrustedClusterCreate) String() string { return proto.CompactTextString(m) } +func (*TrustedClusterCreate) ProtoMessage() {} +func (*TrustedClusterCreate) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{35} +} +func (m *TrustedClusterCreate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TrustedClusterCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TrustedClusterCreate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TrustedClusterCreate) XXX_Merge(src proto.Message) { + xxx_messageInfo_TrustedClusterCreate.Merge(dst, src) +} +func (m *TrustedClusterCreate) XXX_Size() int { + return m.Size() +} +func (m *TrustedClusterCreate) XXX_DiscardUnknown() { + xxx_messageInfo_TrustedClusterCreate.DiscardUnknown(m) +} + +var xxx_messageInfo_TrustedClusterCreate proto.InternalMessageInfo + +// TrustedClusterDelete is the event for removing a trusted cluster. 
+type TrustedClusterDelete struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TrustedClusterDelete) Reset() { *m = TrustedClusterDelete{} } +func (m *TrustedClusterDelete) String() string { return proto.CompactTextString(m) } +func (*TrustedClusterDelete) ProtoMessage() {} +func (*TrustedClusterDelete) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{36} +} +func (m *TrustedClusterDelete) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TrustedClusterDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TrustedClusterDelete.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TrustedClusterDelete) XXX_Merge(src proto.Message) { + xxx_messageInfo_TrustedClusterDelete.Merge(dst, src) +} +func (m *TrustedClusterDelete) XXX_Size() int { + return m.Size() +} +func (m *TrustedClusterDelete) XXX_DiscardUnknown() { + xxx_messageInfo_TrustedClusterDelete.DiscardUnknown(m) +} + +var xxx_messageInfo_TrustedClusterDelete proto.InternalMessageInfo + +// TrustedClusterTokenCreate is the event for +// creating new join token for a trusted cluster. +type TrustedClusterTokenCreate struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TrustedClusterTokenCreate) Reset() { *m = TrustedClusterTokenCreate{} } +func (m *TrustedClusterTokenCreate) String() string { return proto.CompactTextString(m) } +func (*TrustedClusterTokenCreate) ProtoMessage() {} +func (*TrustedClusterTokenCreate) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{37} +} +func (m *TrustedClusterTokenCreate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TrustedClusterTokenCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TrustedClusterTokenCreate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TrustedClusterTokenCreate) XXX_Merge(src proto.Message) { + xxx_messageInfo_TrustedClusterTokenCreate.Merge(dst, src) +} +func (m *TrustedClusterTokenCreate) XXX_Size() int { + return m.Size() +} +func (m *TrustedClusterTokenCreate) XXX_DiscardUnknown() { + xxx_messageInfo_TrustedClusterTokenCreate.DiscardUnknown(m) +} + +var xxx_messageInfo_TrustedClusterTokenCreate proto.InternalMessageInfo + +// GithubConnectorCreate fires when a Github connector is created/updated. 
+type GithubConnectorCreate struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GithubConnectorCreate) Reset() { *m = GithubConnectorCreate{} } +func (m *GithubConnectorCreate) String() string { return proto.CompactTextString(m) } +func (*GithubConnectorCreate) ProtoMessage() {} +func (*GithubConnectorCreate) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{38} +} +func (m *GithubConnectorCreate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GithubConnectorCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GithubConnectorCreate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GithubConnectorCreate) XXX_Merge(src proto.Message) { + xxx_messageInfo_GithubConnectorCreate.Merge(dst, src) +} +func (m *GithubConnectorCreate) XXX_Size() int { + return m.Size() +} +func (m *GithubConnectorCreate) XXX_DiscardUnknown() { + xxx_messageInfo_GithubConnectorCreate.DiscardUnknown(m) +} + +var xxx_messageInfo_GithubConnectorCreate proto.InternalMessageInfo + +// GithubConnectorDelete fires when a Github connector is deleted. +type GithubConnectorDelete struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GithubConnectorDelete) Reset() { *m = GithubConnectorDelete{} } +func (m *GithubConnectorDelete) String() string { return proto.CompactTextString(m) } +func (*GithubConnectorDelete) ProtoMessage() {} +func (*GithubConnectorDelete) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{39} +} +func (m *GithubConnectorDelete) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GithubConnectorDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GithubConnectorDelete.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *GithubConnectorDelete) XXX_Merge(src proto.Message) { + xxx_messageInfo_GithubConnectorDelete.Merge(dst, src) +} +func (m *GithubConnectorDelete) XXX_Size() int { + return m.Size() +} +func (m *GithubConnectorDelete) XXX_DiscardUnknown() { + xxx_messageInfo_GithubConnectorDelete.DiscardUnknown(m) +} + +var xxx_messageInfo_GithubConnectorDelete proto.InternalMessageInfo + +// OIDCConnectorCreate fires when OIDC connector is created/updated. 
+type OIDCConnectorCreate struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OIDCConnectorCreate) Reset() { *m = OIDCConnectorCreate{} } +func (m *OIDCConnectorCreate) String() string { return proto.CompactTextString(m) } +func (*OIDCConnectorCreate) ProtoMessage() {} +func (*OIDCConnectorCreate) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{40} +} +func (m *OIDCConnectorCreate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OIDCConnectorCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OIDCConnectorCreate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *OIDCConnectorCreate) XXX_Merge(src proto.Message) { + xxx_messageInfo_OIDCConnectorCreate.Merge(dst, src) +} +func (m *OIDCConnectorCreate) XXX_Size() int { + return m.Size() +} +func (m *OIDCConnectorCreate) XXX_DiscardUnknown() { + xxx_messageInfo_OIDCConnectorCreate.DiscardUnknown(m) +} + +var xxx_messageInfo_OIDCConnectorCreate proto.InternalMessageInfo + +// OIDCConnectorDelete fires when OIDC connector is deleted. +type OIDCConnectorDelete struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OIDCConnectorDelete) Reset() { *m = OIDCConnectorDelete{} } +func (m *OIDCConnectorDelete) String() string { return proto.CompactTextString(m) } +func (*OIDCConnectorDelete) ProtoMessage() {} +func (*OIDCConnectorDelete) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{41} +} +func (m *OIDCConnectorDelete) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OIDCConnectorDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OIDCConnectorDelete.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *OIDCConnectorDelete) XXX_Merge(src proto.Message) { + xxx_messageInfo_OIDCConnectorDelete.Merge(dst, src) +} +func (m *OIDCConnectorDelete) XXX_Size() int { + return m.Size() +} +func (m *OIDCConnectorDelete) XXX_DiscardUnknown() { + xxx_messageInfo_OIDCConnectorDelete.DiscardUnknown(m) +} + +var xxx_messageInfo_OIDCConnectorDelete proto.InternalMessageInfo + +// SAMLConnectorCreate fires when SAML connector is created/updated. 
+type SAMLConnectorCreate struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SAMLConnectorCreate) Reset() { *m = SAMLConnectorCreate{} } +func (m *SAMLConnectorCreate) String() string { return proto.CompactTextString(m) } +func (*SAMLConnectorCreate) ProtoMessage() {} +func (*SAMLConnectorCreate) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{42} +} +func (m *SAMLConnectorCreate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SAMLConnectorCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SAMLConnectorCreate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SAMLConnectorCreate) XXX_Merge(src proto.Message) { + xxx_messageInfo_SAMLConnectorCreate.Merge(dst, src) +} +func (m *SAMLConnectorCreate) XXX_Size() int { + return m.Size() +} +func (m *SAMLConnectorCreate) XXX_DiscardUnknown() { + xxx_messageInfo_SAMLConnectorCreate.DiscardUnknown(m) +} + +var xxx_messageInfo_SAMLConnectorCreate proto.InternalMessageInfo + +// SAMLConnectorDelete fires when SAML connector is deleted. +type SAMLConnectorDelete struct { + // Metadata is a common event metadata + Metadata `protobuf:"bytes,1,opt,name=Metadata,embedded=Metadata" json:""` + // ResourceMetadata is a common resource event metadata + ResourceMetadata `protobuf:"bytes,2,opt,name=Resource,embedded=Resource" json:""` + // User is a common user event metadata + UserMetadata `protobuf:"bytes,3,opt,name=User,embedded=User" json:""` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SAMLConnectorDelete) Reset() { *m = SAMLConnectorDelete{} } +func (m *SAMLConnectorDelete) String() string { return proto.CompactTextString(m) } +func (*SAMLConnectorDelete) ProtoMessage() {} +func (*SAMLConnectorDelete) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{43} +} +func (m *SAMLConnectorDelete) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SAMLConnectorDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SAMLConnectorDelete.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SAMLConnectorDelete) XXX_Merge(src proto.Message) { + xxx_messageInfo_SAMLConnectorDelete.Merge(dst, src) +} +func (m *SAMLConnectorDelete) XXX_Size() int { + return m.Size() +} +func (m *SAMLConnectorDelete) XXX_DiscardUnknown() { + xxx_messageInfo_SAMLConnectorDelete.DiscardUnknown(m) +} + +var xxx_messageInfo_SAMLConnectorDelete proto.InternalMessageInfo + +// OneOf is a union of one of audit events submitted to the auth service +type OneOf struct { + // Event is one of the audit events + // + // Types that are valid to be assigned to Event: + // *OneOf_UserLogin + // *OneOf_UserCreate + // 
*OneOf_UserDelete + // *OneOf_UserPasswordChange + // *OneOf_SessionStart + // *OneOf_SessionJoin + // *OneOf_SessionPrint + // *OneOf_SessionReject + // *OneOf_Resize + // *OneOf_SessionEnd + // *OneOf_SessionCommand + // *OneOf_SessionDisk + // *OneOf_SessionNetwork + // *OneOf_SessionData + // *OneOf_SessionLeave + // *OneOf_PortForward + // *OneOf_X11Forward + // *OneOf_SCP + // *OneOf_Exec + // *OneOf_Subsystem + // *OneOf_ClientDisconnect + // *OneOf_AuthAttempt + // *OneOf_AccessRequestCreate + // *OneOf_ResetPasswordTokenCreate + // *OneOf_RoleCreate + // *OneOf_RoleDelete + // *OneOf_TrustedClusterCreate + // *OneOf_TrustedClusterDelete + // *OneOf_TrustedClusterTokenCreate + // *OneOf_GithubConnectorCreate + // *OneOf_GithubConnectorDelete + // *OneOf_OIDCConnectorCreate + // *OneOf_OIDCConnectorDelete + // *OneOf_SAMLConnectorCreate + // *OneOf_SAMLConnectorDelete + Event isOneOf_Event `protobuf_oneof:"Event"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneOf) Reset() { *m = OneOf{} } +func (m *OneOf) String() string { return proto.CompactTextString(m) } +func (*OneOf) ProtoMessage() {} +func (*OneOf) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{44} +} +func (m *OneOf) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OneOf) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OneOf.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *OneOf) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneOf.Merge(dst, src) +} +func (m *OneOf) XXX_Size() int { + return m.Size() +} +func (m *OneOf) XXX_DiscardUnknown() { + xxx_messageInfo_OneOf.DiscardUnknown(m) +} + +var xxx_messageInfo_OneOf proto.InternalMessageInfo + +type isOneOf_Event interface { + isOneOf_Event() + MarshalTo([]byte) (int, error) + Size() int +} + +type OneOf_UserLogin struct { + UserLogin *UserLogin `protobuf:"bytes,1,opt,name=UserLogin,oneof"` +} +type OneOf_UserCreate struct { + UserCreate *UserCreate `protobuf:"bytes,2,opt,name=UserCreate,oneof"` +} +type OneOf_UserDelete struct { + UserDelete *UserDelete `protobuf:"bytes,3,opt,name=UserDelete,oneof"` +} +type OneOf_UserPasswordChange struct { + UserPasswordChange *UserPasswordChange `protobuf:"bytes,4,opt,name=UserPasswordChange,oneof"` +} +type OneOf_SessionStart struct { + SessionStart *SessionStart `protobuf:"bytes,5,opt,name=SessionStart,oneof"` +} +type OneOf_SessionJoin struct { + SessionJoin *SessionJoin `protobuf:"bytes,6,opt,name=SessionJoin,oneof"` +} +type OneOf_SessionPrint struct { + SessionPrint *SessionPrint `protobuf:"bytes,7,opt,name=SessionPrint,oneof"` +} +type OneOf_SessionReject struct { + SessionReject *SessionReject `protobuf:"bytes,8,opt,name=SessionReject,oneof"` +} +type OneOf_Resize struct { + Resize *Resize `protobuf:"bytes,9,opt,name=Resize,oneof"` +} +type OneOf_SessionEnd struct { + SessionEnd *SessionEnd `protobuf:"bytes,10,opt,name=SessionEnd,oneof"` +} +type OneOf_SessionCommand struct { + SessionCommand *SessionCommand `protobuf:"bytes,11,opt,name=SessionCommand,oneof"` +} +type OneOf_SessionDisk struct { + SessionDisk *SessionDisk `protobuf:"bytes,12,opt,name=SessionDisk,oneof"` +} +type OneOf_SessionNetwork struct { + SessionNetwork *SessionNetwork `protobuf:"bytes,13,opt,name=SessionNetwork,oneof"` +} +type OneOf_SessionData 
struct { + SessionData *SessionData `protobuf:"bytes,14,opt,name=SessionData,oneof"` +} +type OneOf_SessionLeave struct { + SessionLeave *SessionLeave `protobuf:"bytes,15,opt,name=SessionLeave,oneof"` +} +type OneOf_PortForward struct { + PortForward *PortForward `protobuf:"bytes,16,opt,name=PortForward,oneof"` +} +type OneOf_X11Forward struct { + X11Forward *X11Forward `protobuf:"bytes,17,opt,name=X11Forward,oneof"` +} +type OneOf_SCP struct { + SCP *SCP `protobuf:"bytes,18,opt,name=SCP,oneof"` +} +type OneOf_Exec struct { + Exec *Exec `protobuf:"bytes,19,opt,name=Exec,oneof"` +} +type OneOf_Subsystem struct { + Subsystem *Subsystem `protobuf:"bytes,20,opt,name=Subsystem,oneof"` +} +type OneOf_ClientDisconnect struct { + ClientDisconnect *ClientDisconnect `protobuf:"bytes,21,opt,name=ClientDisconnect,oneof"` +} +type OneOf_AuthAttempt struct { + AuthAttempt *AuthAttempt `protobuf:"bytes,22,opt,name=AuthAttempt,oneof"` +} +type OneOf_AccessRequestCreate struct { + AccessRequestCreate *AccessRequestCreate `protobuf:"bytes,23,opt,name=AccessRequestCreate,oneof"` +} +type OneOf_ResetPasswordTokenCreate struct { + ResetPasswordTokenCreate *ResetPasswordTokenCreate `protobuf:"bytes,24,opt,name=ResetPasswordTokenCreate,oneof"` +} +type OneOf_RoleCreate struct { + RoleCreate *RoleCreate `protobuf:"bytes,25,opt,name=RoleCreate,oneof"` +} +type OneOf_RoleDelete struct { + RoleDelete *RoleDelete `protobuf:"bytes,26,opt,name=RoleDelete,oneof"` +} +type OneOf_TrustedClusterCreate struct { + TrustedClusterCreate *TrustedClusterCreate `protobuf:"bytes,27,opt,name=TrustedClusterCreate,oneof"` +} +type OneOf_TrustedClusterDelete struct { + TrustedClusterDelete *TrustedClusterDelete `protobuf:"bytes,28,opt,name=TrustedClusterDelete,oneof"` +} +type OneOf_TrustedClusterTokenCreate struct { + TrustedClusterTokenCreate *TrustedClusterTokenCreate `protobuf:"bytes,29,opt,name=TrustedClusterTokenCreate,oneof"` +} +type OneOf_GithubConnectorCreate struct { + GithubConnectorCreate *GithubConnectorCreate `protobuf:"bytes,30,opt,name=GithubConnectorCreate,oneof"` +} +type OneOf_GithubConnectorDelete struct { + GithubConnectorDelete *GithubConnectorDelete `protobuf:"bytes,31,opt,name=GithubConnectorDelete,oneof"` +} +type OneOf_OIDCConnectorCreate struct { + OIDCConnectorCreate *OIDCConnectorCreate `protobuf:"bytes,32,opt,name=OIDCConnectorCreate,oneof"` +} +type OneOf_OIDCConnectorDelete struct { + OIDCConnectorDelete *OIDCConnectorDelete `protobuf:"bytes,33,opt,name=OIDCConnectorDelete,oneof"` +} +type OneOf_SAMLConnectorCreate struct { + SAMLConnectorCreate *SAMLConnectorCreate `protobuf:"bytes,34,opt,name=SAMLConnectorCreate,oneof"` +} +type OneOf_SAMLConnectorDelete struct { + SAMLConnectorDelete *SAMLConnectorDelete `protobuf:"bytes,35,opt,name=SAMLConnectorDelete,oneof"` +} + +func (*OneOf_UserLogin) isOneOf_Event() {} +func (*OneOf_UserCreate) isOneOf_Event() {} +func (*OneOf_UserDelete) isOneOf_Event() {} +func (*OneOf_UserPasswordChange) isOneOf_Event() {} +func (*OneOf_SessionStart) isOneOf_Event() {} +func (*OneOf_SessionJoin) isOneOf_Event() {} +func (*OneOf_SessionPrint) isOneOf_Event() {} +func (*OneOf_SessionReject) isOneOf_Event() {} +func (*OneOf_Resize) isOneOf_Event() {} +func (*OneOf_SessionEnd) isOneOf_Event() {} +func (*OneOf_SessionCommand) isOneOf_Event() {} +func (*OneOf_SessionDisk) isOneOf_Event() {} +func (*OneOf_SessionNetwork) isOneOf_Event() {} +func (*OneOf_SessionData) isOneOf_Event() {} +func (*OneOf_SessionLeave) isOneOf_Event() {} +func (*OneOf_PortForward) isOneOf_Event() {} 
+func (*OneOf_X11Forward) isOneOf_Event() {} +func (*OneOf_SCP) isOneOf_Event() {} +func (*OneOf_Exec) isOneOf_Event() {} +func (*OneOf_Subsystem) isOneOf_Event() {} +func (*OneOf_ClientDisconnect) isOneOf_Event() {} +func (*OneOf_AuthAttempt) isOneOf_Event() {} +func (*OneOf_AccessRequestCreate) isOneOf_Event() {} +func (*OneOf_ResetPasswordTokenCreate) isOneOf_Event() {} +func (*OneOf_RoleCreate) isOneOf_Event() {} +func (*OneOf_RoleDelete) isOneOf_Event() {} +func (*OneOf_TrustedClusterCreate) isOneOf_Event() {} +func (*OneOf_TrustedClusterDelete) isOneOf_Event() {} +func (*OneOf_TrustedClusterTokenCreate) isOneOf_Event() {} +func (*OneOf_GithubConnectorCreate) isOneOf_Event() {} +func (*OneOf_GithubConnectorDelete) isOneOf_Event() {} +func (*OneOf_OIDCConnectorCreate) isOneOf_Event() {} +func (*OneOf_OIDCConnectorDelete) isOneOf_Event() {} +func (*OneOf_SAMLConnectorCreate) isOneOf_Event() {} +func (*OneOf_SAMLConnectorDelete) isOneOf_Event() {} + +func (m *OneOf) GetEvent() isOneOf_Event { + if m != nil { + return m.Event + } + return nil +} + +func (m *OneOf) GetUserLogin() *UserLogin { + if x, ok := m.GetEvent().(*OneOf_UserLogin); ok { + return x.UserLogin + } + return nil +} + +func (m *OneOf) GetUserCreate() *UserCreate { + if x, ok := m.GetEvent().(*OneOf_UserCreate); ok { + return x.UserCreate + } + return nil +} + +func (m *OneOf) GetUserDelete() *UserDelete { + if x, ok := m.GetEvent().(*OneOf_UserDelete); ok { + return x.UserDelete + } + return nil +} + +func (m *OneOf) GetUserPasswordChange() *UserPasswordChange { + if x, ok := m.GetEvent().(*OneOf_UserPasswordChange); ok { + return x.UserPasswordChange + } + return nil +} + +func (m *OneOf) GetSessionStart() *SessionStart { + if x, ok := m.GetEvent().(*OneOf_SessionStart); ok { + return x.SessionStart + } + return nil +} + +func (m *OneOf) GetSessionJoin() *SessionJoin { + if x, ok := m.GetEvent().(*OneOf_SessionJoin); ok { + return x.SessionJoin + } + return nil +} + +func (m *OneOf) GetSessionPrint() *SessionPrint { + if x, ok := m.GetEvent().(*OneOf_SessionPrint); ok { + return x.SessionPrint + } + return nil +} + +func (m *OneOf) GetSessionReject() *SessionReject { + if x, ok := m.GetEvent().(*OneOf_SessionReject); ok { + return x.SessionReject + } + return nil +} + +func (m *OneOf) GetResize() *Resize { + if x, ok := m.GetEvent().(*OneOf_Resize); ok { + return x.Resize + } + return nil +} + +func (m *OneOf) GetSessionEnd() *SessionEnd { + if x, ok := m.GetEvent().(*OneOf_SessionEnd); ok { + return x.SessionEnd + } + return nil +} + +func (m *OneOf) GetSessionCommand() *SessionCommand { + if x, ok := m.GetEvent().(*OneOf_SessionCommand); ok { + return x.SessionCommand + } + return nil +} + +func (m *OneOf) GetSessionDisk() *SessionDisk { + if x, ok := m.GetEvent().(*OneOf_SessionDisk); ok { + return x.SessionDisk + } + return nil +} + +func (m *OneOf) GetSessionNetwork() *SessionNetwork { + if x, ok := m.GetEvent().(*OneOf_SessionNetwork); ok { + return x.SessionNetwork + } + return nil +} + +func (m *OneOf) GetSessionData() *SessionData { + if x, ok := m.GetEvent().(*OneOf_SessionData); ok { + return x.SessionData + } + return nil +} + +func (m *OneOf) GetSessionLeave() *SessionLeave { + if x, ok := m.GetEvent().(*OneOf_SessionLeave); ok { + return x.SessionLeave + } + return nil +} + +func (m *OneOf) GetPortForward() *PortForward { + if x, ok := m.GetEvent().(*OneOf_PortForward); ok { + return x.PortForward + } + return nil +} + +func (m *OneOf) GetX11Forward() *X11Forward { + if x, ok := 
m.GetEvent().(*OneOf_X11Forward); ok { + return x.X11Forward + } + return nil +} + +func (m *OneOf) GetSCP() *SCP { + if x, ok := m.GetEvent().(*OneOf_SCP); ok { + return x.SCP + } + return nil +} + +func (m *OneOf) GetExec() *Exec { + if x, ok := m.GetEvent().(*OneOf_Exec); ok { + return x.Exec + } + return nil +} + +func (m *OneOf) GetSubsystem() *Subsystem { + if x, ok := m.GetEvent().(*OneOf_Subsystem); ok { + return x.Subsystem + } + return nil +} + +func (m *OneOf) GetClientDisconnect() *ClientDisconnect { + if x, ok := m.GetEvent().(*OneOf_ClientDisconnect); ok { + return x.ClientDisconnect + } + return nil +} + +func (m *OneOf) GetAuthAttempt() *AuthAttempt { + if x, ok := m.GetEvent().(*OneOf_AuthAttempt); ok { + return x.AuthAttempt + } + return nil +} + +func (m *OneOf) GetAccessRequestCreate() *AccessRequestCreate { + if x, ok := m.GetEvent().(*OneOf_AccessRequestCreate); ok { + return x.AccessRequestCreate + } + return nil +} + +func (m *OneOf) GetResetPasswordTokenCreate() *ResetPasswordTokenCreate { + if x, ok := m.GetEvent().(*OneOf_ResetPasswordTokenCreate); ok { + return x.ResetPasswordTokenCreate + } + return nil +} + +func (m *OneOf) GetRoleCreate() *RoleCreate { + if x, ok := m.GetEvent().(*OneOf_RoleCreate); ok { + return x.RoleCreate + } + return nil +} + +func (m *OneOf) GetRoleDelete() *RoleDelete { + if x, ok := m.GetEvent().(*OneOf_RoleDelete); ok { + return x.RoleDelete + } + return nil +} + +func (m *OneOf) GetTrustedClusterCreate() *TrustedClusterCreate { + if x, ok := m.GetEvent().(*OneOf_TrustedClusterCreate); ok { + return x.TrustedClusterCreate + } + return nil +} + +func (m *OneOf) GetTrustedClusterDelete() *TrustedClusterDelete { + if x, ok := m.GetEvent().(*OneOf_TrustedClusterDelete); ok { + return x.TrustedClusterDelete + } + return nil +} + +func (m *OneOf) GetTrustedClusterTokenCreate() *TrustedClusterTokenCreate { + if x, ok := m.GetEvent().(*OneOf_TrustedClusterTokenCreate); ok { + return x.TrustedClusterTokenCreate + } + return nil +} + +func (m *OneOf) GetGithubConnectorCreate() *GithubConnectorCreate { + if x, ok := m.GetEvent().(*OneOf_GithubConnectorCreate); ok { + return x.GithubConnectorCreate + } + return nil +} + +func (m *OneOf) GetGithubConnectorDelete() *GithubConnectorDelete { + if x, ok := m.GetEvent().(*OneOf_GithubConnectorDelete); ok { + return x.GithubConnectorDelete + } + return nil +} + +func (m *OneOf) GetOIDCConnectorCreate() *OIDCConnectorCreate { + if x, ok := m.GetEvent().(*OneOf_OIDCConnectorCreate); ok { + return x.OIDCConnectorCreate + } + return nil +} + +func (m *OneOf) GetOIDCConnectorDelete() *OIDCConnectorDelete { + if x, ok := m.GetEvent().(*OneOf_OIDCConnectorDelete); ok { + return x.OIDCConnectorDelete + } + return nil +} + +func (m *OneOf) GetSAMLConnectorCreate() *SAMLConnectorCreate { + if x, ok := m.GetEvent().(*OneOf_SAMLConnectorCreate); ok { + return x.SAMLConnectorCreate + } + return nil +} + +func (m *OneOf) GetSAMLConnectorDelete() *SAMLConnectorDelete { + if x, ok := m.GetEvent().(*OneOf_SAMLConnectorDelete); ok { + return x.SAMLConnectorDelete + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*OneOf) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _OneOf_OneofMarshaler, _OneOf_OneofUnmarshaler, _OneOf_OneofSizer, []interface{}{ + (*OneOf_UserLogin)(nil), + (*OneOf_UserCreate)(nil), + (*OneOf_UserDelete)(nil), + (*OneOf_UserPasswordChange)(nil), + (*OneOf_SessionStart)(nil), + (*OneOf_SessionJoin)(nil), + (*OneOf_SessionPrint)(nil), + (*OneOf_SessionReject)(nil), + (*OneOf_Resize)(nil), + (*OneOf_SessionEnd)(nil), + (*OneOf_SessionCommand)(nil), + (*OneOf_SessionDisk)(nil), + (*OneOf_SessionNetwork)(nil), + (*OneOf_SessionData)(nil), + (*OneOf_SessionLeave)(nil), + (*OneOf_PortForward)(nil), + (*OneOf_X11Forward)(nil), + (*OneOf_SCP)(nil), + (*OneOf_Exec)(nil), + (*OneOf_Subsystem)(nil), + (*OneOf_ClientDisconnect)(nil), + (*OneOf_AuthAttempt)(nil), + (*OneOf_AccessRequestCreate)(nil), + (*OneOf_ResetPasswordTokenCreate)(nil), + (*OneOf_RoleCreate)(nil), + (*OneOf_RoleDelete)(nil), + (*OneOf_TrustedClusterCreate)(nil), + (*OneOf_TrustedClusterDelete)(nil), + (*OneOf_TrustedClusterTokenCreate)(nil), + (*OneOf_GithubConnectorCreate)(nil), + (*OneOf_GithubConnectorDelete)(nil), + (*OneOf_OIDCConnectorCreate)(nil), + (*OneOf_OIDCConnectorDelete)(nil), + (*OneOf_SAMLConnectorCreate)(nil), + (*OneOf_SAMLConnectorDelete)(nil), + } +} + +func _OneOf_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*OneOf) + // Event + switch x := m.Event.(type) { + case *OneOf_UserLogin: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.UserLogin); err != nil { + return err + } + case *OneOf_UserCreate: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.UserCreate); err != nil { + return err + } + case *OneOf_UserDelete: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.UserDelete); err != nil { + return err + } + case *OneOf_UserPasswordChange: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.UserPasswordChange); err != nil { + return err + } + case *OneOf_SessionStart: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SessionStart); err != nil { + return err + } + case *OneOf_SessionJoin: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SessionJoin); err != nil { + return err + } + case *OneOf_SessionPrint: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SessionPrint); err != nil { + return err + } + case *OneOf_SessionReject: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SessionReject); err != nil { + return err + } + case *OneOf_Resize: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Resize); err != nil { + return err + } + case *OneOf_SessionEnd: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SessionEnd); err != nil { + return err + } + case *OneOf_SessionCommand: + _ = b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SessionCommand); err != nil { + return err + } + case *OneOf_SessionDisk: + _ = b.EncodeVarint(12<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SessionDisk); err != nil { + return err + } + case *OneOf_SessionNetwork: + _ = b.EncodeVarint(13<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SessionNetwork); err != nil { + return err + } + case *OneOf_SessionData: + _ = b.EncodeVarint(14<<3 | proto.WireBytes) + if err := 
b.EncodeMessage(x.SessionData); err != nil { + return err + } + case *OneOf_SessionLeave: + _ = b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SessionLeave); err != nil { + return err + } + case *OneOf_PortForward: + _ = b.EncodeVarint(16<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PortForward); err != nil { + return err + } + case *OneOf_X11Forward: + _ = b.EncodeVarint(17<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.X11Forward); err != nil { + return err + } + case *OneOf_SCP: + _ = b.EncodeVarint(18<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SCP); err != nil { + return err + } + case *OneOf_Exec: + _ = b.EncodeVarint(19<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Exec); err != nil { + return err + } + case *OneOf_Subsystem: + _ = b.EncodeVarint(20<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Subsystem); err != nil { + return err + } + case *OneOf_ClientDisconnect: + _ = b.EncodeVarint(21<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientDisconnect); err != nil { + return err + } + case *OneOf_AuthAttempt: + _ = b.EncodeVarint(22<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AuthAttempt); err != nil { + return err + } + case *OneOf_AccessRequestCreate: + _ = b.EncodeVarint(23<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AccessRequestCreate); err != nil { + return err + } + case *OneOf_ResetPasswordTokenCreate: + _ = b.EncodeVarint(24<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResetPasswordTokenCreate); err != nil { + return err + } + case *OneOf_RoleCreate: + _ = b.EncodeVarint(25<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RoleCreate); err != nil { + return err + } + case *OneOf_RoleDelete: + _ = b.EncodeVarint(26<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RoleDelete); err != nil { + return err + } + case *OneOf_TrustedClusterCreate: + _ = b.EncodeVarint(27<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TrustedClusterCreate); err != nil { + return err + } + case *OneOf_TrustedClusterDelete: + _ = b.EncodeVarint(28<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TrustedClusterDelete); err != nil { + return err + } + case *OneOf_TrustedClusterTokenCreate: + _ = b.EncodeVarint(29<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TrustedClusterTokenCreate); err != nil { + return err + } + case *OneOf_GithubConnectorCreate: + _ = b.EncodeVarint(30<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.GithubConnectorCreate); err != nil { + return err + } + case *OneOf_GithubConnectorDelete: + _ = b.EncodeVarint(31<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.GithubConnectorDelete); err != nil { + return err + } + case *OneOf_OIDCConnectorCreate: + _ = b.EncodeVarint(32<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.OIDCConnectorCreate); err != nil { + return err + } + case *OneOf_OIDCConnectorDelete: + _ = b.EncodeVarint(33<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.OIDCConnectorDelete); err != nil { + return err + } + case *OneOf_SAMLConnectorCreate: + _ = b.EncodeVarint(34<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SAMLConnectorCreate); err != nil { + return err + } + case *OneOf_SAMLConnectorDelete: + _ = b.EncodeVarint(35<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SAMLConnectorDelete); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("OneOf.Event has unexpected type %T", x) + } + return nil +} + +func _OneOf_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*OneOf) 
+ switch tag { + case 1: // Event.UserLogin + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UserLogin) + err := b.DecodeMessage(msg) + m.Event = &OneOf_UserLogin{msg} + return true, err + case 2: // Event.UserCreate + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UserCreate) + err := b.DecodeMessage(msg) + m.Event = &OneOf_UserCreate{msg} + return true, err + case 3: // Event.UserDelete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UserDelete) + err := b.DecodeMessage(msg) + m.Event = &OneOf_UserDelete{msg} + return true, err + case 4: // Event.UserPasswordChange + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UserPasswordChange) + err := b.DecodeMessage(msg) + m.Event = &OneOf_UserPasswordChange{msg} + return true, err + case 5: // Event.SessionStart + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SessionStart) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SessionStart{msg} + return true, err + case 6: // Event.SessionJoin + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SessionJoin) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SessionJoin{msg} + return true, err + case 7: // Event.SessionPrint + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SessionPrint) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SessionPrint{msg} + return true, err + case 8: // Event.SessionReject + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SessionReject) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SessionReject{msg} + return true, err + case 9: // Event.Resize + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Resize) + err := b.DecodeMessage(msg) + m.Event = &OneOf_Resize{msg} + return true, err + case 10: // Event.SessionEnd + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SessionEnd) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SessionEnd{msg} + return true, err + case 11: // Event.SessionCommand + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SessionCommand) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SessionCommand{msg} + return true, err + case 12: // Event.SessionDisk + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SessionDisk) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SessionDisk{msg} + return true, err + case 13: // Event.SessionNetwork + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SessionNetwork) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SessionNetwork{msg} + return true, err + case 14: // Event.SessionData + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SessionData) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SessionData{msg} + return true, err + case 15: // Event.SessionLeave + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SessionLeave) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SessionLeave{msg} + return true, err + case 16: // Event.PortForward + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PortForward) + err := b.DecodeMessage(msg) + m.Event = 
&OneOf_PortForward{msg} + return true, err + case 17: // Event.X11Forward + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(X11Forward) + err := b.DecodeMessage(msg) + m.Event = &OneOf_X11Forward{msg} + return true, err + case 18: // Event.SCP + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SCP) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SCP{msg} + return true, err + case 19: // Event.Exec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Exec) + err := b.DecodeMessage(msg) + m.Event = &OneOf_Exec{msg} + return true, err + case 20: // Event.Subsystem + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Subsystem) + err := b.DecodeMessage(msg) + m.Event = &OneOf_Subsystem{msg} + return true, err + case 21: // Event.ClientDisconnect + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientDisconnect) + err := b.DecodeMessage(msg) + m.Event = &OneOf_ClientDisconnect{msg} + return true, err + case 22: // Event.AuthAttempt + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AuthAttempt) + err := b.DecodeMessage(msg) + m.Event = &OneOf_AuthAttempt{msg} + return true, err + case 23: // Event.AccessRequestCreate + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AccessRequestCreate) + err := b.DecodeMessage(msg) + m.Event = &OneOf_AccessRequestCreate{msg} + return true, err + case 24: // Event.ResetPasswordTokenCreate + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ResetPasswordTokenCreate) + err := b.DecodeMessage(msg) + m.Event = &OneOf_ResetPasswordTokenCreate{msg} + return true, err + case 25: // Event.RoleCreate + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RoleCreate) + err := b.DecodeMessage(msg) + m.Event = &OneOf_RoleCreate{msg} + return true, err + case 26: // Event.RoleDelete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RoleDelete) + err := b.DecodeMessage(msg) + m.Event = &OneOf_RoleDelete{msg} + return true, err + case 27: // Event.TrustedClusterCreate + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TrustedClusterCreate) + err := b.DecodeMessage(msg) + m.Event = &OneOf_TrustedClusterCreate{msg} + return true, err + case 28: // Event.TrustedClusterDelete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TrustedClusterDelete) + err := b.DecodeMessage(msg) + m.Event = &OneOf_TrustedClusterDelete{msg} + return true, err + case 29: // Event.TrustedClusterTokenCreate + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TrustedClusterTokenCreate) + err := b.DecodeMessage(msg) + m.Event = &OneOf_TrustedClusterTokenCreate{msg} + return true, err + case 30: // Event.GithubConnectorCreate + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GithubConnectorCreate) + err := b.DecodeMessage(msg) + m.Event = &OneOf_GithubConnectorCreate{msg} + return true, err + case 31: // Event.GithubConnectorDelete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GithubConnectorDelete) + err := b.DecodeMessage(msg) + m.Event = &OneOf_GithubConnectorDelete{msg} + return true, err + case 
32: // Event.OIDCConnectorCreate + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(OIDCConnectorCreate) + err := b.DecodeMessage(msg) + m.Event = &OneOf_OIDCConnectorCreate{msg} + return true, err + case 33: // Event.OIDCConnectorDelete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(OIDCConnectorDelete) + err := b.DecodeMessage(msg) + m.Event = &OneOf_OIDCConnectorDelete{msg} + return true, err + case 34: // Event.SAMLConnectorCreate + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SAMLConnectorCreate) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SAMLConnectorCreate{msg} + return true, err + case 35: // Event.SAMLConnectorDelete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SAMLConnectorDelete) + err := b.DecodeMessage(msg) + m.Event = &OneOf_SAMLConnectorDelete{msg} + return true, err + default: + return false, nil + } +} + +func _OneOf_OneofSizer(msg proto.Message) (n int) { + m := msg.(*OneOf) + // Event + switch x := m.Event.(type) { + case *OneOf_UserLogin: + s := proto.Size(x.UserLogin) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_UserCreate: + s := proto.Size(x.UserCreate) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_UserDelete: + s := proto.Size(x.UserDelete) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_UserPasswordChange: + s := proto.Size(x.UserPasswordChange) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SessionStart: + s := proto.Size(x.SessionStart) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SessionJoin: + s := proto.Size(x.SessionJoin) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SessionPrint: + s := proto.Size(x.SessionPrint) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SessionReject: + s := proto.Size(x.SessionReject) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_Resize: + s := proto.Size(x.Resize) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SessionEnd: + s := proto.Size(x.SessionEnd) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SessionCommand: + s := proto.Size(x.SessionCommand) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SessionDisk: + s := proto.Size(x.SessionDisk) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SessionNetwork: + s := proto.Size(x.SessionNetwork) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SessionData: + s := proto.Size(x.SessionData) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SessionLeave: + s := proto.Size(x.SessionLeave) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_PortForward: + s := proto.Size(x.PortForward) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_X11Forward: + s := proto.Size(x.X11Forward) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SCP: + s := proto.Size(x.SCP) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_Exec: + s := proto.Size(x.Exec) + n += 2 // tag and wire + n += 
proto.SizeVarint(uint64(s)) + n += s + case *OneOf_Subsystem: + s := proto.Size(x.Subsystem) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_ClientDisconnect: + s := proto.Size(x.ClientDisconnect) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_AuthAttempt: + s := proto.Size(x.AuthAttempt) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_AccessRequestCreate: + s := proto.Size(x.AccessRequestCreate) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_ResetPasswordTokenCreate: + s := proto.Size(x.ResetPasswordTokenCreate) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_RoleCreate: + s := proto.Size(x.RoleCreate) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_RoleDelete: + s := proto.Size(x.RoleDelete) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_TrustedClusterCreate: + s := proto.Size(x.TrustedClusterCreate) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_TrustedClusterDelete: + s := proto.Size(x.TrustedClusterDelete) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_TrustedClusterTokenCreate: + s := proto.Size(x.TrustedClusterTokenCreate) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_GithubConnectorCreate: + s := proto.Size(x.GithubConnectorCreate) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_GithubConnectorDelete: + s := proto.Size(x.GithubConnectorDelete) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_OIDCConnectorCreate: + s := proto.Size(x.OIDCConnectorCreate) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_OIDCConnectorDelete: + s := proto.Size(x.OIDCConnectorDelete) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SAMLConnectorCreate: + s := proto.Size(x.SAMLConnectorCreate) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_SAMLConnectorDelete: + s := proto.Size(x.SAMLConnectorDelete) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// StreamStatus reflects stream status +type StreamStatus struct { + // UploadID represents upload ID + UploadID string `protobuf:"bytes,1,opt,name=UploadID,proto3" json:"UploadID,omitempty"` + // LastEventIndex updates last event index + LastEventIndex int64 `protobuf:"varint,2,opt,name=LastEventIndex,proto3" json:"LastEventIndex,omitempty"` + // LastUploadTime is the time of the last upload + LastUploadTime time.Time `protobuf:"bytes,3,opt,name=LastUploadTime,stdtime" json:"LastUploadTime"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamStatus) Reset() { *m = StreamStatus{} } +func (m *StreamStatus) String() string { return proto.CompactTextString(m) } +func (*StreamStatus) ProtoMessage() {} +func (*StreamStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_events_577f8782c6396ca4, []int{45} +} +func (m *StreamStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_StreamStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StreamStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamStatus.Merge(dst, src) +} +func (m *StreamStatus) XXX_Size() int { + return m.Size() +} +func (m *StreamStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StreamStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Metadata)(nil), "events.Metadata") + proto.RegisterType((*SessionMetadata)(nil), "events.SessionMetadata") + proto.RegisterType((*UserMetadata)(nil), "events.UserMetadata") + proto.RegisterType((*ServerMetadata)(nil), "events.ServerMetadata") + proto.RegisterMapType((map[string]string)(nil), "events.ServerMetadata.ServerLabelsEntry") + proto.RegisterType((*ConnectionMetadata)(nil), "events.ConnectionMetadata") + proto.RegisterType((*SessionStart)(nil), "events.SessionStart") + proto.RegisterType((*SessionJoin)(nil), "events.SessionJoin") + proto.RegisterType((*SessionPrint)(nil), "events.SessionPrint") + proto.RegisterType((*SessionReject)(nil), "events.SessionReject") + proto.RegisterType((*Resize)(nil), "events.Resize") + proto.RegisterType((*SessionEnd)(nil), "events.SessionEnd") + proto.RegisterType((*BPFMetadata)(nil), "events.BPFMetadata") + proto.RegisterType((*Status)(nil), "events.Status") + proto.RegisterType((*SessionCommand)(nil), "events.SessionCommand") + proto.RegisterType((*SessionDisk)(nil), "events.SessionDisk") + proto.RegisterType((*SessionNetwork)(nil), "events.SessionNetwork") + proto.RegisterType((*SessionData)(nil), "events.SessionData") + proto.RegisterType((*SessionLeave)(nil), "events.SessionLeave") + proto.RegisterType((*UserLogin)(nil), "events.UserLogin") + proto.RegisterType((*ResourceMetadata)(nil), "events.ResourceMetadata") + proto.RegisterType((*UserCreate)(nil), "events.UserCreate") + proto.RegisterType((*UserDelete)(nil), "events.UserDelete") + proto.RegisterType((*UserPasswordChange)(nil), "events.UserPasswordChange") + proto.RegisterType((*AccessRequestCreate)(nil), "events.AccessRequestCreate") + proto.RegisterType((*PortForward)(nil), "events.PortForward") + proto.RegisterType((*X11Forward)(nil), "events.X11Forward") + proto.RegisterType((*CommandMetadata)(nil), "events.CommandMetadata") + proto.RegisterType((*Exec)(nil), "events.Exec") + proto.RegisterType((*SCP)(nil), "events.SCP") + proto.RegisterType((*Subsystem)(nil), "events.Subsystem") + proto.RegisterType((*ClientDisconnect)(nil), "events.ClientDisconnect") + proto.RegisterType((*AuthAttempt)(nil), "events.AuthAttempt") + proto.RegisterType((*ResetPasswordTokenCreate)(nil), "events.ResetPasswordTokenCreate") + proto.RegisterType((*RoleCreate)(nil), "events.RoleCreate") + proto.RegisterType((*RoleDelete)(nil), "events.RoleDelete") + proto.RegisterType((*TrustedClusterCreate)(nil), "events.TrustedClusterCreate") + proto.RegisterType((*TrustedClusterDelete)(nil), "events.TrustedClusterDelete") + proto.RegisterType((*TrustedClusterTokenCreate)(nil), "events.TrustedClusterTokenCreate") + proto.RegisterType((*GithubConnectorCreate)(nil), "events.GithubConnectorCreate") + proto.RegisterType((*GithubConnectorDelete)(nil), "events.GithubConnectorDelete") + proto.RegisterType((*OIDCConnectorCreate)(nil), "events.OIDCConnectorCreate") + proto.RegisterType((*OIDCConnectorDelete)(nil), "events.OIDCConnectorDelete") + proto.RegisterType((*SAMLConnectorCreate)(nil), 
"events.SAMLConnectorCreate") + proto.RegisterType((*SAMLConnectorDelete)(nil), "events.SAMLConnectorDelete") + proto.RegisterType((*OneOf)(nil), "events.OneOf") + proto.RegisterType((*StreamStatus)(nil), "events.StreamStatus") +} +func (m *Metadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Index != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Index)) + } + if len(m.Type) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.ID) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Code) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Code))) + i += copy(dAtA[i:], m.Code) + } + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) + n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *UserMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.User) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.Login) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Login))) + i += copy(dAtA[i:], m.Login) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ServerMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServerNamespace) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.ServerNamespace))) + i += copy(dAtA[i:], m.ServerNamespace) + } + if len(m.ServerID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.ServerID))) + i += copy(dAtA[i:], m.ServerID) + } + if len(m.ServerHostname) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.ServerHostname))) + i += copy(dAtA[i:], m.ServerHostname) + } + if len(m.ServerAddr) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.ServerAddr))) + i += copy(dAtA[i:], 
m.ServerAddr) + } + if len(m.ServerLabels) > 0 { + for k, _ := range m.ServerLabels { + dAtA[i] = 0x2a + i++ + v := m.ServerLabels[k] + mapSize := 1 + len(k) + sovEvents(uint64(len(k))) + 1 + len(v) + sovEvents(uint64(len(v))) + i = encodeVarintEvents(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConnectionMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.LocalAddr) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.LocalAddr))) + i += copy(dAtA[i:], m.LocalAddr) + } + if len(m.RemoteAddr) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.RemoteAddr))) + i += copy(dAtA[i:], m.RemoteAddr) + } + if len(m.Protocol) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Protocol))) + i += copy(dAtA[i:], m.Protocol) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionStart) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionStart) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n3, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionMetadata.Size())) + n4, err := m.SessionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n5, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n6, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.TerminalSize) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.TerminalSize))) + i += copy(dAtA[i:], m.TerminalSize) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionJoin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionJoin) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n7, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n8, err := 
m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionMetadata.Size())) + n9, err := m.SessionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n10, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n11, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionPrint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionPrint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n12, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + if m.ChunkIndex != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ChunkIndex)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.Bytes != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Bytes)) + } + if m.DelayMilliseconds != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.DelayMilliseconds)) + } + if m.Offset != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Offset)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionReject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionReject) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n13, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n14, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n15, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n16, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + if len(m.Reason) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + } + if m.Maximum != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Maximum)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Resize) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resize) MarshalTo(dAtA []byte) 
(int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n17, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n18, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionMetadata.Size())) + n19, err := m.SessionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n20, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n21, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + if len(m.TerminalSize) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.TerminalSize))) + i += copy(dAtA[i:], m.TerminalSize) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionEnd) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionEnd) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n22, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n23, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionMetadata.Size())) + n24, err := m.SessionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n25, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n26, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + if m.EnhancedRecording { + dAtA[i] = 0x30 + i++ + if m.EnhancedRecording { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Interactive { + dAtA[i] = 0x38 + i++ + if m.Interactive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Participants) > 0 { + for _, s := range m.Participants { + dAtA[i] = 0x42 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x4a + i++ + i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime))) + n27, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTime, dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + dAtA[i] = 0x52 + i++ + i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTime))) + n28, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTime, dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + if m.XXX_unrecognized != nil { + i += 
copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *BPFMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BPFMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.PID)) + } + if m.CgroupID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.CgroupID)) + } + if len(m.Program) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Program))) + i += copy(dAtA[i:], m.Program) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Status) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Status) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Success { + dAtA[i] = 0x8 + i++ + if m.Success { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Error) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + if len(m.UserMessage) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.UserMessage))) + i += copy(dAtA[i:], m.UserMessage) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionCommand) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionCommand) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n29, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n30, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionMetadata.Size())) + n31, err := m.SessionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n32, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.BPFMetadata.Size())) + n33, err := m.BPFMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + if m.PPID != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.PPID)) + } + if len(m.Path) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + if len(m.Argv) > 0 { + for _, s := range m.Argv { + dAtA[i] = 0x42 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.ReturnCode != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ReturnCode)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionDisk) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionDisk) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n34, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n35, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionMetadata.Size())) + n36, err := m.SessionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n37, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.BPFMetadata.Size())) + n38, err := m.BPFMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + if len(m.Path) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + if m.Flags != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Flags)) + } + if m.ReturnCode != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ReturnCode)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionNetwork) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionNetwork) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n39, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n40, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionMetadata.Size())) + n41, err := m.SessionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n42, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.BPFMetadata.Size())) + n43, err := m.BPFMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + if len(m.SrcAddr) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.SrcAddr))) + i += copy(dAtA[i:], m.SrcAddr) + } + if len(m.DstAddr) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.DstAddr))) + i += copy(dAtA[i:], m.DstAddr) + } + if m.DstPort != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.DstPort)) + } + if m.TCPVersion != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.TCPVersion)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionData) Marshal() (dAtA []byte, err error) { + size := m.Size() 
+ dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionData) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n44, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n45, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionMetadata.Size())) + n46, err := m.SessionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n47, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n48, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + if m.BytesTransmitted != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.BytesTransmitted)) + } + if m.BytesReceived != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.BytesReceived)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SessionLeave) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionLeave) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n49, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n50, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionMetadata.Size())) + n51, err := m.SessionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n52, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n53, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *UserLogin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserLogin) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n54, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n55, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 + dAtA[i] = 0x1a + i++ + i = 
encodeVarintEvents(dAtA, i, uint64(m.Status.Size())) + n56, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 + if len(m.Method) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Method))) + i += copy(dAtA[i:], m.Method) + } + if m.IdentityAttributes != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.IdentityAttributes.Size())) + n57, err := m.IdentityAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n57 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ResourceMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Expires))) + n58, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Expires, dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + if len(m.UpdatedBy) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.UpdatedBy))) + i += copy(dAtA[i:], m.UpdatedBy) + } + if len(m.TTL) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.TTL))) + i += copy(dAtA[i:], m.TTL) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *UserCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n59, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n60, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n60 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n61, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n61 + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Connector) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Connector))) + i += copy(dAtA[i:], m.Connector) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *UserDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserDelete) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n62, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n62 + dAtA[i] = 0x12 + i++ + 
i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n63, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n63 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n64, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n64 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *UserPasswordChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserPasswordChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n65, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n65 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n66, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n66 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *AccessRequestCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccessRequestCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n67, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n67 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n68, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n68 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n69, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n69 + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.RequestID) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.RequestID))) + i += copy(dAtA[i:], m.RequestID) + } + if len(m.RequestState) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.RequestState))) + i += copy(dAtA[i:], m.RequestState) + } + if len(m.Delegator) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Delegator))) + i += copy(dAtA[i:], m.Delegator) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PortForward) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortForward) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n70, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n70 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n71, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, 
err + } + i += n71 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n72, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n72 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Status.Size())) + n73, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n73 + if len(m.Addr) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *X11Forward) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *X11Forward) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n74, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n74 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n75, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n75 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n76, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n76 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Status.Size())) + n77, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n77 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *CommandMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommandMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Command) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Command))) + i += copy(dAtA[i:], m.Command) + } + if len(m.ExitCode) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.ExitCode))) + i += copy(dAtA[i:], m.ExitCode) + } + if len(m.Error) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Exec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n78, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n78 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n79, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n79 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n80, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n80 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionMetadata.Size())) + 
n81, err := m.SessionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n81 + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n82, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n82 + dAtA[i] = 0x32 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.CommandMetadata.Size())) + n83, err := m.CommandMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n83 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SCP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SCP) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n84, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n84 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n85, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n85 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n86, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n86 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionMetadata.Size())) + n87, err := m.SessionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n87 + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n88, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n88 + dAtA[i] = 0x32 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.CommandMetadata.Size())) + n89, err := m.CommandMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n89 + if len(m.Path) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + if len(m.Action) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Subsystem) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subsystem) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n90, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n90 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n91, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n91 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n92, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n92 + if len(m.Name) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Error) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + if m.XXX_unrecognized != nil { + i 
+= copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ClientDisconnect) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientDisconnect) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n93, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n93 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n94, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n94 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n95, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n95 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ServerMetadata.Size())) + n96, err := m.ServerMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n96 + if len(m.Reason) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *AuthAttempt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthAttempt) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n97, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n97 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n98, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n98 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ConnectionMetadata.Size())) + n99, err := m.ConnectionMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n99 + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Status.Size())) + n100, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n100 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ResetPasswordTokenCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetPasswordTokenCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n101, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n101 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n102, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n102 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n103, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n103 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RoleCreate) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n104, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n104 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n105, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n105 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n106, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n106 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *RoleDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleDelete) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n107, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n107 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n108, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n108 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n109, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n109 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TrustedClusterCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TrustedClusterCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n110, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n110 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n111, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n111 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n112, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n112 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TrustedClusterDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TrustedClusterDelete) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n113, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n113 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n114, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, 
err + } + i += n114 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n115, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n115 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TrustedClusterTokenCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TrustedClusterTokenCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n116, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n116 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n117, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n117 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n118, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n118 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GithubConnectorCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GithubConnectorCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n119, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n119 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n120, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n120 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n121, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n121 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *GithubConnectorDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GithubConnectorDelete) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n122, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n122 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n123, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n123 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n124, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n124 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *OIDCConnectorCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OIDCConnectorCreate) MarshalTo(dAtA []byte) 
(int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n125, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n125 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n126, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n126 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n127, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n127 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *OIDCConnectorDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OIDCConnectorDelete) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n128, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n128 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n129, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n129 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n130, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n130 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SAMLConnectorCreate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SAMLConnectorCreate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n131, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n131 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n132, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n132 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n133, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n133 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SAMLConnectorDelete) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SAMLConnectorDelete) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Metadata.Size())) + n134, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n134 + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResourceMetadata.Size())) + n135, err := m.ResourceMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n135 + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserMetadata.Size())) + n136, err := m.UserMetadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, 
err + } + i += n136 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *OneOf) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OneOf) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Event != nil { + nn137, err := m.Event.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn137 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *OneOf_UserLogin) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.UserLogin != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserLogin.Size())) + n138, err := m.UserLogin.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n138 + } + return i, nil +} +func (m *OneOf_UserCreate) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.UserCreate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserCreate.Size())) + n139, err := m.UserCreate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n139 + } + return i, nil +} +func (m *OneOf_UserDelete) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.UserDelete != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserDelete.Size())) + n140, err := m.UserDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n140 + } + return i, nil +} +func (m *OneOf_UserPasswordChange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.UserPasswordChange != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.UserPasswordChange.Size())) + n141, err := m.UserPasswordChange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n141 + } + return i, nil +} +func (m *OneOf_SessionStart) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SessionStart != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionStart.Size())) + n142, err := m.SessionStart.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n142 + } + return i, nil +} +func (m *OneOf_SessionJoin) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SessionJoin != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionJoin.Size())) + n143, err := m.SessionJoin.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n143 + } + return i, nil +} +func (m *OneOf_SessionPrint) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SessionPrint != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionPrint.Size())) + n144, err := m.SessionPrint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n144 + } + return i, nil +} +func (m *OneOf_SessionReject) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SessionReject != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionReject.Size())) + n145, err := m.SessionReject.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n145 + } + return i, nil +} +func (m *OneOf_Resize) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Resize != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Resize.Size())) + n146, err := m.Resize.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n146 + } + return i, nil +} +func (m *OneOf_SessionEnd) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SessionEnd != 
nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionEnd.Size())) + n147, err := m.SessionEnd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n147 + } + return i, nil +} +func (m *OneOf_SessionCommand) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SessionCommand != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionCommand.Size())) + n148, err := m.SessionCommand.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n148 + } + return i, nil +} +func (m *OneOf_SessionDisk) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SessionDisk != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionDisk.Size())) + n149, err := m.SessionDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n149 + } + return i, nil +} +func (m *OneOf_SessionNetwork) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SessionNetwork != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionNetwork.Size())) + n150, err := m.SessionNetwork.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n150 + } + return i, nil +} +func (m *OneOf_SessionData) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SessionData != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionData.Size())) + n151, err := m.SessionData.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n151 + } + return i, nil +} +func (m *OneOf_SessionLeave) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SessionLeave != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SessionLeave.Size())) + n152, err := m.SessionLeave.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n152 + } + return i, nil +} +func (m *OneOf_PortForward) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.PortForward != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.PortForward.Size())) + n153, err := m.PortForward.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n153 + } + return i, nil +} +func (m *OneOf_X11Forward) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.X11Forward != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.X11Forward.Size())) + n154, err := m.X11Forward.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n154 + } + return i, nil +} +func (m *OneOf_SCP) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SCP != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SCP.Size())) + n155, err := m.SCP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n155 + } + return i, nil +} +func (m *OneOf_Exec) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Exec != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Exec.Size())) + n156, err := m.Exec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n156 + } + return i, nil +} +func (m *OneOf_Subsystem) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Subsystem != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.Subsystem.Size())) + n157, err := m.Subsystem.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n157 + } + return i, nil +} +func (m *OneOf_ClientDisconnect) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ClientDisconnect != nil { + dAtA[i] = 0xaa + i++ + 
dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ClientDisconnect.Size())) + n158, err := m.ClientDisconnect.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n158 + } + return i, nil +} +func (m *OneOf_AuthAttempt) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.AuthAttempt != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.AuthAttempt.Size())) + n159, err := m.AuthAttempt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n159 + } + return i, nil +} +func (m *OneOf_AccessRequestCreate) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.AccessRequestCreate != nil { + dAtA[i] = 0xba + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.AccessRequestCreate.Size())) + n160, err := m.AccessRequestCreate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n160 + } + return i, nil +} +func (m *OneOf_ResetPasswordTokenCreate) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResetPasswordTokenCreate != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.ResetPasswordTokenCreate.Size())) + n161, err := m.ResetPasswordTokenCreate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n161 + } + return i, nil +} +func (m *OneOf_RoleCreate) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RoleCreate != nil { + dAtA[i] = 0xca + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.RoleCreate.Size())) + n162, err := m.RoleCreate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n162 + } + return i, nil +} +func (m *OneOf_RoleDelete) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RoleDelete != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.RoleDelete.Size())) + n163, err := m.RoleDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n163 + } + return i, nil +} +func (m *OneOf_TrustedClusterCreate) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.TrustedClusterCreate != nil { + dAtA[i] = 0xda + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.TrustedClusterCreate.Size())) + n164, err := m.TrustedClusterCreate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n164 + } + return i, nil +} +func (m *OneOf_TrustedClusterDelete) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.TrustedClusterDelete != nil { + dAtA[i] = 0xe2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.TrustedClusterDelete.Size())) + n165, err := m.TrustedClusterDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n165 + } + return i, nil +} +func (m *OneOf_TrustedClusterTokenCreate) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.TrustedClusterTokenCreate != nil { + dAtA[i] = 0xea + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.TrustedClusterTokenCreate.Size())) + n166, err := m.TrustedClusterTokenCreate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n166 + } + return i, nil +} +func (m *OneOf_GithubConnectorCreate) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.GithubConnectorCreate != nil { + dAtA[i] = 0xf2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.GithubConnectorCreate.Size())) + n167, err := m.GithubConnectorCreate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n167 + } + return i, nil +} +func (m *OneOf_GithubConnectorDelete) MarshalTo(dAtA []byte) (int, error) { + 
i := 0 + if m.GithubConnectorDelete != nil { + dAtA[i] = 0xfa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.GithubConnectorDelete.Size())) + n168, err := m.GithubConnectorDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n168 + } + return i, nil +} +func (m *OneOf_OIDCConnectorCreate) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.OIDCConnectorCreate != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.OIDCConnectorCreate.Size())) + n169, err := m.OIDCConnectorCreate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n169 + } + return i, nil +} +func (m *OneOf_OIDCConnectorDelete) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.OIDCConnectorDelete != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.OIDCConnectorDelete.Size())) + n170, err := m.OIDCConnectorDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n170 + } + return i, nil +} +func (m *OneOf_SAMLConnectorCreate) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SAMLConnectorCreate != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SAMLConnectorCreate.Size())) + n171, err := m.SAMLConnectorCreate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n171 + } + return i, nil +} +func (m *OneOf_SAMLConnectorDelete) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.SAMLConnectorDelete != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x2 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.SAMLConnectorDelete.Size())) + n172, err := m.SAMLConnectorDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n172 + } + return i, nil +} +func (m *StreamStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.UploadID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEvents(dAtA, i, uint64(len(m.UploadID))) + i += copy(dAtA[i:], m.UploadID) + } + if m.LastEventIndex != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEvents(dAtA, i, uint64(m.LastEventIndex)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.LastUploadTime))) + n173, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastUploadTime, dAtA[i:]) + if err != nil { + return 0, err + } + i += n173 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Metadata) Size() (n int) { + var l int + _ = l + if m.Index != 0 { + n += 1 + sovEvents(uint64(m.Index)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.ID) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Code) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SessionMetadata) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if 
m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UserMetadata) Size() (n int) { + var l int + _ = l + l = len(m.User) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Login) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ServerMetadata) Size() (n int) { + var l int + _ = l + l = len(m.ServerNamespace) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.ServerID) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.ServerHostname) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.ServerAddr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if len(m.ServerLabels) > 0 { + for k, v := range m.ServerLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovEvents(uint64(len(k))) + 1 + len(v) + sovEvents(uint64(len(v))) + n += mapEntrySize + 1 + sovEvents(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConnectionMetadata) Size() (n int) { + var l int + _ = l + l = len(m.LocalAddr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.RemoteAddr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Protocol) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SessionStart) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.SessionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = len(m.TerminalSize) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SessionJoin) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.SessionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SessionPrint) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.ChunkIndex != 0 { + n += 1 + sovEvents(uint64(m.ChunkIndex)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Bytes != 0 { + n += 1 + sovEvents(uint64(m.Bytes)) + } + if m.DelayMilliseconds != 0 { + n += 1 + sovEvents(uint64(m.DelayMilliseconds)) + } + if m.Offset != 0 { + n += 1 + sovEvents(uint64(m.Offset)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SessionReject) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = len(m.Reason) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Maximum != 0 { + n += 1 + sovEvents(uint64(m.Maximum)) + } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func (m *Resize) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.SessionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = len(m.TerminalSize) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SessionEnd) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.SessionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.EnhancedRecording { + n += 2 + } + if m.Interactive { + n += 2 + } + if len(m.Participants) > 0 { + for _, s := range m.Participants { + l = len(s) + n += 1 + l + sovEvents(uint64(l)) + } + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime) + n += 1 + l + sovEvents(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTime) + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BPFMetadata) Size() (n int) { + var l int + _ = l + if m.PID != 0 { + n += 1 + sovEvents(uint64(m.PID)) + } + if m.CgroupID != 0 { + n += 1 + sovEvents(uint64(m.CgroupID)) + } + l = len(m.Program) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Status) Size() (n int) { + var l int + _ = l + if m.Success { + n += 2 + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.UserMessage) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SessionCommand) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.SessionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.BPFMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.PPID != 0 { + n += 1 + sovEvents(uint64(m.PPID)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if len(m.Argv) > 0 { + for _, s := range m.Argv { + l = len(s) + n += 1 + l + sovEvents(uint64(l)) + } + } + if m.ReturnCode != 0 { + n += 1 + sovEvents(uint64(m.ReturnCode)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SessionDisk) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.SessionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.BPFMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = len(m.Path) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Flags != 0 { + n += 1 + sovEvents(uint64(m.Flags)) + } + if m.ReturnCode != 0 { + n += 1 + sovEvents(uint64(m.ReturnCode)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + 
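// The generated Size methods above precompute the exact wire length of each
// event so that Marshal can allocate the output buffer once and hand it to
// MarshalTo. The recurring "n += 1 + l + sovEvents(uint64(l))" is one tag byte
// (these fields all have numbers below 16), plus a varint length prefix, plus
// the embedded message or string payload itself. A minimal standalone sketch
// of that varint arithmetic (not part of the generated file; helper names here
// are illustrative only):
package main

import "fmt"

// sizeOfVarint mirrors what sovEvents computes: how many base-128 bytes are
// needed to encode v, using the high bit of each byte as a continuation flag.
func sizeOfVarint(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// putVarint appends v to buf with the same base-128 encoding that
// encodeVarintEvents writes into dAtA at a fixed offset.
func putVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

func main() {
	// A 300-byte length-delimited field with a field number below 16 costs
	// 1 (tag) + 2 (varint length) + 300 (payload) = 303 bytes on the wire.
	payloadLen := 300
	fmt.Println(1+sizeOfVarint(uint64(payloadLen))+payloadLen, putVarint(nil, uint64(payloadLen)))
}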
return n +} + +func (m *SessionNetwork) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.SessionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.BPFMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = len(m.SrcAddr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.DstAddr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.DstPort != 0 { + n += 1 + sovEvents(uint64(m.DstPort)) + } + if m.TCPVersion != 0 { + n += 1 + sovEvents(uint64(m.TCPVersion)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SessionData) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.SessionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.BytesTransmitted != 0 { + n += 1 + sovEvents(uint64(m.BytesTransmitted)) + } + if m.BytesReceived != 0 { + n += 1 + sovEvents(uint64(m.BytesReceived)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SessionLeave) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.SessionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UserLogin) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovEvents(uint64(l)) + l = len(m.Method) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.IdentityAttributes != nil { + l = m.IdentityAttributes.Size() + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResourceMetadata) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Expires) + n += 1 + l + sovEvents(uint64(l)) + l = len(m.UpdatedBy) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.TTL) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UserCreate) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovEvents(uint64(l)) + } + } + l = len(m.Connector) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UserDelete) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = 
m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UserPasswordChange) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AccessRequestCreate) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovEvents(uint64(l)) + } + } + l = len(m.RequestID) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.RequestState) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Delegator) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PortForward) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovEvents(uint64(l)) + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *X11Forward) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CommandMetadata) Size() (n int) { + var l int + _ = l + l = len(m.Command) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.ExitCode) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exec) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.SessionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.CommandMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SCP) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.SessionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.CommandMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = len(m.Path) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Action) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*Subsystem) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ClientDisconnect) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ServerMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = len(m.Reason) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AuthAttempt) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ConnectionMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResetPasswordTokenCreate) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RoleCreate) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RoleDelete) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TrustedClusterCreate) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TrustedClusterDelete) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TrustedClusterTokenCreate) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GithubConnectorCreate) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = 
m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GithubConnectorDelete) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *OIDCConnectorCreate) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *OIDCConnectorDelete) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SAMLConnectorCreate) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SAMLConnectorDelete) Size() (n int) { + var l int + _ = l + l = m.Metadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.ResourceMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + l = m.UserMetadata.Size() + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *OneOf) Size() (n int) { + var l int + _ = l + if m.Event != nil { + n += m.Event.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *OneOf_UserLogin) Size() (n int) { + var l int + _ = l + if m.UserLogin != nil { + l = m.UserLogin.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_UserCreate) Size() (n int) { + var l int + _ = l + if m.UserCreate != nil { + l = m.UserCreate.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_UserDelete) Size() (n int) { + var l int + _ = l + if m.UserDelete != nil { + l = m.UserDelete.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_UserPasswordChange) Size() (n int) { + var l int + _ = l + if m.UserPasswordChange != nil { + l = m.UserPasswordChange.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SessionStart) Size() (n int) { + var l int + _ = l + if m.SessionStart != nil { + l = m.SessionStart.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SessionJoin) Size() (n int) { + var l int + _ = l + if m.SessionJoin != nil { + l = m.SessionJoin.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SessionPrint) Size() (n int) { + var l int + _ = l + if m.SessionPrint != nil { + l = m.SessionPrint.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SessionReject) Size() (n int) { + var l int + _ = l + if m.SessionReject != nil { + l = m.SessionReject.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m 
*OneOf_Resize) Size() (n int) { + var l int + _ = l + if m.Resize != nil { + l = m.Resize.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SessionEnd) Size() (n int) { + var l int + _ = l + if m.SessionEnd != nil { + l = m.SessionEnd.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SessionCommand) Size() (n int) { + var l int + _ = l + if m.SessionCommand != nil { + l = m.SessionCommand.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SessionDisk) Size() (n int) { + var l int + _ = l + if m.SessionDisk != nil { + l = m.SessionDisk.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SessionNetwork) Size() (n int) { + var l int + _ = l + if m.SessionNetwork != nil { + l = m.SessionNetwork.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SessionData) Size() (n int) { + var l int + _ = l + if m.SessionData != nil { + l = m.SessionData.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SessionLeave) Size() (n int) { + var l int + _ = l + if m.SessionLeave != nil { + l = m.SessionLeave.Size() + n += 1 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_PortForward) Size() (n int) { + var l int + _ = l + if m.PortForward != nil { + l = m.PortForward.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_X11Forward) Size() (n int) { + var l int + _ = l + if m.X11Forward != nil { + l = m.X11Forward.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SCP) Size() (n int) { + var l int + _ = l + if m.SCP != nil { + l = m.SCP.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_Exec) Size() (n int) { + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_Subsystem) Size() (n int) { + var l int + _ = l + if m.Subsystem != nil { + l = m.Subsystem.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_ClientDisconnect) Size() (n int) { + var l int + _ = l + if m.ClientDisconnect != nil { + l = m.ClientDisconnect.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_AuthAttempt) Size() (n int) { + var l int + _ = l + if m.AuthAttempt != nil { + l = m.AuthAttempt.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_AccessRequestCreate) Size() (n int) { + var l int + _ = l + if m.AccessRequestCreate != nil { + l = m.AccessRequestCreate.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_ResetPasswordTokenCreate) Size() (n int) { + var l int + _ = l + if m.ResetPasswordTokenCreate != nil { + l = m.ResetPasswordTokenCreate.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_RoleCreate) Size() (n int) { + var l int + _ = l + if m.RoleCreate != nil { + l = m.RoleCreate.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_RoleDelete) Size() (n int) { + var l int + _ = l + if m.RoleDelete != nil { + l = m.RoleDelete.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_TrustedClusterCreate) Size() (n int) { + var l int + _ = l + if m.TrustedClusterCreate != nil { + l = m.TrustedClusterCreate.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_TrustedClusterDelete) Size() (n int) { + var l int + _ = l + if m.TrustedClusterDelete != nil { + l = m.TrustedClusterDelete.Size() + n += 2 + l + sovEvents(uint64(l)) + } + 
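// The OneOf_* Size methods switch from "n += 1 + ..." to "n += 2 + ..." at
// PortForward because protobuf field keys are themselves varints of
// (fieldNumber << 3) | wireType: fields 1-15 fit in a single key byte, while
// fields 16 and above need two, which is also why the MarshalTo functions
// above write a second tag byte (0x82, 0x1 and onwards). A small standalone
// sketch of that key derivation (illustrative only, not part of the diff):
package main

import "fmt"

// fieldKey returns the varint-encoded key for a field; wire type 2 marks a
// length-delimited field such as an embedded event message.
func fieldKey(fieldNumber, wireType uint64) []byte {
	v := fieldNumber<<3 | wireType
	var key []byte
	for v >= 1<<7 {
		key = append(key, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(key, byte(v))
}

func main() {
	fmt.Printf("% x\n", fieldKey(1, 2))  // 0a    -> the 0xa tag used for field 1
	fmt.Printf("% x\n", fieldKey(16, 2)) // 82 01 -> the 0x82, 0x1 pair for field 16
	fmt.Printf("% x\n", fieldKey(32, 2)) // 82 02 -> the 0x82, 0x2 pair for field 32
}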
return n +} +func (m *OneOf_TrustedClusterTokenCreate) Size() (n int) { + var l int + _ = l + if m.TrustedClusterTokenCreate != nil { + l = m.TrustedClusterTokenCreate.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_GithubConnectorCreate) Size() (n int) { + var l int + _ = l + if m.GithubConnectorCreate != nil { + l = m.GithubConnectorCreate.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_GithubConnectorDelete) Size() (n int) { + var l int + _ = l + if m.GithubConnectorDelete != nil { + l = m.GithubConnectorDelete.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_OIDCConnectorCreate) Size() (n int) { + var l int + _ = l + if m.OIDCConnectorCreate != nil { + l = m.OIDCConnectorCreate.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_OIDCConnectorDelete) Size() (n int) { + var l int + _ = l + if m.OIDCConnectorDelete != nil { + l = m.OIDCConnectorDelete.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SAMLConnectorCreate) Size() (n int) { + var l int + _ = l + if m.SAMLConnectorCreate != nil { + l = m.SAMLConnectorCreate.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *OneOf_SAMLConnectorDelete) Size() (n int) { + var l int + _ = l + if m.SAMLConnectorDelete != nil { + l = m.SAMLConnectorDelete.Size() + n += 2 + l + sovEvents(uint64(l)) + } + return n +} +func (m *StreamStatus) Size() (n int) { + var l int + _ = l + l = len(m.UploadID) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.LastEventIndex != 0 { + n += 1 + sovEvents(uint64(m.LastEventIndex)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.LastUploadTime) + n += 1 + l + sovEvents(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEvents(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents 
+ } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Code = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
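// Each generated Unmarshal method, like Metadata.Unmarshal above, loops over
// the buffer reading a varint key, splitting it into a field number
// (wire >> 3) and a wire type (wire & 0x7), and then decoding the field body;
// fields it does not recognize are skipped with skipEvents and preserved in
// XXX_unrecognized. A minimal standalone decoding sketch of that loop shape
// (illustrative only, not the generated code):
package main

import (
	"errors"
	"fmt"
)

// readVarint decodes a base-128 varint starting at data[i] and returns the
// value together with the index just past it.
func readVarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// Field 2, wire type 2, carrying the three-byte string "scp".
	data := []byte{0x12, 0x03, 's', 'c', 'p'}
	key, i, err := readVarint(data, 0)
	if err != nil {
		panic(err)
	}
	length, i, err := readVarint(data, i)
	if err != nil {
		panic(err)
	}
	fmt.Println(key>>3, key&0x7, string(data[i:i+int(length)])) // 2 2 scp
}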
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Login", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 
{ + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Login = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServerMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerHostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerHostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServerLabels == nil { + m.ServerLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthEvents + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthEvents + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ServerLabels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectionMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectionMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectionMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LocalAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoteAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RemoteAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionStart) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionStart: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionStart: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SessionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminalSize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminalSize = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionJoin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionJoin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionJoin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SessionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionPrint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionPrint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionPrint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkIndex", wireType) + } + m.ChunkIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ChunkIndex |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) + } + m.Bytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Bytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DelayMilliseconds", wireType) + } + m.DelayMilliseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DelayMilliseconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionReject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionReject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionReject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType) + } + m.Maximum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Maximum |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resize) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resize: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resize: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SessionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminalSize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminalSize = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionEnd) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionEnd: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionEnd: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SessionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnhancedRecording", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.EnhancedRecording = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Interactive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Interactive = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Participants", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Participants = append(m.Participants, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := 
iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EndTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BPFMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BPFMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BPFMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType) + } + m.PID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupID", wireType) + } + m.CgroupID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CgroupID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Program", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Program = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Success = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMessage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserMessage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionCommand) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionCommand: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionCommand: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SessionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BPFMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BPFMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PPID", wireType) + } + m.PPID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PPID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Argv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Argv = append(m.Argv, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReturnCode", wireType) + } + m.ReturnCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReturnCode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionDisk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionDisk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionDisk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SessionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BPFMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BPFMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReturnCode", wireType) + } + m.ReturnCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReturnCode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionNetwork) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionNetwork: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionNetwork: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SessionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BPFMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BPFMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SrcAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SrcAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DstAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DstAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DstPort", wireType) + } + m.DstPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DstPort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TCPVersion", wireType) + } + m.TCPVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TCPVersion |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SessionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesTransmitted", wireType) + } + m.BytesTransmitted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BytesTransmitted |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesReceived", wireType) + } + m.BytesReceived = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BytesReceived |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionLeave) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionLeave: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionLeave: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field SessionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SessionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserLogin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserLogin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserLogin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Method = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IdentityAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IdentityAttributes == nil { + m.IdentityAttributes = &Struct{} + } + if err := m.IdentityAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expires", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Expires, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedBy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdatedBy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TTL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Connector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Connector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserDelete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserDelete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserPasswordChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserPasswordChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserPasswordChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AccessRequestCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AccessRequestCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AccessRequestCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestState", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestState = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delegator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Delegator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortForward) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortForward: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortForward: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *X11Forward) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: X11Forward: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: X11Forward: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommandMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommandMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommandMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExitCode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SessionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommandMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommandMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SCP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SCP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SCP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SessionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommandMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommandMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subsystem) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subsystem: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subsystem: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientDisconnect) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientDisconnect: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientDisconnect: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServerMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthAttempt) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthAttempt: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthAttempt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResetPasswordTokenCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetPasswordTokenCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetPasswordTokenCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleDelete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleDelete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TrustedClusterCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TrustedClusterCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TrustedClusterCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TrustedClusterDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TrustedClusterDelete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TrustedClusterDelete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TrustedClusterTokenCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TrustedClusterTokenCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TrustedClusterTokenCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GithubConnectorCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GithubConnectorCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GithubConnectorCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GithubConnectorDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GithubConnectorDelete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GithubConnectorDelete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OIDCConnectorCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OIDCConnectorCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OIDCConnectorCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OIDCConnectorDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OIDCConnectorDelete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OIDCConnectorDelete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SAMLConnectorCreate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SAMLConnectorCreate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SAMLConnectorCreate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SAMLConnectorDelete) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SAMLConnectorDelete: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SAMLConnectorDelete: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ResourceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OneOf) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OneOf: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OneOf: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserLogin", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &UserLogin{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_UserLogin{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserCreate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &UserCreate{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_UserCreate{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &UserDelete{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_UserDelete{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserPasswordChange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &UserPasswordChange{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_UserPasswordChange{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionStart", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SessionStart{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SessionStart{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionJoin", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SessionJoin{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SessionJoin{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionPrint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SessionPrint{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SessionPrint{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionReject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SessionReject{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SessionReject{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resize", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Resize{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_Resize{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionEnd", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SessionEnd{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SessionEnd{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCommand", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SessionCommand{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SessionCommand{v} + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SessionDisk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SessionDisk{v} + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionNetwork", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SessionNetwork{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SessionNetwork{v} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SessionData{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SessionData{v} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionLeave", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SessionLeave{} + if 
err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SessionLeave{v} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortForward", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PortForward{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_PortForward{v} + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field X11Forward", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &X11Forward{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_X11Forward{v} + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SCP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SCP{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SCP{v} + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Exec{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_Exec{v} + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subsystem", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Subsystem{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_Subsystem{v} + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientDisconnect", wireType) + } + var msglen int 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ClientDisconnect{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_ClientDisconnect{v} + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthAttempt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &AuthAttempt{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_AuthAttempt{v} + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessRequestCreate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &AccessRequestCreate{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_AccessRequestCreate{v} + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetPasswordTokenCreate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResetPasswordTokenCreate{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_ResetPasswordTokenCreate{v} + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleCreate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RoleCreate{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_RoleCreate{v} + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RoleDelete{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_RoleDelete{v} + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedClusterCreate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TrustedClusterCreate{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_TrustedClusterCreate{v} + iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedClusterDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TrustedClusterDelete{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_TrustedClusterDelete{v} + iNdEx = postIndex + case 29: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedClusterTokenCreate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TrustedClusterTokenCreate{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_TrustedClusterTokenCreate{v} + iNdEx = postIndex + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GithubConnectorCreate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &GithubConnectorCreate{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_GithubConnectorCreate{v} + iNdEx = postIndex + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GithubConnectorDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &GithubConnectorDelete{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_GithubConnectorDelete{v} + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OIDCConnectorCreate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &OIDCConnectorCreate{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_OIDCConnectorCreate{v} + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OIDCConnectorDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &OIDCConnectorDelete{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_OIDCConnectorDelete{v} + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SAMLConnectorCreate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SAMLConnectorCreate{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SAMLConnectorCreate{v} + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SAMLConnectorDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SAMLConnectorDelete{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Event = &OneOf_SAMLConnectorDelete{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UploadID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UploadID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastEventIndex", wireType) + } + m.LastEventIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastEventIndex |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUploadTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.LastUploadTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEvents + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEvents(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("events.proto", fileDescriptor_events_577f8782c6396ca4) } + +var fileDescriptor_events_577f8782c6396ca4 = []byte{ + // 2864 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x1c, 0x49, + 0xf5, 0x9f, 0xdf, 0x3f, 0x6a, 0x26, 0x89, 0x5d, 0x76, 0x92, 0x4e, 0x36, 0x49, 0x3b, 0x9d, 0xef, + 0x46, 0x5e, 0xed, 0xae, 0xa3, 0x78, 0xfd, 0xdd, 0x2c, 0x2b, 0xd0, 0xae, 0x67, 0xc6, 0x61, 0x8c, + 0x9c, 0xf5, 0xa8, 0xec, 0xc0, 0x5e, 0x20, 0x6a, 0x4f, 0x57, 0xc6, 0x4d, 0x66, 0xba, 0x87, 0xee, + 0x1a, 0xaf, 0x9d, 0x13, 0x2b, 0x38, 0x70, 0xd8, 0x03, 0x82, 0x0b, 0x12, 0x07, 0xb8, 0x70, 0xe3, + 0x00, 0x9c, 0xb8, 0x03, 0x52, 0x04, 0x02, 0x56, 0x0b, 0x57, 0x06, 0x08, 0xb7, 0xf9, 0x13, 0x56, + 0x42, 0x42, 0xf5, 0xaa, 0x7a, 0xba, 0xba, 0xa7, 0xed, 0xec, 0xc6, 0x91, 0x2c, 0x7b, 0x7d, 0x4a, + 0xe6, 0x7d, 0xde, 0x7b, 0x55, 0xf5, 0xde, 0xab, 0xea, 0xf7, 0x5e, 0x95, 0x51, 0x95, 0xee, 0x50, + 0x87, 0xf9, 0x0b, 0x7d, 0xcf, 0x65, 0x2e, 0x2e, 0x88, 0x5f, 0x97, 0x67, 0x3b, 0x6e, 0xc7, 0x05, + 0xd2, 0x2d, 0xfe, 0x3f, 0x81, 0x5e, 0xd6, 0x3b, 0xae, 0xdb, 0xe9, 0xd2, 0x5b, 0xf0, 0x6b, 0x6b, + 0xf0, 0xf0, 0x16, 0xb3, 0x7b, 0xd4, 0x67, 0x66, 0xaf, 0x2f, 0x19, 0xae, 0xc4, 0x19, 0x7c, 0xe6, + 0x0d, 0xda, 0x4c, 0xa0, 0xc6, 0xdf, 0xd3, 0xa8, 0x74, 0x8f, 0x32, 0xd3, 0x32, 0x99, 0x89, 0xaf, + 0xa0, 0xfc, 0xaa, 0x63, 0xd1, 0x5d, 0x2d, 0x3d, 0x97, 0x9e, 0xcf, 0xd6, 0x0a, 0xa3, 0xa1, 0x9e, + 0xa1, 0x36, 0x11, 0x44, 0x7c, 0x15, 0xe5, 0x36, 0xf7, 
0xfa, 0x54, 0xcb, 0xcc, 0xa5, 0xe7, 0xcb, + 0xb5, 0xf2, 0x68, 0xa8, 0xe7, 0x61, 0x66, 0x04, 0xc8, 0xf8, 0x3a, 0xca, 0xac, 0x36, 0xb4, 0x2c, + 0x80, 0xd3, 0xa3, 0xa1, 0x7e, 0x66, 0x60, 0x5b, 0xaf, 0xb9, 0x3d, 0x9b, 0xd1, 0x5e, 0x9f, 0xed, + 0x91, 0xcc, 0x6a, 0x03, 0xdf, 0x44, 0xb9, 0xba, 0x6b, 0x51, 0x2d, 0x07, 0x4c, 0x78, 0x34, 0xd4, + 0xcf, 0xb6, 0x5d, 0x8b, 0x2a, 0x5c, 0x80, 0xe3, 0x77, 0x51, 0x6e, 0xd3, 0xee, 0x51, 0x2d, 0x3f, + 0x97, 0x9e, 0xaf, 0x2c, 0x5e, 0x5e, 0x10, 0x2b, 0x58, 0x08, 0x56, 0xb0, 0xb0, 0x19, 0x2c, 0xb1, + 0x36, 0xf5, 0x64, 0xa8, 0xa7, 0x46, 0x43, 0x3d, 0xc7, 0x57, 0xfd, 0xc3, 0x7f, 0xea, 0x69, 0x02, + 0x92, 0xc6, 0x5b, 0xe8, 0xdc, 0x06, 0xf5, 0x7d, 0xdb, 0x75, 0xc6, 0x8b, 0x7b, 0x19, 0x95, 0x25, + 0x69, 0xb5, 0x01, 0x0b, 0x2c, 0xd7, 0x8a, 0xa3, 0xa1, 0x9e, 0xf5, 0x6d, 0x8b, 0x84, 0x88, 0xf1, + 0x0d, 0x54, 0xbd, 0xef, 0x53, 0x4f, 0xb1, 0x49, 0x8e, 0xff, 0x96, 0x12, 0x25, 0x3e, 0xd6, 0xc0, + 0xa7, 0x1e, 0x01, 0x2a, 0x7e, 0x05, 0xe5, 0xd7, 0xdc, 0x8e, 0xed, 0x48, 0xa3, 0xcc, 0x8c, 0x86, + 0xfa, 0xb9, 0x2e, 0x27, 0x28, 0x6b, 0x12, 0x1c, 0xc6, 0xaf, 0xb2, 0xe8, 0xec, 0x06, 0xf5, 0x76, + 0x14, 0xdd, 0xcb, 0x7c, 0x96, 0x9c, 0xf2, 0x9e, 0xd9, 0xa3, 0x7e, 0xdf, 0x6c, 0x53, 0x39, 0xcc, + 0xc5, 0xd1, 0x50, 0x9f, 0x71, 0x02, 0xa2, 0xa2, 0x2b, 0xce, 0x8f, 0x5f, 0x41, 0x25, 0x41, 0x5a, + 0x6d, 0xc8, 0x39, 0x9c, 0x19, 0x0d, 0xf5, 0xb2, 0x0f, 0xb4, 0x07, 0xb6, 0x45, 0xc6, 0x30, 0x5e, + 0x09, 0xc6, 0x6f, 0xba, 0x3e, 0xe3, 0xca, 0xa5, 0xb3, 0xae, 0x8e, 0x86, 0xfa, 0x25, 0x29, 0xb0, + 0x2d, 0x21, 0x65, 0xc8, 0x98, 0x10, 0xfe, 0x12, 0x42, 0x82, 0xb2, 0x6c, 0x59, 0x9e, 0x74, 0xe5, + 0xa5, 0xd1, 0x50, 0x3f, 0x2f, 0x55, 0x98, 0x96, 0xe5, 0x29, 0xe2, 0x0a, 0x33, 0xee, 0xa1, 0xaa, + 0xf8, 0xb5, 0x66, 0x6e, 0xd1, 0xae, 0xaf, 0xe5, 0xe7, 0xb2, 0xf3, 0x95, 0xc5, 0xf9, 0x05, 0x19, + 0xee, 0x51, 0xeb, 0x2c, 0xa8, 0xac, 0x2b, 0x0e, 0xf3, 0xf6, 0x6a, 0xba, 0xf4, 0xf6, 0x45, 0x39, + 0x54, 0x17, 0x30, 0x65, 0xb0, 0x88, 0xfa, 0xcb, 0xef, 0xa0, 0xe9, 0x09, 0x1d, 0x78, 0x0a, 0x65, + 0x1f, 0xd1, 0x3d, 0x61, 0x67, 0xc2, 0xff, 0x8b, 0x67, 0x51, 0x7e, 0xc7, 0xec, 0x0e, 0x64, 0x60, + 0x13, 0xf1, 0xe3, 0xed, 0xcc, 0x5b, 0x69, 0xe3, 0xb7, 0x69, 0x84, 0xeb, 0xae, 0xe3, 0xd0, 0x36, + 0x53, 0x23, 0xe9, 0x4d, 0x54, 0x5e, 0x73, 0xdb, 0x66, 0x17, 0x0c, 0x20, 0x1c, 0xa6, 0x8d, 0x86, + 0xfa, 0x2c, 0x5f, 0xf9, 0x42, 0x97, 0x23, 0xca, 0x94, 0x42, 0x56, 0x6e, 0x39, 0x42, 0x7b, 0x2e, + 0xa3, 0x20, 0x98, 0x09, 0x2d, 0x07, 0x82, 0x1e, 0x40, 0xaa, 0xe5, 0x42, 0x66, 0x7c, 0x0b, 0x95, + 0x5a, 0x3c, 0xfa, 0xdb, 0x6e, 0x57, 0x7a, 0x0d, 0x42, 0x0d, 0x76, 0x84, 0x22, 0x32, 0x66, 0x32, + 0xbe, 0x97, 0xe5, 0xb6, 0x86, 0xa0, 0xde, 0x60, 0xa6, 0xc7, 0xf0, 0xdb, 0xe1, 0x3e, 0x87, 0x39, + 0x57, 0x16, 0xa7, 0x02, 0xbb, 0x07, 0xf4, 0x5a, 0x95, 0xdb, 0xf7, 0xe3, 0xa1, 0x9e, 0x1e, 0x0d, + 0xf5, 0x14, 0x29, 0x29, 0x0b, 0x16, 0x7b, 0x20, 0x03, 0x72, 0xb3, 0x81, 0x9c, 0xba, 0x4f, 0x62, + 0xb2, 0x62, 0x77, 0xbc, 0x83, 0x8a, 0x72, 0x0e, 0x30, 0xe9, 0xca, 0xe2, 0xc5, 0xd0, 0xd5, 0x91, + 0xcd, 0x19, 0x93, 0x0e, 0xa4, 0xf0, 0x97, 0x51, 0x41, 0x78, 0x10, 0xe2, 0xac, 0xb2, 0x78, 0x21, + 0x39, 0x54, 0x62, 0xe2, 0x52, 0x06, 0x37, 0x11, 0x0a, 0xbd, 0x37, 0x3e, 0x4c, 0xa4, 0x86, 0x49, + 0xbf, 0xc6, 0xb4, 0x28, 0xb2, 0xf8, 0x4d, 0x54, 0xdd, 0xa4, 0x5e, 0xcf, 0x76, 0xcc, 0xee, 0x86, + 0xfd, 0x98, 0x6a, 0x85, 0xf0, 0x00, 0xf3, 0xed, 0xc7, 0xaa, 0xd3, 0x22, 0x7c, 0xc6, 0x1f, 0x33, + 0xa8, 0x22, 0xd7, 0xf2, 0x35, 0xd7, 0x76, 0x4e, 0x9d, 0x70, 0x08, 0x27, 0x18, 0x1f, 0x65, 0xc6, + 0x21, 0xdd, 0xf2, 0x6c, 0xe7, 0x70, 0x21, 0x7d, 0x13, 0xa1, 0xfa, 0xf6, 0xc0, 
0x79, 0x24, 0xbe, + 0x77, 0x99, 0xf0, 0x7b, 0xd7, 0xb6, 0x89, 0x82, 0xf0, 0x8f, 0x5e, 0x83, 0xeb, 0xe7, 0xa6, 0xab, + 0xd6, 0xca, 0x4f, 0x84, 0xa6, 0xf4, 0xeb, 0x04, 0xc8, 0x58, 0x47, 0xf9, 0xda, 0x1e, 0xa3, 0x3e, + 0x98, 0x26, 0x2b, 0x3e, 0x8a, 0x5b, 0x9c, 0x40, 0x04, 0x1d, 0x2f, 0xa1, 0xe9, 0x06, 0xed, 0x9a, + 0x7b, 0xf7, 0xec, 0x6e, 0xd7, 0xf6, 0x69, 0xdb, 0x75, 0x2c, 0x1f, 0xac, 0x20, 0x87, 0xeb, 0xf9, + 0x64, 0x92, 0x01, 0x1b, 0xa8, 0xb0, 0xfe, 0xf0, 0xa1, 0x4f, 0x19, 0x44, 0x5a, 0xb6, 0x86, 0x46, + 0x43, 0xbd, 0xe0, 0x02, 0x85, 0x48, 0xc4, 0xf8, 0x24, 0x83, 0xce, 0x48, 0x73, 0x10, 0xfa, 0x6d, + 0xda, 0x3e, 0x9a, 0x2d, 0x1e, 0x06, 0x47, 0xf6, 0xd0, 0xc1, 0x91, 0x3b, 0xc4, 0x0e, 0x35, 0x50, + 0x81, 0x50, 0xd3, 0x97, 0x21, 0x56, 0x16, 0x16, 0xf3, 0x80, 0x42, 0x24, 0x82, 0xaf, 0xa3, 0xe2, + 0x3d, 0x73, 0xd7, 0xee, 0x0d, 0x7a, 0xd2, 0xac, 0xf0, 0xfd, 0xef, 0x99, 0xbb, 0x24, 0xa0, 0x1b, + 0xff, 0xcd, 0x70, 0x3d, 0x7c, 0x4f, 0x1f, 0xcf, 0xbd, 0xfa, 0xe2, 0x0c, 0x1a, 0x3a, 0x36, 0xff, + 0x1c, 0x8e, 0x7d, 0xde, 0x03, 0xf3, 0xd7, 0x79, 0x9e, 0x5d, 0xc0, 0x5a, 0x56, 0x1c, 0xeb, 0xd4, + 0x07, 0x87, 0xf1, 0x41, 0x03, 0x4d, 0xaf, 0x38, 0xdb, 0xa6, 0xd3, 0xa6, 0x16, 0xa1, 0x6d, 0xd7, + 0xb3, 0x6c, 0xa7, 0x03, 0x8e, 0x28, 0xd5, 0x2e, 0x8c, 0x86, 0x3a, 0xa6, 0x12, 0x7c, 0xe0, 0x05, + 0x28, 0x99, 0x14, 0xc0, 0xb7, 0x51, 0x65, 0xd5, 0x61, 0xd4, 0x33, 0xdb, 0xcc, 0xde, 0xa1, 0x5a, + 0x11, 0xe4, 0xcf, 0x8d, 0x86, 0x7a, 0xc5, 0x0e, 0xc9, 0x44, 0xe5, 0xc1, 0x4b, 0xa8, 0xda, 0x32, + 0x3d, 0x66, 0xb7, 0xed, 0xbe, 0xe9, 0x30, 0x5f, 0x2b, 0xcd, 0x65, 0xe7, 0xcb, 0xb5, 0xa9, 0xd1, + 0x50, 0xaf, 0xf6, 0x15, 0x3a, 0x89, 0x70, 0xe1, 0x6f, 0xa2, 0x32, 0x64, 0x2a, 0x90, 0xf9, 0x97, + 0x9f, 0x99, 0xf9, 0xdf, 0x08, 0x73, 0x41, 0x30, 0xfb, 0x03, 0x9f, 0x0b, 0x87, 0x81, 0x05, 0xc5, + 0x40, 0xa8, 0x11, 0xbf, 0x8f, 0x8a, 0x2b, 0x8e, 0x05, 0xca, 0xd1, 0x33, 0x95, 0x1b, 0x52, 0xf9, + 0x85, 0x50, 0xb9, 0xdb, 0x8f, 0xe9, 0x0e, 0xd4, 0x19, 0x8f, 0x51, 0xa5, 0xd6, 0xba, 0x3b, 0x8e, + 0xbb, 0x4b, 0x28, 0xdb, 0x92, 0x15, 0x46, 0x4e, 0x9c, 0x30, 0x7d, 0xdb, 0x22, 0x9c, 0xc6, 0x93, + 0xf5, 0x7a, 0xc7, 0x73, 0x07, 0x7d, 0x99, 0xac, 0xe7, 0x44, 0xb2, 0xde, 0x06, 0x1a, 0x24, 0xeb, + 0x01, 0x8c, 0x5f, 0x46, 0xc5, 0x96, 0xe7, 0x76, 0x3c, 0xb3, 0x27, 0xf3, 0xbd, 0xca, 0x68, 0xa8, + 0x17, 0xfb, 0x82, 0x44, 0x02, 0xcc, 0xf8, 0x71, 0x1a, 0x15, 0x36, 0x98, 0xc9, 0x06, 0x3e, 0x97, + 0xd8, 0x18, 0xb4, 0xdb, 0xd4, 0xf7, 0x61, 0xec, 0x92, 0x90, 0xf0, 0x05, 0x89, 0x04, 0x18, 0xaf, + 0x58, 0x56, 0x3c, 0xcf, 0xf5, 0xd4, 0x8a, 0x85, 0x72, 0x82, 0x5a, 0xb1, 0x00, 0x07, 0xbe, 0x83, + 0x2a, 0x62, 0xb7, 0xf8, 0xbe, 0xd9, 0x09, 0xaa, 0x85, 0xf3, 0xa3, 0xa1, 0x3e, 0xdd, 0x13, 0x24, + 0x45, 0x44, 0xe5, 0x34, 0x7e, 0x07, 0xa5, 0x0e, 0x58, 0xae, 0xee, 0xf6, 0x7a, 0xe6, 0x71, 0xdd, + 0xc9, 0x87, 0xcb, 0x7c, 0xde, 0x40, 0xd9, 0x5a, 0xeb, 0xae, 0xdc, 0xba, 0x33, 0x81, 0xa8, 0x12, + 0x2a, 0x31, 0x39, 0xce, 0xcd, 0xcb, 0xcd, 0x16, 0x0f, 0x9f, 0x02, 0x84, 0x07, 0x94, 0x9b, 0x7d, + 0x1e, 0x3f, 0x40, 0x05, 0xd4, 0x64, 0xdb, 0xb0, 0x0b, 0x65, 0x31, 0xda, 0x37, 0xd9, 0x36, 0x01, + 0x2a, 0x47, 0x97, 0xbd, 0xce, 0x8e, 0xdc, 0x6f, 0x80, 0x9a, 0x5e, 0x67, 0x87, 0x00, 0x15, 0xdf, + 0xe2, 0xd5, 0x07, 0x1b, 0x78, 0x0e, 0x94, 0xe0, 0x7c, 0x83, 0xe5, 0xc5, 0x3e, 0xf6, 0x80, 0xfa, + 0x80, 0x57, 0xe2, 0x44, 0x61, 0x31, 0x7e, 0x91, 0x1d, 0x27, 0xaf, 0x0d, 0xdb, 0x7f, 0x74, 0xea, + 0xc2, 0xcf, 0xe1, 0x42, 0xee, 0xa4, 0x42, 0xa2, 0x93, 0x74, 0x94, 0xbf, 0xdb, 0x35, 0x3b, 0x3e, + 0xf8, 0x30, 0x2f, 0x32, 0xc6, 0x87, 0x9c, 0x40, 0x04, 0x3d, 0xe6, 0xa7, 0xd2, 0xb3, 0xfd, 0xf4, + 0x8f, 
0x70, 0xb7, 0xbd, 0x47, 0xd9, 0x07, 0xae, 0x77, 0xea, 0xaa, 0xcf, 0xea, 0xaa, 0x9b, 0xa8, + 0xb8, 0xe1, 0xb5, 0xa1, 0x1c, 0x17, 0xde, 0xaa, 0x8e, 0x86, 0x7a, 0xc9, 0xf7, 0xda, 0xd0, 0xc5, + 0x20, 0x01, 0xc8, 0xf9, 0x1a, 0x3e, 0x03, 0xbe, 0x62, 0xc8, 0x67, 0xf9, 0x4c, 0xf2, 0x49, 0x50, + 0xf2, 0xb5, 0x5c, 0x8f, 0x49, 0xc7, 0x8d, 0xf9, 0xfa, 0xae, 0xc7, 0x48, 0x00, 0xe2, 0x57, 0x11, + 0xda, 0xac, 0xb7, 0xbe, 0x4e, 0x3d, 0x30, 0x97, 0xd8, 0x8b, 0x70, 0x5c, 0xef, 0x08, 0x12, 0x51, + 0x60, 0xe3, 0x97, 0xca, 0x3e, 0xe4, 0x0e, 0x3a, 0x2d, 0x22, 0x0f, 0x91, 0x52, 0x2d, 0xa2, 0x29, + 0x28, 0xcc, 0x36, 0x3d, 0xd3, 0xf1, 0x7b, 0x36, 0x63, 0xd4, 0x92, 0x67, 0x2d, 0x94, 0x63, 0x6c, + 0x97, 0x4c, 0xe0, 0xf8, 0x35, 0x74, 0x06, 0x68, 0x84, 0xb6, 0xa9, 0xbd, 0x43, 0x2d, 0x88, 0x01, + 0x29, 0xe0, 0xed, 0x92, 0x28, 0x68, 0xfc, 0x29, 0x2c, 0x53, 0xd7, 0xa8, 0xb9, 0x43, 0x4f, 0xfd, + 0x75, 0x98, 0xa2, 0xff, 0xe3, 0x0c, 0x2a, 0xf3, 0x15, 0x41, 0x0f, 0xf5, 0x48, 0x4c, 0xb9, 0x14, + 0x64, 0x58, 0xd2, 0x92, 0x67, 0xc7, 0x96, 0x00, 0xea, 0x84, 0x05, 0x44, 0x36, 0xf6, 0x1a, 0x2a, + 0xdc, 0xa3, 0x6c, 0xdb, 0xb5, 0x64, 0x87, 0x74, 0x76, 0x34, 0xd4, 0xa7, 0x7a, 0x40, 0x51, 0xb2, + 0x26, 0xc9, 0x83, 0x1f, 0x21, 0xbc, 0x6a, 0x51, 0x87, 0xd9, 0x6c, 0x6f, 0x99, 0x31, 0xcf, 0xde, + 0x1a, 0x30, 0xea, 0x4b, 0xbb, 0x5d, 0x9c, 0xc8, 0x53, 0x37, 0xa0, 0x81, 0x5f, 0x33, 0xa0, 0xe7, + 0x38, 0x66, 0x0f, 0xd5, 0x7e, 0x3a, 0xd4, 0x0b, 0x82, 0x87, 0x24, 0xa8, 0x35, 0x9e, 0xa6, 0xd1, + 0x14, 0xa1, 0xbe, 0x3b, 0xf0, 0xda, 0x54, 0xe9, 0x87, 0xe4, 0xde, 0x33, 0x7b, 0x41, 0xff, 0x19, + 0x0a, 0xb5, 0x58, 0x1f, 0x18, 0x70, 0xbc, 0x8a, 0x8a, 0x2b, 0xbb, 0x7d, 0xdb, 0xa3, 0xbe, 0x34, + 0xe4, 0x41, 0x69, 0xf4, 0x8c, 0x4c, 0xa3, 0x8b, 0x54, 0x88, 0xc8, 0xbc, 0x59, 0xfc, 0xc0, 0x6f, + 0xa2, 0xf2, 0xfd, 0xbe, 0x65, 0x32, 0x6a, 0xd5, 0xf6, 0x64, 0x72, 0x09, 0x6d, 0xd4, 0x81, 0x20, + 0x3e, 0xd8, 0xda, 0x53, 0xdb, 0xa8, 0x63, 0x56, 0x7c, 0x03, 0x65, 0x37, 0x37, 0xd7, 0xa4, 0x5d, + 0xe1, 0xa6, 0x81, 0x31, 0xb5, 0xe3, 0xca, 0x51, 0xe3, 0x47, 0x19, 0x84, 0xb8, 0xfb, 0xea, 0x1e, + 0x35, 0xd9, 0xd1, 0xec, 0xc1, 0x1a, 0x2a, 0x05, 0x66, 0x96, 0xa1, 0xa3, 0x05, 0xb2, 0x71, 0xf3, + 0xc7, 0xc7, 0x0e, 0x70, 0x9e, 0x2d, 0x10, 0xb7, 0x0b, 0xfd, 0xa5, 0x6c, 0x70, 0xe9, 0xe2, 0x71, + 0x02, 0x11, 0x74, 0xfc, 0x2a, 0x2a, 0xcb, 0xdd, 0xe2, 0x7a, 0xb2, 0xf5, 0x21, 0x6a, 0x8a, 0x80, + 0x48, 0x42, 0xdc, 0xf8, 0x7d, 0x5a, 0x18, 0xa5, 0x41, 0xbb, 0xf4, 0xf8, 0x1a, 0xc5, 0xf8, 0x41, + 0x1a, 0x61, 0xae, 0xac, 0x65, 0xfa, 0xfe, 0x07, 0xae, 0x67, 0xd5, 0xb7, 0x4d, 0xa7, 0x73, 0x24, + 0xcb, 0x31, 0x3e, 0xcd, 0xa0, 0x99, 0x65, 0x51, 0x61, 0xd1, 0xef, 0x0c, 0xa8, 0xcf, 0x4e, 0x7a, + 0xbc, 0xfd, 0x1f, 0x2a, 0xcb, 0x95, 0xae, 0x36, 0x64, 0xbc, 0xc1, 0x77, 0xd0, 0xb6, 0x48, 0x08, + 0xe0, 0xd7, 0x51, 0x55, 0xfe, 0xe0, 0xc7, 0x61, 0xd0, 0xfe, 0x01, 0x6d, 0x3e, 0x27, 0x90, 0x08, + 0x8c, 0xff, 0x1f, 0x95, 0x79, 0x48, 0x76, 0x4c, 0x1e, 0xc4, 0xc5, 0xf0, 0x06, 0xcc, 0x0a, 0x88, + 0xea, 0x41, 0x30, 0xe6, 0x34, 0x7e, 0x96, 0x41, 0x15, 0x9e, 0x4e, 0xdd, 0x75, 0xbd, 0x0f, 0x4c, + 0xef, 0x68, 0x6a, 0xcc, 0xe8, 0x97, 0x2e, 0x7b, 0x88, 0xcc, 0x24, 0xfc, 0xce, 0xe4, 0x3e, 0xc7, + 0x77, 0x86, 0xd7, 0x7c, 0x3c, 0x2d, 0xcd, 0x87, 0xc5, 0x06, 0xa4, 0xa4, 0x40, 0x35, 0xbe, 0x9b, + 0x41, 0xe8, 0xfd, 0xdb, 0xb7, 0xbf, 0xc0, 0x06, 0x32, 0x7e, 0x9a, 0x46, 0xe7, 0x64, 0x13, 0x42, + 0xb9, 0x0a, 0x2e, 0x4a, 0x92, 0xfc, 0xde, 0x41, 0xee, 0xdd, 0x16, 0x24, 0x12, 0x60, 0x78, 0x11, + 0x95, 0x56, 0x76, 0x6d, 0x06, 0x75, 0x98, 0xe8, 0x96, 0x88, 0xbe, 0x99, 0xa4, 0xa9, 0xf7, 0x6e, + 0x01, 0x1f, 0x7e, 0x3d, 0x68, 
0xaf, 0x64, 0xc3, 0x30, 0xe6, 0x02, 0x2b, 0x89, 0x2d, 0x16, 0xe3, + 0xfb, 0x59, 0x94, 0x5b, 0xd9, 0xa5, 0xed, 0x63, 0xee, 0x1a, 0x25, 0xdd, 0xcc, 0x1d, 0x32, 0xdd, + 0x7c, 0x9e, 0x4e, 0xe7, 0x3b, 0xa1, 0x3f, 0x0b, 0xd1, 0xe1, 0x63, 0x9e, 0x8f, 0x0f, 0x2f, 0x61, + 0xe3, 0x0f, 0x59, 0x94, 0xdd, 0xa8, 0xb7, 0x4e, 0xbd, 0x70, 0x94, 0x5e, 0x78, 0x46, 0x77, 0xcb, + 0x40, 0x85, 0x65, 0x61, 0xa3, 0x52, 0x78, 0xc3, 0x63, 0x02, 0x85, 0x48, 0xc4, 0xf8, 0x28, 0x83, + 0xca, 0x1b, 0x83, 0x2d, 0x7f, 0xcf, 0x67, 0xb4, 0x77, 0xcc, 0xbd, 0x79, 0x45, 0x66, 0xe4, 0xb9, + 0xd0, 0x1a, 0x3c, 0x23, 0x97, 0x79, 0xf8, 0x8d, 0xe0, 0x9c, 0x51, 0x72, 0xbe, 0xf1, 0x39, 0x13, + 0x9c, 0x2e, 0xbf, 0xc9, 0xa0, 0xa9, 0x7a, 0xd7, 0xa6, 0x0e, 0x6b, 0xd8, 0xbe, 0xcc, 0x08, 0x8f, + 0xb9, 0x55, 0x0e, 0x57, 0x97, 0x7e, 0x86, 0x5b, 0x42, 0xe3, 0xc3, 0x0c, 0xaa, 0x2c, 0x0f, 0xd8, + 0xf6, 0x32, 0x83, 0xa3, 0xfa, 0x0b, 0xf9, 0xd1, 0xfc, 0x24, 0x8d, 0x34, 0x42, 0x7d, 0xca, 0x82, + 0x14, 0x7b, 0xd3, 0x7d, 0x44, 0x9d, 0x17, 0x90, 0xdb, 0xaa, 0x39, 0x6a, 0xe6, 0x39, 0x73, 0xd4, + 0xc0, 0xa8, 0xd9, 0xcf, 0x99, 0xab, 0xf3, 0xea, 0x87, 0x27, 0xb1, 0x27, 0x64, 0x19, 0x2f, 0xa0, + 0x88, 0x3b, 0xca, 0x65, 0xfc, 0x25, 0x8d, 0x66, 0x37, 0xbd, 0x81, 0xcf, 0xa8, 0x55, 0xef, 0xf2, + 0x7f, 0xbc, 0x63, 0xee, 0x97, 0xc9, 0x05, 0x1d, 0x73, 0x0f, 0xfd, 0x2d, 0x8d, 0x2e, 0x45, 0x17, + 0x74, 0x12, 0x4e, 0x81, 0xbf, 0xa6, 0xd1, 0xf9, 0xaf, 0xda, 0x6c, 0x7b, 0xb0, 0x35, 0xee, 0x8b, + 0x9c, 0xbc, 0x15, 0x1d, 0xf3, 0xc8, 0xfb, 0x73, 0x1a, 0xcd, 0xac, 0xaf, 0x36, 0xea, 0x27, 0xc5, + 0x43, 0x13, 0xeb, 0x39, 0x01, 0xfe, 0xd9, 0x58, 0xbe, 0xb7, 0x76, 0x92, 0xfc, 0x13, 0x59, 0xcf, + 0x31, 0xf7, 0xcf, 0x87, 0x18, 0xe5, 0xd7, 0x1d, 0xba, 0xfe, 0x10, 0xdf, 0x56, 0x6e, 0x4f, 0xe4, + 0x12, 0xa6, 0x55, 0x35, 0x00, 0x34, 0x53, 0x44, 0xb9, 0x63, 0x59, 0x52, 0x1b, 0xe7, 0x72, 0xea, + 0x58, 0x95, 0x11, 0x48, 0x33, 0x45, 0xd4, 0x06, 0xfb, 0x92, 0xda, 0x59, 0x96, 0x13, 0x8e, 0x48, + 0x09, 0x24, 0x90, 0x92, 0x06, 0x5e, 0x4b, 0x6a, 0xe4, 0xc6, 0x9f, 0x4c, 0x4d, 0x72, 0x34, 0x53, + 0x24, 0xb9, 0x01, 0x1c, 0x79, 0xf2, 0x2c, 0x8b, 0xd8, 0xd9, 0x58, 0x11, 0x0c, 0x58, 0x33, 0x45, + 0xa2, 0xcf, 0xa3, 0xef, 0x44, 0x1e, 0xea, 0xca, 0x02, 0x76, 0x26, 0x26, 0xca, 0xa1, 0x66, 0x8a, + 0xc4, 0x9e, 0xf4, 0x46, 0x1e, 0xa5, 0x42, 0xf1, 0x3a, 0x39, 0x28, 0x60, 0xca, 0xa0, 0xe2, 0x01, + 0xeb, 0x57, 0x62, 0x2f, 0x38, 0xa1, 0xb2, 0xad, 0x2c, 0x9e, 0x8f, 0x09, 0x0b, 0xb0, 0x99, 0x22, + 0xb1, 0xf7, 0x9e, 0xf3, 0xc1, 0x5b, 0x45, 0xf9, 0x5c, 0xea, 0xac, 0x12, 0x60, 0xf6, 0x63, 0x6e, + 0xa5, 0xe0, 0x2d, 0xe3, 0x92, 0xfa, 0xaa, 0x4e, 0xbe, 0x7f, 0xc2, 0xb1, 0x51, 0x56, 0x1c, 0x8b, + 0x7b, 0x47, 0x79, 0x7d, 0xf7, 0x6e, 0xfc, 0x15, 0x8f, 0x56, 0x89, 0xd7, 0x5c, 0x2a, 0xda, 0x4c, + 0x91, 0xf8, 0xab, 0x9f, 0x3b, 0x91, 0x17, 0x24, 0x5a, 0x35, 0xd1, 0xaa, 0x1c, 0x52, 0xac, 0x0a, + 0x6f, 0x4d, 0xde, 0x8d, 0x3f, 0x69, 0xd0, 0xce, 0x24, 0x0e, 0x2d, 0x51, 0x65, 0xe8, 0xe0, 0x09, + 0xc4, 0x9d, 0xc8, 0xa5, 0xb9, 0x76, 0x36, 0x79, 0x68, 0x93, 0x99, 0xea, 0xd0, 0xe2, 0x7a, 0x3d, + 0x72, 0x7d, 0xab, 0x9d, 0x4b, 0x74, 0x28, 0x60, 0x8a, 0x43, 0xc5, 0x55, 0xef, 0x9d, 0x48, 0x43, + 0x5a, 0x9b, 0x8a, 0x0e, 0xaa, 0x40, 0x7c, 0x50, 0xb5, 0x75, 0xbd, 0xa4, 0xf6, 0x69, 0xb5, 0xe9, + 0xa8, 0x83, 0x42, 0x84, 0x3b, 0x48, 0xe9, 0xe7, 0xea, 0xd0, 0xb5, 0xd2, 0x30, 0xb0, 0x57, 0xc6, + 0x33, 0xac, 0xb7, 0x9a, 0x29, 0x02, 0xfd, 0x2c, 0x43, 0x74, 0x17, 0xb5, 0x19, 0xe0, 0xa8, 0x06, + 0x1c, 0x9c, 0xd6, 0x4c, 0x11, 0xd1, 0x79, 0xbc, 0xad, 0xb4, 0x4c, 0xb4, 0xd9, 0xe8, 0x11, 0x31, + 0x06, 0xf8, 0x11, 0x11, 0x36, 0x56, 0xee, 0x4e, 0xb6, 
0x15, 0xb4, 0xf3, 0xd1, 0x33, 0x2e, 0x8e, + 0x37, 0x53, 0x64, 0xb2, 0x15, 0x71, 0x27, 0x52, 0x69, 0x6b, 0x17, 0xa2, 0xe6, 0x52, 0x20, 0x6e, + 0x2e, 0xb5, 0x26, 0x5f, 0x4f, 0xbc, 0x75, 0xd1, 0x2e, 0x82, 0x82, 0x97, 0xc6, 0x0a, 0x26, 0x59, + 0x9a, 0x29, 0x92, 0x78, 0x5f, 0xf3, 0xad, 0xfd, 0xeb, 0x5d, 0x4d, 0x03, 0xad, 0x73, 0xca, 0xe6, + 0x4a, 0xe4, 0x6b, 0xa6, 0xc8, 0xfe, 0x35, 0xf3, 0x92, 0x5a, 0x7a, 0x6a, 0x97, 0xa2, 0xfe, 0x0d, + 0x11, 0xee, 0x5f, 0xa5, 0x44, 0x5d, 0x52, 0x2b, 0x3d, 0xed, 0xf2, 0xa4, 0x54, 0x78, 0xa8, 0x2a, + 0x15, 0x21, 0x49, 0x2e, 0xac, 0xb4, 0x97, 0x40, 0xfe, 0x4a, 0x20, 0x9f, 0xc4, 0xd3, 0x4c, 0x91, + 0xe4, 0xa2, 0x8c, 0x24, 0xd7, 0x36, 0xda, 0x95, 0x83, 0x74, 0x8e, 0x67, 0x97, 0x5c, 0x17, 0x99, + 0x07, 0x94, 0x17, 0xda, 0x55, 0x50, 0x7c, 0x3d, 0x59, 0x71, 0xd4, 0xea, 0x07, 0x14, 0x29, 0xf7, + 0xf7, 0xc9, 0xf5, 0xb5, 0x6b, 0xa0, 0xfe, 0x6a, 0xa0, 0x3e, 0x91, 0xa9, 0x99, 0x22, 0xfb, 0x54, + 0x0a, 0xf7, 0xf7, 0x49, 0xb8, 0x35, 0xfd, 0x40, 0xb5, 0x63, 0x7b, 0xec, 0x93, 0xae, 0xaf, 0x27, + 0x66, 0xbd, 0xda, 0x5c, 0x34, 0xaa, 0x13, 0x58, 0x78, 0x54, 0x27, 0xe5, 0xcb, 0xeb, 0x89, 0x69, + 0xa7, 0x76, 0xfd, 0x00, 0x85, 0xe3, 0x39, 0x26, 0x26, 0xac, 0xeb, 0x89, 0x79, 0x9f, 0x66, 0x44, + 0x15, 0x26, 0xb0, 0x70, 0x85, 0x49, 0x19, 0xe3, 0x7a, 0x62, 0xe2, 0xa5, 0xdd, 0x38, 0x40, 0x61, + 0x38, 0xc3, 0x04, 0x72, 0xad, 0x88, 0xf2, 0x2b, 0x5c, 0xc8, 0xf8, 0x79, 0x1a, 0x55, 0x37, 0x98, + 0x47, 0xcd, 0x9e, 0xbc, 0x29, 0xbb, 0x8c, 0x4a, 0xf7, 0xfb, 0x5d, 0xd7, 0xb4, 0x82, 0x3f, 0xff, + 0x23, 0xe3, 0xdf, 0xf8, 0x26, 0x3a, 0xbb, 0x66, 0xfa, 0x0c, 0x24, 0x95, 0xbf, 0x08, 0x21, 0x31, + 0x2a, 0x5e, 0x13, 0x7c, 0x42, 0x0e, 0xde, 0x12, 0x67, 0x9f, 0xf9, 0x08, 0xa2, 0xc4, 0x13, 0x34, + 0x78, 0xf9, 0x10, 0x93, 0xad, 0xcd, 0x3e, 0xf9, 0xf7, 0xb5, 0xd4, 0x93, 0xa7, 0xd7, 0xd2, 0x1f, + 0x3f, 0xbd, 0x96, 0xfe, 0xd7, 0xd3, 0x6b, 0xe9, 0x9f, 0xfc, 0xe7, 0x5a, 0x6a, 0xab, 0x00, 0x3a, + 0xde, 0xf8, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x90, 0x3f, 0xc4, 0x05, 0x3a, 0x00, 0x00, +} diff --git a/lib/events/events.proto b/lib/events/events.proto new file mode 100644 index 0000000000000..8f239a5e088d3 --- /dev/null +++ b/lib/events/events.proto @@ -0,0 +1,957 @@ +syntax = "proto3"; +package events; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/struct.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +// Metadata is a common event metadata +message Metadata { + // Index is a monotonicaly incremented index in the event sequence + int64 Index = 1 [ (gogoproto.jsontag) = "ei" ]; + + // Type is the event type + string Type = 2 [ (gogoproto.jsontag) = "event" ]; + + // ID is a unique event identifier + string ID = 3 [ (gogoproto.jsontag) = "uid,omitempty" ]; + + // Code is a unique event code + string Code = 4 [ (gogoproto.jsontag) = "code,omitempty" ]; + + // Time is event time + google.protobuf.Timestamp Time = 5 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false, (gogoproto.jsontag) = "time" ]; +} + +// SesssionMetadata is a common session event metadata +message SessionMetadata { + // SessionID is a unique UUID of the session. 
+ string SessionID = 1 [ (gogoproto.jsontag) = "sid" ]; +} + +// UserMetadata is a common user event metadata +message UserMetadata { + // User is a teleport user name + string User = 1 [ (gogoproto.jsontag) = "user" ]; + + // Login is OS login + string Login = 2 [ (gogoproto.jsontag) = "login,omitempty" ]; +} + +// ServerMetadata is a common server metadata +message ServerMetadata { + // ServerNamespace is a namespace of the server event + string ServerNamespace = 1 [ (gogoproto.jsontag) = "namespace,omitempty" ]; + + // ServerID is the UUID of the server the session occurred on. + string ServerID = 2 [ (gogoproto.jsontag) = "server_id" ]; + + // ServerHostname is the hostname of the server the session occurred on. + string ServerHostname = 3 [ (gogoproto.jsontag) = "server_hostname,omitempty" ]; + + // ServerAddr is the address of the server the session occurred on. + string ServerAddr = 4 [ (gogoproto.jsontag) = "server_addr,omitempty" ]; + + // ServerLabels are the labels (static and dynamic) of the server the + // session occurred on. + map<string, string> ServerLabels = 5 + [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "server_labels,omitempty" ]; +} + +// ConnectionMetadata contains connection info +message ConnectionMetadata { + // LocalAddr is a target address on the host + string LocalAddr = 1 [ (gogoproto.jsontag) = "addr.local,omitempty" ]; + + // RemoteAddr is a client (user's) address + string RemoteAddr = 2 [ (gogoproto.jsontag) = "addr.remote,omitempty" ]; + + // Protocol specifies the protocol that was captured + string Protocol = 3 [ (gogoproto.jsontag) = "proto,omitempty" ]; +} + +// SessionStart is a session start event +message SessionStart { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 5 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // TerminalSize is expressed as 'W:H' + string TerminalSize = 6 [ (gogoproto.jsontag) = "size,omitempty" ]; +} + +// SessionJoin is emitted when another user joins a session +message SessionJoin { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 5 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// 
SessionPrint event happens every time a write occurs to +// terminal I/O during a session +message SessionPrint { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ChunkIndex is a monotonically incremented index for ordering print events + int64 ChunkIndex = 2 [ (gogoproto.jsontag) = "ci" ]; + + // Data is the data transferred; it is not marshaled to JSON format + bytes Data = 3 [ (gogoproto.nullable) = true, (gogoproto.jsontag) = "-" ]; + + // Bytes says how many bytes have been written into the session + // during the "print" event + int64 Bytes = 4 [ (gogoproto.jsontag) = "bytes" ]; + + // DelayMilliseconds is the delay in milliseconds from the start of the session + int64 DelayMilliseconds = 5 [ (gogoproto.jsontag) = "ms" ]; + + // Offset is the offset in bytes in the session file + int64 Offset = 6 [ (gogoproto.jsontag) = "offset" ]; +} + +// SessionReject event happens when a user hits the limit of maximum +// concurrent connections in the cluster `max_connections` +message SessionReject { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // Reason is a field that specifies the reason for the event, e.g. in a disconnect + // event it explains why the server disconnected the client + string Reason = 5 [ (gogoproto.jsontag) = "reason" ]; + + // Maximum is an event field specifying a maximal value (e.g. the value + // of `max_connections` for a `session.rejected` event).
+ int64 Maximum = 6 [ (gogoproto.jsontag) = "max" ]; +} + +// Resize means that some user resized PTY on the client +message Resize { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 5 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // TerminalSize is expressed as 'W:H' + string TerminalSize = 6 [ (gogoproto.jsontag) = "size,omitempty" ]; +} + +// SessionEnd is a session end event +message SessionEnd { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 5 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // EnhancedRecording is used to indicate if the recording was an + // enhanced recording or not. + bool EnhancedRecording = 6 [ (gogoproto.jsontag) = "enhanced_recording" ]; + + // Interactive is used to indicate if the session was interactive + // (has PTY attached) or not (exec session). + bool Interactive = 7 [ (gogoproto.jsontag) = "interactive" ]; + + // Participants is a list of participants in the session. + repeated string Participants = 8 [ (gogoproto.jsontag) = "participants" ]; + + // StartTime is the timestamp at which the session began. + google.protobuf.Timestamp StartTime = 9 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "session_start,omitempty" + ]; + + // EndTime is the timestamp at which the session ended. + google.protobuf.Timestamp EndTime = 10 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "session_stop,omitempty" + ]; +} + +// BPFMetadata is a common BPF process metadata +message BPFMetadata { + // PID is the ID of the process. + uint64 PID = 1 [ (gogoproto.jsontag) = "pid" ]; + + // CgroupID is the internal cgroupv2 ID of the event. + uint64 CgroupID = 2 [ (gogoproto.jsontag) = "cgroup_id" ]; + + // Program is name of the executable. 
+ string Program = 3 [ (gogoproto.jsontag) = "program" ]; +} + +// Status contains common command or operation status fields +message Status { + // Success indicates the success or failure of the operation + bool Success = 1 [ (gogoproto.jsontag) = "success" ]; + + // Error includes system error message for the failed attempt + string Error = 2 [ (gogoproto.jsontag) = "error,omitempty" ]; + + // UserMessage is a user-friendly message for a successful or unsuccessful auth attempt + string UserMessage = 3 [ (gogoproto.jsontag) = "message,omitempty" ]; +} + +// SessionCommand is a session command event +message SessionCommand { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // BPFMetadata is a common BPF subsystem metadata + BPFMetadata BPF = 5 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // PPID is the PID of the parent process. + uint64 PPID = 6 [ (gogoproto.jsontag) = "ppid" ]; + + // Path is the full path to the executable. + string Path = 7 [ (gogoproto.jsontag) = "path" ]; + + // Argv is the list of arguments to the program. Note, the first element does + // not contain the name of the process. + repeated string Argv = 8 [ (gogoproto.jsontag) = "argv" ]; + + // ReturnCode is the return code of execve. + int32 ReturnCode = 9 [ (gogoproto.jsontag) = "return_code" ]; +} + +// SessionDisk is a session disk access event +message SessionDisk { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // BPFMetadata is a common BPF subsystem metadata + BPFMetadata BPF = 5 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // Path is the full path to the executable. + string Path = 6 [ (gogoproto.jsontag) = "path" ]; + + // Flags are the flags passed to open.
+ int32 Flags = 7 [ (gogoproto.jsontag) = "flags" ]; + + // ReturnCode is the return code of disk open + int32 ReturnCode = 8 [ (gogoproto.jsontag) = "return_code" ]; +} + +// SessionNetwork is a network event +message SessionNetwork { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // BPFMetadata is a common BPF subsystem metadata + BPFMetadata BPF = 5 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SrcAddr is the source IP address of the connection. + string SrcAddr = 6 [ (gogoproto.jsontag) = "src_addr" ]; + + // DstAddr is the destination IP address of the connection. + string DstAddr = 7 [ (gogoproto.jsontag) = "dst_addr" ]; + + // DstPort is the destination port of the connection. + int32 DstPort = 8 [ (gogoproto.jsontag) = "dst_port" ]; + + // TCPVersion is the version of TCP (4 or 6). + int32 TCPVersion = 9 [ (gogoproto.jsontag) = "version" ]; +} + +// SessionData is emitted to report session data usage. +message SessionData { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 5 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // BytesTransmitted is the amount of bytes transmitted + uint64 BytesTransmitted = 6 [ (gogoproto.jsontag) = "tx" ]; + + // BytesReceived is the amount of bytes received + uint64 BytesReceived = 7 [ (gogoproto.jsontag) = "rx" ]; +} + +// SessionLeave is emitted to report that a user left the session +message SessionLeave { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 5 + [ (gogoproto.nullable) = 
false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// UserLogin records a successful or failed user login event +message UserLogin { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // Status contains common command or operation status fields + Status Status = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // Method is the event field indicating how the login was performed + string Method = 4 [ (gogoproto.jsontag) = "method,omitempty" ]; + + // IdentityAttributes is a map of user attributes received from the identity provider + google.protobuf.Struct IdentityAttributes = 5 + [ (gogoproto.jsontag) = "attributes,omitempty", (gogoproto.casttype) = "Struct" ]; +} + +// ResourceMetadata is a common resource metadata +message ResourceMetadata { + // ResourceName is a resource name + string Name = 1 [ (gogoproto.jsontag) = "name,omitempty" ]; + + // Expires is set if the resource expires + google.protobuf.Timestamp Expires = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "expires" + ]; + + // UpdatedBy, if set, indicates the user who modified the resource + string UpdatedBy = 3 [ (gogoproto.jsontag) = "updated_by,omitempty" ]; + + // TTL is the TTL of a reset password token represented as a duration, e.g. "10m", + // used for compatibility purposes for some events; Expires should be used instead + // as it's more useful (contains the exact expiration date/time) + string TTL = 4 [ (gogoproto.jsontag) = "ttl,omitempty" ]; +} + +// UserCreate is emitted when the user is created or updated (upsert). +message UserCreate { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // Roles is a list of roles for the user. + repeated string Roles = 4 [ (gogoproto.jsontag) = "roles" ]; + + // Connector is the connector used to create the user. + string Connector = 5 [ (gogoproto.jsontag) = "connector" ]; +} + +// UserDelete is emitted when a user gets deleted +message UserDelete { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// UserPasswordChange is emitted when the user changes their own password. 
+message UserPasswordChange { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// AccessRequestCreate is emitted when access request has been created or updated +message AccessRequestCreate { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // Roles is a list of roles for the user. + repeated string Roles = 4 [ (gogoproto.jsontag) = "roles" ]; + + // RequestID is access request ID + string RequestID = 5 [ (gogoproto.jsontag) = "id" ]; + + // RequestState is access request state + string RequestState = 6 [ (gogoproto.jsontag) = "state" ]; + + // Delegator is used by teleport plugins to indicate the identity + // which caused them to update state. + string Delegator = 7 [ (gogoproto.jsontag) = "delegator,omitempty" ]; +} + +// PortForward is emitted when a user requests port forwarding. +message PortForward { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // Status contains operation success or failure status + Status Status = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // Addr is a target port forwarding address + string Addr = 5 [ (gogoproto.jsontag) = "addr" ]; +} + +// X11Forward is emitted when a user requests X11 protocol forwarding +message X11Forward { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // Status contains operation success or failure status + Status Status = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// CommandMetadata specifies common command fields +message CommandMetadata { + // Command is the executed command name + string Command = 1 [ (gogoproto.jsontag) = "command" ]; + // ExitCode specifies command exit code + string ExitCode = 2 [ (gogoproto.jsontag) = "exitCode,omitempty" ]; + // Error is an optional exit error, set if command has failed + string Error = 3 [ (gogoproto.jsontag) = "exitError,omitempty" ]; +} + +// Exec specifies command exec event +message Exec { + // 
Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 5 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // CommandMetadata is a common command metadata + CommandMetadata Command = 6 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// SCP is emitted when data transfer has occurred between server and client +message SCP { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ConnectionMetadata holds information about the connection + ConnectionMetadata Connection = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // SessionMetadata is a common event session metadata + SessionMetadata Session = 4 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ServerMetadata is a common server metadata + ServerMetadata Server = 5 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // CommandMetadata is a common command metadata + CommandMetadata Command = 6 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // Path is a copy path + string Path = 7 [ (gogoproto.jsontag) = "path" ]; + + // Action is upload or download + string Action = 8 [ (gogoproto.jsontag) = "action" ]; +} + +// Subsystem is emitted when a user requests a new subsystem. 
+message Subsystem {
+  // Metadata is a common event metadata
+  Metadata Metadata = 1
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // User is a common user event metadata
+  UserMetadata User = 2
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // ConnectionMetadata holds information about the connection
+  ConnectionMetadata Connection = 3
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // Name is a subsystem name
+  string Name = 4 [ (gogoproto.jsontag) = "name" ];
+
+  // Error contains error in case of an unsuccessful attempt
+  string Error = 5 [ (gogoproto.jsontag) = "exitError" ];
+}
+
+// ClientDisconnect is emitted when a client is disconnected
+// by the server due to inactivity or any other reason
+message ClientDisconnect {
+  // Metadata is a common event metadata
+  Metadata Metadata = 1
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // User is a common user event metadata
+  UserMetadata User = 2
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // ConnectionMetadata holds information about the connection
+  ConnectionMetadata Connection = 3
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // ServerMetadata is a common server metadata
+  ServerMetadata Server = 4
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // Reason is a field that specifies reason for event, e.g. in disconnect
+  // event it explains why server disconnected the client
+  string Reason = 5 [ (gogoproto.jsontag) = "reason" ];
+}
+
+// AuthAttempt is emitted upon a failed or successful authentication attempt.
+message AuthAttempt {
+  // Metadata is a common event metadata
+  Metadata Metadata = 1
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // User is a common user event metadata
+  UserMetadata User = 2
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // ConnectionMetadata holds information about the connection
+  ConnectionMetadata Connection = 3
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // Status contains common command or operation status fields
+  Status Status = 4
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+}
+
+// ResetPasswordTokenCreate is emitted when a token is created.
+message ResetPasswordTokenCreate {
+  // Metadata is a common event metadata
+  Metadata Metadata = 1
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // ResourceMetadata is a common resource event metadata
+  ResourceMetadata Resource = 2
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+
+  // User is a common user event metadata
+  UserMetadata User = 3
+      [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ];
+}
+
+// RoleCreate is emitted when a role is created/updated.
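
All of the event messages above embed their metadata messages with an empty jsontag, so the generated Go types serialize as one flat JSON object rather than nested ones. A minimal sketch of what that looks like for the Subsystem event, assuming the generated types in lib/events and plain encoding/json (the tests added later in this diff use utils.FastMarshal, which produces the same shape):

package main

import (
    "encoding/json"
    "fmt"
    "time"

    "github.com/gravitational/teleport/lib/events"
)

func main() {
    // The embedded Metadata/UserMetadata/ConnectionMetadata fields are
    // promoted, so "event", "uid", "user", "name", etc. all land at the
    // top level of the resulting JSON object.
    event := events.Subsystem{
        Metadata: events.Metadata{
            Type: events.SubsystemEvent,
            Code: events.SubsystemFailureCode,
            ID:   "3129a5ae-ee1e-4b39-8d7c-a0a3f218e7dc",
            Time: time.Date(2020, 4, 15, 20, 28, 18, 0, time.UTC),
        },
        UserMetadata: events.UserMetadata{
            User:  "alice@example.com",
            Login: "alice",
        },
        ConnectionMetadata: events.ConnectionMetadata{
            LocalAddr:  "127.0.0.1:57518",
            RemoteAddr: "127.0.0.1:3022",
        },
        Name:  "proxy",
        Error: "some error",
    }
    out, err := json.Marshal(event)
    if err != nil {
        panic(err)
    }
    // Roughly: {"ei":0,"event":"subsystem","uid":"...","code":"T3001E","time":"...",
    //           "user":"alice@example.com","login":"alice","addr.local":"127.0.0.1:57518",
    //           "addr.remote":"127.0.0.1:3022","name":"proxy","exitError":"some error"}
    fmt.Println(string(out))
}
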
+message RoleCreate { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// RoleDelete is emitted when a role is deleted +message RoleDelete { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// TrustedClusterCreate is the event for creating a trusted cluster. +message TrustedClusterCreate { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// TrustedClusterDelete is the event for removing a trusted cluster. +message TrustedClusterDelete { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// TrustedClusterTokenCreate is the event for +// creating new join token for a trusted cluster. +message TrustedClusterTokenCreate { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// GithubConnectorCreate fires when a Github connector is created/updated. 
+message GithubConnectorCreate { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// GithubConnectorDelete fires when a Github connector is deleted. +message GithubConnectorDelete { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// OIDCConnectorCreate fires when OIDC connector is created/updated. +message OIDCConnectorCreate { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// OIDCConnectorDelete fires when OIDC connector is deleted. +message OIDCConnectorDelete { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// SAMLConnectorCreate fires when SAML connector is created/updated. +message SAMLConnectorCreate { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// SAMLConnectorDelete fires when SAML connector is deleted. 
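
The OneOf message defined a little further below is the union envelope the auth service accepts over gRPC: exactly one of its event fields is set per submitted message. A rough sketch of wrapping and unwrapping an event, assuming the standard gogoproto-generated case types (OneOf_UserLogin and friends) and the AuditEvent interface used elsewhere in this change; the generated names are an assumption and are not shown in this diff:

package example

import (
    "github.com/gravitational/trace"

    "github.com/gravitational/teleport/lib/events"
)

// wrapUserLogin packs a concrete event into the OneOf union before it is
// submitted to the auth service.
func wrapUserLogin(e *events.UserLogin) *events.OneOf {
    return &events.OneOf{
        Event: &events.OneOf_UserLogin{UserLogin: e},
    }
}

// unwrap recovers the concrete event on the receiving side with a type
// switch over the generated oneof case types.
func unwrap(msg *events.OneOf) (events.AuditEvent, error) {
    switch e := msg.Event.(type) {
    case *events.OneOf_UserLogin:
        return e.UserLogin, nil
    case *events.OneOf_SessionStart:
        return e.SessionStart, nil
    default:
        return nil, trace.BadParameter("unsupported oneof case %T", e)
    }
}
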
+message SAMLConnectorDelete { + // Metadata is a common event metadata + Metadata Metadata = 1 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // ResourceMetadata is a common resource event metadata + ResourceMetadata Resource = 2 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; + + // User is a common user event metadata + UserMetadata User = 3 + [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "" ]; +} + +// OneOf is a union of one of audit events submitted to the auth service +message OneOf { + // Event is one of the audit events + oneof Event { + events.UserLogin UserLogin = 1; + events.UserCreate UserCreate = 2; + events.UserDelete UserDelete = 3; + events.UserPasswordChange UserPasswordChange = 4; + events.SessionStart SessionStart = 5; + events.SessionJoin SessionJoin = 6; + events.SessionPrint SessionPrint = 7; + events.SessionReject SessionReject = 8; + events.Resize Resize = 9; + events.SessionEnd SessionEnd = 10; + events.SessionCommand SessionCommand = 11; + events.SessionDisk SessionDisk = 12; + events.SessionNetwork SessionNetwork = 13; + events.SessionData SessionData = 14; + events.SessionLeave SessionLeave = 15; + events.PortForward PortForward = 16; + events.X11Forward X11Forward = 17; + events.SCP SCP = 18; + events.Exec Exec = 19; + events.Subsystem Subsystem = 20; + events.ClientDisconnect ClientDisconnect = 21; + events.AuthAttempt AuthAttempt = 22; + events.AccessRequestCreate AccessRequestCreate = 23; + events.ResetPasswordTokenCreate ResetPasswordTokenCreate = 24; + events.RoleCreate RoleCreate = 25; + events.RoleDelete RoleDelete = 26; + events.TrustedClusterCreate TrustedClusterCreate = 27; + events.TrustedClusterDelete TrustedClusterDelete = 28; + events.TrustedClusterTokenCreate TrustedClusterTokenCreate = 29; + events.GithubConnectorCreate GithubConnectorCreate = 30; + events.GithubConnectorDelete GithubConnectorDelete = 31; + events.OIDCConnectorCreate OIDCConnectorCreate = 32; + events.OIDCConnectorDelete OIDCConnectorDelete = 33; + events.SAMLConnectorCreate SAMLConnectorCreate = 34; + events.SAMLConnectorDelete SAMLConnectorDelete = 35; + } +} + +// StreamStatus reflects stream status +message StreamStatus { + // UploadID represents upload ID + string UploadID = 1; + // LastEventIndex updates last event index + int64 LastEventIndex = 2; + // LastUploadTime is the time of the last upload + google.protobuf.Timestamp LastUploadTime = 3 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; +} diff --git a/lib/events/events_test.go b/lib/events/events_test.go new file mode 100644 index 0000000000000..bf7e5f9fc9268 --- /dev/null +++ b/lib/events/events_test.go @@ -0,0 +1,499 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package events + +import ( + "encoding/json" + "reflect" + "time" + + "gopkg.in/check.v1" + + "github.com/gravitational/teleport/lib/fixtures" + "github.com/gravitational/teleport/lib/utils" +) + +type EventsTestSuite struct { +} + +var _ = check.Suite(&EventsTestSuite{}) + +// TestJSON tests JSON marshal events +func (a *EventsTestSuite) TestJSON(c *check.C) { + type testCase struct { + name string + json string + event interface{} + } + testCases := []testCase{ + { + name: "session start event", + json: `{"ei":0,"event":"session.start","uid":"36cee9e9-9a80-4c32-9163-3d9241cdac7a","code":"T2000I","time":"2020-03-30T15:58:54.561Z","namespace":"default","sid":"5b3555dc-729f-11ea-b66a-507b9dd95841","login":"bob","user":"bob@example.com","server_id":"a7c54b0c-469c-431e-af4d-418cd3ae9694","server_hostname":"planet","server_labels":{"group":"gravitational/devc","kernel":"5.3.0-42-generic","date":"Mon Mar 30 08:58:54 PDT 2020"},"addr.local":"127.0.0.1:3022","addr.remote":"[::1]:37718","size":"80:25"}`, + event: SessionStart{ + Metadata: Metadata{ + Index: 0, + Type: SessionStartEvent, + ID: "36cee9e9-9a80-4c32-9163-3d9241cdac7a", + Code: SessionStartCode, + Time: time.Date(2020, 03, 30, 15, 58, 54, 561*int(time.Millisecond), time.UTC), + }, + ServerMetadata: ServerMetadata{ + ServerID: "a7c54b0c-469c-431e-af4d-418cd3ae9694", + ServerLabels: map[string]string{ + "kernel": "5.3.0-42-generic", + "date": "Mon Mar 30 08:58:54 PDT 2020", + "group": "gravitational/devc", + }, + ServerHostname: "planet", + ServerNamespace: "default", + }, + SessionMetadata: SessionMetadata{ + SessionID: "5b3555dc-729f-11ea-b66a-507b9dd95841", + }, + UserMetadata: UserMetadata{ + User: "bob@example.com", + Login: "bob", + }, + ConnectionMetadata: ConnectionMetadata{ + LocalAddr: "127.0.0.1:3022", + RemoteAddr: "[::1]:37718", + }, + TerminalSize: "80:25", + }, + }, + { + name: "resize event", + json: `{"time":"2020-03-30T15:58:54.564Z","uid":"c34e512f-e6cb-44f1-ab94-4cea09002d29","event":"resize","login":"bob","sid":"5b3555dc-729f-11ea-b66a-507b9dd95841","size":"194:59","ei":1,"code":"T2002I","namespace":"default","server_id":"a7c54b0c-469c-431e-af4d-418cd3ae9694","user":"bob@example.com"}`, + event: Resize{ + Metadata: Metadata{ + Index: 1, + Type: ResizeEvent, + ID: "c34e512f-e6cb-44f1-ab94-4cea09002d29", + Code: TerminalResizeCode, + Time: time.Date(2020, 03, 30, 15, 58, 54, 564*int(time.Millisecond), time.UTC), + }, + ServerMetadata: ServerMetadata{ + ServerID: "a7c54b0c-469c-431e-af4d-418cd3ae9694", + ServerNamespace: "default", + }, + SessionMetadata: SessionMetadata{ + SessionID: "5b3555dc-729f-11ea-b66a-507b9dd95841", + }, + UserMetadata: UserMetadata{ + User: "bob@example.com", + Login: "bob", + }, + TerminalSize: "194:59", + }, + }, + { + name: "session end event", + json: `{"code":"T2004I","ei":20,"enhanced_recording":true,"event":"session.end","interactive":true,"namespace":"default","participants":["alice@example.com"],"server_id":"a7c54b0c-469c-431e-af4d-418cd3ae9694","sid":"5b3555dc-729f-11ea-b66a-507b9dd95841","time":"2020-03-30T15:58:58.999Z","uid":"da455e0f-c27d-459f-a218-4e83b3db9426","user":"alice@example.com", "session_start":"2020-03-30T15:58:54.561Z", "session_stop": "2020-03-30T15:58:58.999Z"}`, + event: SessionEnd{ + Metadata: Metadata{ + Index: 20, + Type: SessionEndEvent, + ID: "da455e0f-c27d-459f-a218-4e83b3db9426", + Code: SessionEndCode, + Time: time.Date(2020, 03, 30, 15, 58, 58, 999*int(time.Millisecond), time.UTC), + }, + ServerMetadata: ServerMetadata{ + ServerID: 
"a7c54b0c-469c-431e-af4d-418cd3ae9694", + ServerNamespace: "default", + }, + SessionMetadata: SessionMetadata{ + SessionID: "5b3555dc-729f-11ea-b66a-507b9dd95841", + }, + UserMetadata: UserMetadata{ + User: "alice@example.com", + }, + EnhancedRecording: true, + Interactive: true, + Participants: []string{"alice@example.com"}, + StartTime: time.Date(2020, 03, 30, 15, 58, 54, 561*int(time.Millisecond), time.UTC), + EndTime: time.Date(2020, 03, 30, 15, 58, 58, 999*int(time.Millisecond), time.UTC), + }, + }, + { + name: "session print event", + json: `{"time":"2020-03-30T15:58:56.959Z","event":"print","bytes":1551,"ms":2284,"offset":1957,"ei":11,"ci":9}`, + event: SessionPrint{ + Metadata: Metadata{ + Index: 11, + Type: SessionPrintEvent, + Time: time.Date(2020, 03, 30, 15, 58, 56, 959*int(time.Millisecond), time.UTC), + }, + ChunkIndex: 9, + Bytes: 1551, + DelayMilliseconds: 2284, + Offset: 1957, + }, + }, + { + name: "session command event", + json: `{"argv":["/usr/bin/lesspipe"],"login":"alice","path":"/usr/bin/dirname","return_code":0,"time":"2020-03-30T15:58:54.65Z","user":"alice@example.com","code":"T4000I","event":"session.command","pid":31638,"server_id":"a7c54b0c-469c-431e-af4d-418cd3ae9694","uid":"4f725f11-e87a-452f-96ec-ef93e9e6a260","cgroup_id":4294971450,"ppid":31637,"program":"dirname","namespace":"default","sid":"5b3555dc-729f-11ea-b66a-507b9dd95841","ei":4}`, + event: SessionCommand{ + Metadata: Metadata{ + Index: 4, + ID: "4f725f11-e87a-452f-96ec-ef93e9e6a260", + Type: SessionCommandEvent, + Time: time.Date(2020, 03, 30, 15, 58, 54, 650*int(time.Millisecond), time.UTC), + Code: SessionCommandCode, + }, + ServerMetadata: ServerMetadata{ + ServerID: "a7c54b0c-469c-431e-af4d-418cd3ae9694", + ServerNamespace: "default", + }, + SessionMetadata: SessionMetadata{ + SessionID: "5b3555dc-729f-11ea-b66a-507b9dd95841", + }, + UserMetadata: UserMetadata{ + User: "alice@example.com", + Login: "alice", + }, + BPFMetadata: BPFMetadata{ + CgroupID: 4294971450, + Program: "dirname", + PID: 31638, + }, + PPID: 31637, + ReturnCode: 0, + Path: "/usr/bin/dirname", + Argv: []string{"/usr/bin/lesspipe"}, + }, + }, + { + name: "session network event", + json: `{"dst_port":443,"cgroup_id":4294976805,"dst_addr":"2607:f8b0:400a:801::200e","program":"curl","sid":"e9a4bd34-78ff-11ea-b062-507b9dd95841","src_addr":"2601:602:8700:4470:a3:813c:1d8c:30b9","login":"alice","pid":17604,"uid":"729498e0-c28b-438f-baa7-663a74418449","user":"alice@example.com","event":"session.network","namespace":"default","time":"2020-04-07T18:45:16.602Z","version":6,"ei":0,"code":"T4002I","server_id":"00b54ef5-ae1e-425f-8565-c71b01d8f7b8"}`, + event: SessionNetwork{ + Metadata: Metadata{ + Index: 0, + ID: "729498e0-c28b-438f-baa7-663a74418449", + Type: SessionNetworkEvent, + Time: time.Date(2020, 04, 07, 18, 45, 16, 602*int(time.Millisecond), time.UTC), + Code: SessionNetworkCode, + }, + ServerMetadata: ServerMetadata{ + ServerID: "00b54ef5-ae1e-425f-8565-c71b01d8f7b8", + ServerNamespace: "default", + }, + SessionMetadata: SessionMetadata{ + SessionID: "e9a4bd34-78ff-11ea-b062-507b9dd95841", + }, + UserMetadata: UserMetadata{ + User: "alice@example.com", + Login: "alice", + }, + BPFMetadata: BPFMetadata{ + CgroupID: 4294976805, + Program: "curl", + PID: 17604, + }, + DstPort: 443, + DstAddr: "2607:f8b0:400a:801::200e", + SrcAddr: "2601:602:8700:4470:a3:813c:1d8c:30b9", + TCPVersion: 6, + }, + }, + { + name: "session disk event", + json: 
`{"time":"2020-04-07T19:56:38.545Z","login":"bob","pid":31521,"sid":"ddddce15-7909-11ea-b062-507b9dd95841","user":"bob@example.com","ei":175,"code":"T4001I","flags":142606336,"namespace":"default","uid":"ab8467af-6d85-46ce-bb5c-bdfba8acad3f","cgroup_id":4294976835,"program":"clear_console","server_id":"00b54ef5-ae1e-425f-8565-c71b01d8f7b8","event":"session.disk","path":"/etc/ld.so.cache","return_code":3}`, + event: SessionDisk{ + Metadata: Metadata{ + Index: 175, + ID: "ab8467af-6d85-46ce-bb5c-bdfba8acad3f", + Type: SessionDiskEvent, + Time: time.Date(2020, 04, 07, 19, 56, 38, 545*int(time.Millisecond), time.UTC), + Code: SessionDiskCode, + }, + ServerMetadata: ServerMetadata{ + ServerID: "00b54ef5-ae1e-425f-8565-c71b01d8f7b8", + ServerNamespace: "default", + }, + SessionMetadata: SessionMetadata{ + SessionID: "ddddce15-7909-11ea-b062-507b9dd95841", + }, + UserMetadata: UserMetadata{ + User: "bob@example.com", + Login: "bob", + }, + BPFMetadata: BPFMetadata{ + CgroupID: 4294976835, + Program: "clear_console", + PID: 31521, + }, + Flags: 142606336, + Path: "/etc/ld.so.cache", + ReturnCode: 3, + }, + }, + { + name: "successful user.login event", + json: `{"ei": 0, "attributes":{"followers_url": "https://api.github.com/users/bob/followers", "err": null, "public_repos": 20, "site_admin": false, "app_metadata":{"roles":["example/admins","example/devc"]}, "emails":[{"email":"bob@example.com","primary":true,"verified":true,"visibility":"public"},{"email":"bob@alternative.com","primary":false,"verified":true,"visibility":null}]},"code":"T1001I","event":"user.login","method":"oidc","success":true,"time":"2020-04-07T18:45:07Z","uid":"019432f1-3021-4860-af41-d9bd1668c3ea","user":"bob@example.com"}`, + event: UserLogin{ + Metadata: Metadata{ + ID: "019432f1-3021-4860-af41-d9bd1668c3ea", + Type: UserLoginEvent, + Time: time.Date(2020, 04, 07, 18, 45, 07, 0*int(time.Millisecond), time.UTC), + Code: UserSSOLoginCode, + }, + Status: Status{ + Success: true, + }, + UserMetadata: UserMetadata{ + User: "bob@example.com", + }, + IdentityAttributes: MustEncodeMap(map[string]interface{}{ + "followers_url": "https://api.github.com/users/bob/followers", + "err": nil, + "public_repos": 20, + "site_admin": false, + "app_metadata": map[string]interface{}{"roles": []interface{}{"example/admins", "example/devc"}}, + "emails": []interface{}{ + map[string]interface{}{ + "email": "bob@example.com", + "primary": true, + "verified": true, + "visibility": "public", + }, + map[string]interface{}{ + "email": "bob@alternative.com", + "primary": false, + "verified": true, + "visibility": nil, + }, + }, + }), + Method: LoginMethodOIDC, + }, + }, + { + name: "session data event", + json: `{"addr.local":"127.0.0.1:3022","addr.remote":"[::1]:44382","code":"T2006I","ei":2147483646,"event":"session.data","login":"alice","rx":9526,"server_id":"00b54ef5-ae1e-425f-8565-c71b01d8f7b8","sid":"ddddce15-7909-11ea-b062-507b9dd95841","time":"2020-04-07T19:56:39Z","tx":10279,"uid":"cb404873-cd7c-4036-854b-42e0f5fd5f2c","user":"alice@example.com"}`, + event: SessionData{ + Metadata: Metadata{ + Index: 2147483646, + ID: "cb404873-cd7c-4036-854b-42e0f5fd5f2c", + Type: SessionDataEvent, + Time: time.Date(2020, 04, 07, 19, 56, 39, 0*int(time.Millisecond), time.UTC), + Code: SessionDataCode, + }, + ServerMetadata: ServerMetadata{ + ServerID: "00b54ef5-ae1e-425f-8565-c71b01d8f7b8", + }, + SessionMetadata: SessionMetadata{ + SessionID: "ddddce15-7909-11ea-b062-507b9dd95841", + }, + UserMetadata: UserMetadata{ + User: "alice@example.com", + Login: 
"alice", + }, + ConnectionMetadata: ConnectionMetadata{ + LocalAddr: "127.0.0.1:3022", + RemoteAddr: "[::1]:44382", + }, + BytesReceived: 9526, + BytesTransmitted: 10279, + }, + }, + { + name: "session leave event", + json: `{"code":"T2003I","ei":39,"event":"session.leave","namespace":"default","server_id":"00b54ef5-ae1e-425f-8565-c71b01d8f7b8","sid":"ddddce15-7909-11ea-b062-507b9dd95841","time":"2020-04-07T19:56:38.556Z","uid":"d7c7489f-6559-42ad-9963-8543e518a058","user":"alice@example.com"}`, + event: SessionLeave{ + Metadata: Metadata{ + Index: 39, + ID: "d7c7489f-6559-42ad-9963-8543e518a058", + Type: SessionLeaveEvent, + Time: time.Date(2020, 04, 07, 19, 56, 38, 556*int(time.Millisecond), time.UTC), + Code: SessionLeaveCode, + }, + ServerMetadata: ServerMetadata{ + ServerID: "00b54ef5-ae1e-425f-8565-c71b01d8f7b8", + ServerNamespace: "default", + }, + SessionMetadata: SessionMetadata{ + SessionID: "ddddce15-7909-11ea-b062-507b9dd95841", + }, + UserMetadata: UserMetadata{ + User: "alice@example.com", + }, + }, + }, + { + name: "user update", + json: `{"ei": 0, "code":"T1003I","connector":"auth0","event":"user.update","expires":"2020-04-08T02:45:06.524816756Z","roles":["clusteradmin"],"time":"2020-04-07T18:45:07Z","uid":"e7c8e36e-adb4-4c98-b818-226d73add7fc","user":"alice@example.com"}`, + event: UserCreate{ + Metadata: Metadata{ + ID: "e7c8e36e-adb4-4c98-b818-226d73add7fc", + Type: UserUpdatedEvent, + Time: time.Date(2020, 4, 7, 18, 45, 7, 0*int(time.Millisecond), time.UTC), + Code: UserUpdateCode, + }, + UserMetadata: UserMetadata{ + User: "alice@example.com", + }, + ResourceMetadata: ResourceMetadata{ + Expires: time.Date(2020, 4, 8, 2, 45, 6, 524816756*int(time.Nanosecond), time.UTC), + }, + Connector: "auth0", + Roles: []string{"clusteradmin"}, + }, + }, + { + name: "success port forward", + json: `{"ei": 0, "addr":"localhost:3025","addr.local":"127.0.0.1:3022","addr.remote":"127.0.0.1:45976","code":"T3003I","event":"port","login":"alice","success":true,"time":"2020-04-15T18:06:56.397Z","uid":"7efc5025-a712-47de-8086-7d935c110188","user":"alice@example.com"}`, + event: PortForward{ + Metadata: Metadata{ + ID: "7efc5025-a712-47de-8086-7d935c110188", + Type: PortForwardEvent, + Time: time.Date(2020, 4, 15, 18, 06, 56, 397*int(time.Millisecond), time.UTC), + Code: PortForwardCode, + }, + UserMetadata: UserMetadata{ + User: "alice@example.com", + Login: "alice", + }, + ConnectionMetadata: ConnectionMetadata{ + LocalAddr: "127.0.0.1:3022", + RemoteAddr: "127.0.0.1:45976", + }, + Status: Status{ + Success: true, + }, + Addr: "localhost:3025", + }, + }, + { + name: "rejected port forward", + json: `{"ei": 0, "addr":"localhost:3025","addr.local":"127.0.0.1:3022","addr.remote":"127.0.0.1:46452","code":"T3003E","error":"port forwarding not allowed by role set: roles clusteradmin,default-implicit-role","event":"port","login":"bob","success":false,"time":"2020-04-15T18:20:21Z","uid":"097724d1-5ee3-4c8d-a911-ea6021e5b3fb","user":"bob@example.com"}`, + event: PortForward{ + Metadata: Metadata{ + ID: "097724d1-5ee3-4c8d-a911-ea6021e5b3fb", + Type: PortForwardEvent, + Time: time.Date(2020, 4, 15, 18, 20, 21, 0*int(time.Millisecond), time.UTC), + Code: PortForwardFailureCode, + }, + UserMetadata: UserMetadata{ + User: "bob@example.com", + Login: "bob", + }, + ConnectionMetadata: ConnectionMetadata{ + LocalAddr: "127.0.0.1:3022", + RemoteAddr: "127.0.0.1:46452", + }, + Status: Status{ + Error: "port forwarding not allowed by role set: roles clusteradmin,default-implicit-role", + Success: false, + }, 
+ Addr: "localhost:3025", + }, + }, + { + name: "rejected subsystem", + json: `{"ei": 0, "addr.local":"127.0.0.1:57518","addr.remote":"127.0.0.1:3022","code":"T3001E","event":"subsystem","exitError":"some error","login":"alice","name":"proxy","time":"2020-04-15T20:28:18Z","uid":"3129a5ae-ee1e-4b39-8d7c-a0a3f218e7dc","user":"alice@example.com"}`, + event: Subsystem{ + Metadata: Metadata{ + ID: "3129a5ae-ee1e-4b39-8d7c-a0a3f218e7dc", + Type: SubsystemEvent, + Time: time.Date(2020, 4, 15, 20, 28, 18, 0*int(time.Millisecond), time.UTC), + Code: SubsystemFailureCode, + }, + UserMetadata: UserMetadata{ + User: "alice@example.com", + Login: "alice", + }, + ConnectionMetadata: ConnectionMetadata{ + LocalAddr: "127.0.0.1:57518", + RemoteAddr: "127.0.0.1:3022", + }, + Name: "proxy", + Error: "some error", + }, + }, + { + name: "failed auth attempt", + json: `{"ei": 0, "code":"T3007W","error":"ssh: principal \"bob\" not in the set of valid principals for given certificate: [\"root\" \"alice\"]","event":"auth","success":false,"time":"2020-04-22T20:53:50Z","uid":"ebac95ca-8673-44af-b2cf-65f517acf35a","user":"alice@example.com"}`, + event: AuthAttempt{ + Metadata: Metadata{ + ID: "ebac95ca-8673-44af-b2cf-65f517acf35a", + Type: AuthAttemptEvent, + Time: time.Date(2020, 4, 22, 20, 53, 50, 0*int(time.Millisecond), time.UTC), + Code: AuthAttemptFailureCode, + }, + UserMetadata: UserMetadata{ + User: "alice@example.com", + }, + Status: Status{ + Success: false, + Error: "ssh: principal \"bob\" not in the set of valid principals for given certificate: [\"root\" \"alice\"]", + }, + }, + }, + { + name: "session join", + json: `{"uid":"cd03665f-3ce1-4c22-809d-4be9512c36e2","addr.local":"127.0.0.1:3022","addr.remote":"[::1]:34902","code":"T2001I","event":"session.join","login":"root","time":"2020-04-23T18:22:35.35Z","namespace":"default","server_id":"00b54ef5-ae1e-425f-8565-c71b01d8f7b8","sid":"b0252ad2-2fa5-4bb2-a7de-2cacd1169c96","user":"bob@example.com","ei":4}`, + event: SessionJoin{ + Metadata: Metadata{ + Index: 4, + Type: SessionJoinEvent, + ID: "cd03665f-3ce1-4c22-809d-4be9512c36e2", + Code: SessionJoinCode, + Time: time.Date(2020, 04, 23, 18, 22, 35, 350*int(time.Millisecond), time.UTC), + }, + ServerMetadata: ServerMetadata{ + ServerID: "00b54ef5-ae1e-425f-8565-c71b01d8f7b8", + ServerNamespace: "default", + }, + SessionMetadata: SessionMetadata{ + SessionID: "b0252ad2-2fa5-4bb2-a7de-2cacd1169c96", + }, + UserMetadata: UserMetadata{ + User: "bob@example.com", + Login: "root", + }, + ConnectionMetadata: ConnectionMetadata{ + LocalAddr: "127.0.0.1:3022", + RemoteAddr: "[::1]:34902", + }, + }, + }, + } + for i, tc := range testCases { + comment := check.Commentf("Test case %v: %v", i, tc.name) + outJSON, err := utils.FastMarshal(tc.event) + c.Assert(err, check.IsNil, comment) + + var out map[string]interface{} + err = json.Unmarshal(outJSON, &out) + c.Assert(err, check.IsNil, comment) + + // JSON key order is not deterministic when marshaling, + // this code makes sure intermediate representation is equal + var expected map[string]interface{} + err = json.Unmarshal([]byte(tc.json), &expected) + c.Assert(err, check.IsNil, comment) + + fixtures.DeepCompareMaps(c, out, expected) + + // unmarshal back into the type and compare the values + outEvent := reflect.New(reflect.TypeOf(tc.event)) + err = json.Unmarshal(outJSON, outEvent.Interface()) + c.Assert(err, check.IsNil, comment) + + fixtures.DeepCompare(c, outEvent.Elem().Interface(), tc.event) + } +} diff --git a/lib/events/fields.go b/lib/events/fields.go 
index 419e3a0f98ba6..e546930b27616 100644 --- a/lib/events/fields.go +++ b/lib/events/fields.go @@ -32,6 +32,22 @@ import ( "github.com/jonboulle/clockwork" ) +// ValidateServerMetadata checks that event server ID of the event +// if present, matches the passed server ID and namespace has proper syntax +func ValidateServerMetadata(event AuditEvent, serverID string) error { + getter, ok := event.(ServerMetadataGetter) + if !ok { + return nil + } + if getter.GetServerID() != serverID { + return trace.BadParameter("server %q can't emit event with server ID %q", serverID, getter.GetServerID()) + } + if !services.IsValidNamespace(getter.GetServerNamespace()) { + return trace.BadParameter("invalid namespace %q", getter.GetServerNamespace()) + } + return nil +} + // UpdateEventFields updates passed event fields with additional information // common for all event types such as unique IDs, timestamps, codes, etc. // diff --git a/lib/events/filelog.go b/lib/events/filelog.go index 43ce244430d25..f772ab66574ba 100644 --- a/lib/events/filelog.go +++ b/lib/events/filelog.go @@ -113,8 +113,29 @@ type FileLog struct { fileTime time.Time } -// EmitAuditEvent adds a new event to the log. Part of auth.IFileLog interface. -func (l *FileLog) EmitAuditEvent(event Event, fields EventFields) error { +// EmitAuditEvent adds a new event to the log. +func (l *FileLog) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + // see if the log needs to be rotated + err := l.rotateLog() + if err != nil { + log.Error(err) + } + // line is the text to be logged + line, err := utils.FastMarshal(event) + if err != nil { + return trace.Wrap(err) + } + if l.file == nil { + return trace.NotFound( + "file log is not found due to permission or disk issue") + } + // log it to the main log file: + _, err = fmt.Fprintln(l.file, string(line)) + return trace.ConvertSystemError(err) +} + +// EmitAuditEventLegacy adds a new event to the log. Part of auth.IFileLog interface. +func (l *FileLog) EmitAuditEventLegacy(event Event, fields EventFields) error { // see if the log needs to be rotated err := l.rotateLog() if err != nil { @@ -272,7 +293,7 @@ func (l *FileLog) processSlice(sl SessionLogger, slice *SessionSlice) error { if err != nil { return trace.Wrap(err) } - if err := l.EmitAuditEvent(Event{Name: chunk.EventType}, fields); err != nil { + if err := l.EmitAuditEventLegacy(Event{Name: chunk.EventType}, fields); err != nil { return trace.Wrap(err) } } diff --git a/lib/events/filesessions/fileasync.go b/lib/events/filesessions/fileasync.go new file mode 100644 index 0000000000000..a1837d8231452 --- /dev/null +++ b/lib/events/filesessions/fileasync.go @@ -0,0 +1,474 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package filesessions
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "os"
+    "path/filepath"
+    "time"
+
+    "github.com/gravitational/teleport"
+    "github.com/gravitational/teleport/lib/defaults"
+    "github.com/gravitational/teleport/lib/events"
+    "github.com/gravitational/teleport/lib/session"
+    "github.com/gravitational/teleport/lib/utils"
+
+    "github.com/gravitational/trace"
+    "github.com/jonboulle/clockwork"
+    log "github.com/sirupsen/logrus"
+)
+
+// UploaderConfig sets up configuration for the uploader service
+type UploaderConfig struct {
+    // ScanDir is the data directory with the uploads
+    ScanDir string
+    // Clock is the clock replacement
+    Clock clockwork.Clock
+    // Context is an optional context
+    Context context.Context
+    // ScanPeriod is an uploader dir scan period
+    ScanPeriod time.Duration
+    // ConcurrentUploads sets up how many parallel uploads to schedule
+    ConcurrentUploads int
+    // Streamer is the upstream streamer to upload events to
+    Streamer events.Streamer
+    // EventsC is an event channel used to signal events,
+    // used in tests
+    EventsC chan events.UploadEvent
+    // Component is used for logging purposes
+    Component string
+}
+
+// CheckAndSetDefaults checks and sets default values of UploaderConfig
+func (cfg *UploaderConfig) CheckAndSetDefaults() error {
+    if cfg.Streamer == nil {
+        return trace.BadParameter("missing parameter Streamer")
+    }
+    if cfg.ScanDir == "" {
+        return trace.BadParameter("missing parameter ScanDir")
+    }
+    if cfg.ConcurrentUploads <= 0 {
+        cfg.ConcurrentUploads = defaults.UploaderConcurrentUploads
+    }
+    if cfg.ScanPeriod <= 0 {
+        cfg.ScanPeriod = defaults.UploaderScanPeriod
+    }
+    if cfg.Context == nil {
+        cfg.Context = context.Background()
+    }
+    if cfg.Clock == nil {
+        cfg.Clock = clockwork.NewRealClock()
+    }
+    if cfg.Component == "" {
+        cfg.Component = teleport.ComponentUpload
+    }
+    return nil
+}
+
+// NewUploader creates a new disk-based async uploader
+func NewUploader(cfg UploaderConfig) (*Uploader, error) {
+    if err := cfg.CheckAndSetDefaults(); err != nil {
+        return nil, trace.Wrap(err)
+    }
+    // completer scans for uploads that have been initiated, but not completed
+    // by the client (aborted or crashed) and completes them
+    handler, err := NewHandler(Config{
+        Directory: cfg.ScanDir,
+    })
+    if err != nil {
+        return nil, trace.Wrap(err)
+    }
+    uploadCompleter, err := events.NewUploadCompleter(events.UploadCompleterConfig{
+        Uploader:  handler,
+        Unstarted: true,
+    })
+    if err != nil {
+        return nil, trace.Wrap(err)
+    }
+    ctx, cancel := context.WithCancel(cfg.Context)
+    uploader := &Uploader{
+        uploadCompleter: uploadCompleter,
+        cfg:             cfg,
+        log: log.WithFields(log.Fields{
+            trace.Component: cfg.Component,
+        }),
+        cancel:    cancel,
+        ctx:       ctx,
+        semaphore: make(chan struct{}, cfg.ConcurrentUploads),
+    }
+    return uploader, nil
+}
+
+// Uploader periodically scans ScanDir for recorded sessions and uploads them
+// to the configured Streamer. Uploads are checkpointed and resumed on a later
+// scan if interrupted, so the directory acts as a reliable disk-based buffer
+// in front of the upstream streamer.
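
A rough sketch of how this uploader is wired up, mirroring the test setup later in this change; the scanDir path and the upstream streamer are placeholders, and any implementation of events.Streamer (for example an auth service client) would do:

package example

import (
    "context"
    "time"

    "github.com/gravitational/teleport/lib/events"
    "github.com/gravitational/teleport/lib/events/filesessions"

    "github.com/gravitational/trace"
)

// runUploader wires the async uploader to an upstream streamer; scanDir is
// the spool directory that session recordings are written to on disk.
func runUploader(ctx context.Context, upstream events.Streamer, scanDir string) error {
    uploader, err := filesessions.NewUploader(filesessions.UploaderConfig{
        Context:    ctx,
        ScanDir:    scanDir,
        ScanPeriod: 10 * time.Second, // rescan interval for new or interrupted uploads
        Streamer:   upstream,
    })
    if err != nil {
        return trace.Wrap(err)
    }
    defer uploader.Close()

    // Serve blocks until the uploader is closed or the context is cancelled,
    // scanning ScanDir every ScanPeriod and uploading (or resuming) any
    // recordings it finds there.
    return uploader.Serve()
}
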
+type Uploader struct {
+    semaphore chan struct{}
+
+    cfg UploaderConfig
+    log *log.Entry
+    uploadCompleter *events.UploadCompleter
+
+    cancel context.CancelFunc
+    ctx context.Context
+}
+
+// Serve runs the uploader until stopped
+func (u *Uploader) Serve() error {
+    t := u.cfg.Clock.NewTicker(u.cfg.ScanPeriod)
+    defer t.Stop()
+    for {
+        select {
+        case <-u.ctx.Done():
+            u.log.Debugf("Uploader is exiting.")
+            return nil
+        case <-t.Chan():
+            if err := u.uploadCompleter.CheckUploads(u.ctx); err != nil {
+                if trace.Unwrap(err) != errContext {
+                    u.log.WithError(err).Warningf("Completer scan failed.")
+                }
+            }
+            if err := u.Scan(); err != nil {
+                if trace.Unwrap(err) != errContext {
+                    u.log.WithError(err).Warningf("Uploader scan failed.")
+                }
+            }
+        }
+    }
+}
+
+// Scan scans the streaming directory and uploads recordings
+func (u *Uploader) Scan() error {
+    files, err := ioutil.ReadDir(u.cfg.ScanDir)
+    if err != nil {
+        return trace.ConvertSystemError(err)
+    }
+    u.log.Debugf("Found %v files in dir %v.", len(files), u.cfg.ScanDir)
+    for i := range files {
+        fi := files[i]
+        if fi.IsDir() {
+            continue
+        }
+        if filepath.Ext(fi.Name()) == checkpointExt {
+            continue
+        }
+        if err := u.startUpload(fi.Name()); err != nil {
+            if trace.IsCompareFailed(err) {
+                u.log.Debugf("Uploader detected locked file %v, another process is processing it.", fi.Name())
+                continue
+            }
+            return trace.Wrap(err)
+        }
+    }
+    return nil
+}
+
+// checkpointFilePath returns a path to the checkpoint file for a session
+func (u *Uploader) checkpointFilePath(sid session.ID) string {
+    return filepath.Join(u.cfg.ScanDir, sid.String()+checkpointExt)
+}
+
+// Close closes all operations
+func (u *Uploader) Close() error {
+    u.cancel()
+    return u.uploadCompleter.Close()
+}
+
+type upload struct {
+    sessionID session.ID
+    reader *events.ProtoReader
+    file *os.File
+    checkpointFile *os.File
+}
+
+// readStatus reads stream status
+func (u *upload) readStatus() (*events.StreamStatus, error) {
+    data, err := ioutil.ReadAll(u.checkpointFile)
+    if err != nil {
+        return nil, trace.ConvertSystemError(err)
+    }
+    if len(data) == 0 {
+        return nil, trace.NotFound("no status found")
+    }
+    var status events.StreamStatus
+    err = utils.FastUnmarshal(data, &status)
+    if err != nil {
+        return nil, trace.Wrap(err)
+    }
+    return &status, nil
+}
+
+// writeStatus writes stream status
+func (u *upload) writeStatus(status events.StreamStatus) error {
+    data, err := utils.FastMarshal(status)
+    if err != nil {
+        return trace.Wrap(err)
+    }
+    _, err = u.checkpointFile.Seek(0, 0)
+    if err != nil {
+        return trace.ConvertSystemError(err)
+    }
+    n, err := u.checkpointFile.Write(data)
+    if err != nil {
+        return trace.Wrap(err)
+    }
+    if n < len(data) {
+        return trace.ConvertSystemError(io.ErrShortWrite)
+    }
+    return nil
+}
+
+// Close releases the file and associated resources
+// in the correct order
+func (u *upload) Close() error {
+    return trace.NewAggregate(
+        u.reader.Close(),
+        utils.FSUnlock(u.file),
+        u.file.Close(),
+        utils.NilCloser(u.checkpointFile).Close(),
+    )
+}
+
+func (u *upload) removeFiles() error {
+    var errs []error
+    if u.file != nil {
+        errs = append(errs,
+            trace.ConvertSystemError(os.Remove(u.file.Name())))
+    }
+    if u.checkpointFile != nil {
+        errs = append(errs,
+            trace.ConvertSystemError(os.Remove(u.checkpointFile.Name())))
+    }
+    return trace.NewAggregate(errs...)
+}
+
+func (u *Uploader) startUpload(fileName string) error {
+    sessionID, err := sessionIDFromPath(fileName)
+    if err != nil {
+        return trace.Wrap(err)
+    }
+    // Apparently, exclusive lock can be obtained only in RDWR mode on NFS
+    sessionFilePath := filepath.Join(u.cfg.ScanDir, fileName)
+    sessionFile, err := os.OpenFile(sessionFilePath, os.O_RDWR, 0)
+    if err != nil {
+        return trace.ConvertSystemError(err)
+    }
+    if err := utils.FSTryWriteLock(sessionFile); err != nil {
+        if e := sessionFile.Close(); e != nil {
+            u.log.WithError(e).Warningf("Failed to close %v.", fileName)
+        }
+        return trace.Wrap(err)
+    }
+
+    upload := &upload{
+        sessionID: sessionID,
+        reader: events.NewProtoReader(sessionFile),
+        file: sessionFile,
+    }
+    upload.checkpointFile, err = os.OpenFile(u.checkpointFilePath(sessionID), os.O_RDWR|os.O_CREATE, 0600)
+    if err != nil {
+        if err := upload.Close(); err != nil {
+            u.log.WithError(err).Warningf("Failed to close upload.")
+        }
+        return trace.ConvertSystemError(err)
+    }
+
+    start := time.Now()
+    if err := u.takeSemaphore(); err != nil {
+        if err := upload.Close(); err != nil {
+            u.log.WithError(err).Warningf("Failed to close upload.")
+        }
+        return trace.Wrap(err)
+    }
+    u.log.Debugf("Semaphore acquired in %v for upload %v.", time.Since(start), fileName)
+    go func() {
+        if err := u.upload(upload); err != nil {
+            u.log.WithError(err).Warningf("Upload failed.")
+            u.emitEvent(events.UploadEvent{
+                SessionID: string(upload.sessionID),
+                Error: err,
+            })
+            return
+        }
+        u.emitEvent(events.UploadEvent{
+            SessionID: string(upload.sessionID),
+        })
+
+    }()
+    return nil
+}
+
+func (u *Uploader) upload(up *upload) error {
+    defer u.releaseSemaphore()
+    defer func() {
+        if err := up.Close(); err != nil {
+            u.log.WithError(err).Warningf("Failed to close upload.")
+        }
+    }()
+
+    var stream events.Stream
+    status, err := up.readStatus()
+    if err != nil {
+        if !trace.IsNotFound(err) {
+            return trace.Wrap(err)
+        }
+        u.log.Debugf("Starting upload for session %v.", up.sessionID)
+        stream, err = u.cfg.Streamer.CreateAuditStream(u.ctx, up.sessionID)
+        if err != nil {
+            return trace.Wrap(err)
+        }
+    } else {
+        u.log.Debugf("Resuming upload for session %v, upload ID %v.", up.sessionID, status.UploadID)
+        stream, err = u.cfg.Streamer.ResumeAuditStream(u.ctx, up.sessionID, status.UploadID)
+        if err != nil {
+            if !trace.IsNotFound(err) {
+                return trace.Wrap(err)
+            }
+            u.log.WithError(err).Warningf(
+                "Upload for session %v, upload ID %v is not found, starting a new upload from scratch.",
+                up.sessionID, status.UploadID)
+            status = nil
+            stream, err = u.cfg.Streamer.CreateAuditStream(u.ctx, up.sessionID)
+            if err != nil {
+                return trace.Wrap(err)
+            }
+        }
+    }
+
+    defer func() {
+        if err := stream.Close(u.ctx); err != nil {
+            if trace.Unwrap(err) != io.EOF {
+                u.log.WithError(err).Debugf("Failed to close stream.")
+            }
+        }
+    }()
+
+    // The call to CreateAuditStream is async. To learn
+    // if it was successful, get the first status update
+    // sent by the server after create.
+ select { + case <-stream.Status(): + case <-time.After(defaults.NetworkRetryDuration): + return trace.ConnectionProblem(nil, "timeout waiting for stream status update") + case <-u.ctx.Done(): + return trace.ConnectionProblem(u.ctx.Err(), "operation has been cancelled") + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go u.monitorStreamStatus(u.ctx, up, stream, cancel) + + start := u.cfg.Clock.Now().UTC() + for { + event, err := up.reader.Read(ctx) + if err != nil { + if err == io.EOF { + break + } + return trace.Wrap(err) + } + // skip events that have been already submitted + if status != nil && event.GetIndex() <= status.LastEventIndex { + continue + } + if err := stream.EmitAuditEvent(u.ctx, event); err != nil { + return trace.Wrap(err) + } + } + + if err := stream.Complete(u.ctx); err != nil { + u.log.WithError(err).Errorf("Failed to complete upload.") + return trace.Wrap(err) + } + + // make sure that checkpoint writer goroutine finishes + // before the files are closed to avoid async writes + // the timeout is a defensive measure to avoid blocking + // indefinitely in case of unforeseen error (e.g. write taking too long) + wctx, wcancel := context.WithTimeout(ctx, defaults.DefaultDialTimeout) + defer wcancel() + + <-wctx.Done() + if errors.Is(wctx.Err(), context.DeadlineExceeded) { + u.log.WithError(wctx.Err()).Warningf( + "Checkpoint function failed to complete the write due to timeout. Possible slow disk write.") + } + + u.log.WithFields(log.Fields{"duration": u.cfg.Clock.Since(start), "session-id": up.sessionID}).Infof("Session upload completed.") + // In linux it is possible to remove a file while holding a file descriptor + if err := up.removeFiles(); err != nil { + u.log.WithError(err).Warningf("Failed to remove session files.") + } + return nil +} + +// monitorStreamStatus monitors stream's status +// and checkpoints the stream +func (u *Uploader) monitorStreamStatus(ctx context.Context, up *upload, stream events.Stream, cancel context.CancelFunc) { + defer cancel() + for { + select { + case <-ctx.Done(): + return + case <-stream.Done(): + return + case status := <-stream.Status(): + if err := up.writeStatus(status); err != nil { + u.log.WithError(err).Debugf("Got stream status: %v.", status) + } else { + u.log.Debugf("Got stream status: %v.", status) + } + } + } +} + +var errContext = fmt.Errorf("context has closed") + +func (u *Uploader) takeSemaphore() error { + select { + case u.semaphore <- struct{}{}: + return nil + case <-u.ctx.Done(): + return errContext + } +} + +func (u *Uploader) releaseSemaphore() error { + select { + case <-u.semaphore: + return nil + case <-u.ctx.Done(): + return errContext + } +} + +func (u *Uploader) emitEvent(e events.UploadEvent) { + if u.cfg.EventsC == nil { + return + } + select { + case u.cfg.EventsC <- e: + return + default: + u.log.Warningf("Skip send event on a blocked channel.") + } +} diff --git a/lib/events/filesessions/fileasync_test.go b/lib/events/filesessions/fileasync_test.go new file mode 100644 index 0000000000000..e3fd8ae959bc9 --- /dev/null +++ b/lib/events/filesessions/fileasync_test.go @@ -0,0 +1,486 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +package filesessions + +import ( + "bytes" + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/gravitational/teleport/lib/events" + "github.com/gravitational/teleport/lib/session" + "github.com/gravitational/teleport/lib/utils" + + "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "go.uber.org/atomic" +) + +// TestUploadOK tests async file uploads scenarios +func TestUploadOK(t *testing.T) { + utils.InitLoggerForTests(testing.Verbose()) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + clock := clockwork.NewFakeClock() + + eventsC := make(chan events.UploadEvent, 100) + memUploader := events.NewMemoryUploader(eventsC) + streamer, err := events.NewProtoStreamer(events.ProtoStreamerConfig{ + Uploader: memUploader, + }) + assert.Nil(t, err) + + scanDir, err := ioutil.TempDir("", "teleport-streams") + assert.Nil(t, err) + defer os.RemoveAll(scanDir) + + scanPeriod := 10 * time.Second + uploader, err := NewUploader(UploaderConfig{ + Context: ctx, + ScanDir: scanDir, + ScanPeriod: scanPeriod, + Streamer: streamer, + Clock: clock, + }) + assert.Nil(t, err) + go uploader.Serve() + + // wait until uploader blocks on the clock + clock.BlockUntil(1) + + defer uploader.Close() + + fileStreamer, err := NewStreamer(scanDir) + assert.Nil(t, err) + + inEvents := events.GenerateTestSession(events.SessionParams{PrintEvents: 1024}) + sid := inEvents[0].(events.SessionMetadataGetter).GetSessionID() + + emitStream(ctx, t, fileStreamer, inEvents) + + // initiate the scan by advancing clock past + // block period + clock.Advance(scanPeriod + time.Second) + + var event events.UploadEvent + select { + case event = <-eventsC: + assert.Equal(t, event.SessionID, sid) + assert.Nil(t, event.Error) + case <-ctx.Done(): + t.Fatalf("Timeout waiting for async upload, try `go test -v` to get more logs for details") + } + + // read the upload and make sure the data is equal + outEvents := readStream(ctx, t, event.UploadID, memUploader) + assert.Equal(t, inEvents, outEvents) +} + +// TestUploadParallel verifies several parallel uploads that have to wait +// for semaphore +func TestUploadParallel(t *testing.T) { + utils.InitLoggerForTests(testing.Verbose()) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + clock := clockwork.NewFakeClock() + + eventsC := make(chan events.UploadEvent, 100) + memUploader := events.NewMemoryUploader(eventsC) + streamer, err := events.NewProtoStreamer(events.ProtoStreamerConfig{ + Uploader: memUploader, + }) + assert.Nil(t, err) + + scanDir, err := ioutil.TempDir("", "teleport-streams") + assert.Nil(t, err) + defer os.RemoveAll(scanDir) + + scanPeriod := 10 * time.Second + uploader, err := NewUploader(UploaderConfig{ + Context: ctx, + ScanDir: scanDir, + ScanPeriod: scanPeriod, + Streamer: streamer, + Clock: clock, + ConcurrentUploads: 2, + }) + assert.Nil(t, err) + go uploader.Serve() + // wait until uploader blocks on the clock + clock.BlockUntil(1) + + defer 
uploader.Close() + + sessions := make(map[string][]events.AuditEvent) + + for i := 0; i < 5; i++ { + fileStreamer, err := NewStreamer(scanDir) + assert.Nil(t, err) + + sessionEvents := events.GenerateTestSession(events.SessionParams{PrintEvents: 1024}) + sid := sessionEvents[0].(events.SessionMetadataGetter).GetSessionID() + + emitStream(ctx, t, fileStreamer, sessionEvents) + sessions[sid] = sessionEvents + } + + // initiate the scan by advancing the clock past + // block period + clock.Advance(scanPeriod + time.Second) + + for range sessions { + var event events.UploadEvent + var sessionEvents []events.AuditEvent + var found bool + select { + case event = <-eventsC: + log.Debugf("Got upload event %v", event) + assert.Nil(t, event.Error) + sessionEvents, found = sessions[event.SessionID] + assert.Equal(t, found, true, + "session %q is not expected, possible duplicate event", event.SessionID) + case <-ctx.Done(): + t.Fatalf("Timeout waiting for async upload, try `go test -v` to get more logs for details") + } + + // read the upload and make sure the data is equal + outEvents := readStream(ctx, t, event.UploadID, memUploader) + + assert.Equal(t, sessionEvents, outEvents) + + delete(sessions, event.SessionID) + } +} + +type resumeTestCase struct { + name string + newTest func(streamer events.Streamer) resumeTestTuple + // retries is how many times the uploader will retry the upload + // after the first upload attempt fails + retries int + // onRetry is called on retry attempt + onRetry func(t *testing.T, attempt int, uploader *Uploader) +} + +type resumeTestTuple struct { + streamer *events.CallbackStreamer + verify func(t *testing.T, tc resumeTestCase) +} + +// TestUploadResume verifies successful upload run after the stream has been interrupted +func TestUploadResume(t *testing.T) { + utils.InitLoggerForTests(testing.Verbose()) + testCases := []resumeTestCase{ + { + name: "stream terminates in the middle of submission", + retries: 1, + newTest: func(streamer events.Streamer) resumeTestTuple { + streamResumed := atomic.NewUint64(0) + terminateConnection := atomic.NewUint64(1) + + callbackStreamer, err := events.NewCallbackStreamer(events.CallbackStreamerConfig{ + Inner: streamer, + OnEmitAuditEvent: func(ctx context.Context, sid session.ID, event events.AuditEvent) error { + if event.GetIndex() > 600 && terminateConnection.CAS(1, 0) == true { + log.Debugf("Terminating connection at event %v", event.GetIndex()) + return trace.ConnectionProblem(nil, "connection terminated") + } + return nil + }, + OnResumeAuditStream: func(ctx context.Context, sid session.ID, uploadID string, streamer events.Streamer) (events.Stream, error) { + stream, err := streamer.ResumeAuditStream(ctx, sid, uploadID) + assert.Nil(t, err) + streamResumed.Inc() + return stream, nil + }, + }) + assert.Nil(t, err) + return resumeTestTuple{ + streamer: callbackStreamer, + verify: func(t *testing.T, tc resumeTestCase) { + assert.Equal(t, 1, int(streamResumed.Load()), tc.name) + }, + } + }, + }, + { + name: "stream terminates multiple times at different stages of submission", + retries: 10, + newTest: func(streamer events.Streamer) resumeTestTuple { + streamResumed := atomic.NewUint64(0) + terminateConnection := atomic.NewUint64(0) + + callbackStreamer, err := events.NewCallbackStreamer(events.CallbackStreamerConfig{ + Inner: streamer, + OnEmitAuditEvent: func(ctx context.Context, sid session.ID, event events.AuditEvent) error { + if event.GetIndex() > 600 && terminateConnection.Inc() <= 10 { + log.Debugf("Terminating connection 
#%v at event %v", terminateConnection.Load(), event.GetIndex()) + return trace.ConnectionProblem(nil, "connection terminated") + } + return nil + }, + OnResumeAuditStream: func(ctx context.Context, sid session.ID, uploadID string, streamer events.Streamer) (events.Stream, error) { + stream, err := streamer.ResumeAuditStream(ctx, sid, uploadID) + assert.Nil(t, err) + streamResumed.Inc() + return stream, nil + }, + }) + assert.Nil(t, err) + return resumeTestTuple{ + streamer: callbackStreamer, + verify: func(t *testing.T, tc resumeTestCase) { + assert.Equal(t, 10, int(streamResumed.Load()), tc.name) + }, + } + }, + }, + { + name: "stream resumes if upload is not found", + retries: 1, + newTest: func(streamer events.Streamer) resumeTestTuple { + streamCreated := atomic.NewUint64(0) + terminateConnection := atomic.NewUint64(1) + + callbackStreamer, err := events.NewCallbackStreamer(events.CallbackStreamerConfig{ + Inner: streamer, + OnEmitAuditEvent: func(ctx context.Context, sid session.ID, event events.AuditEvent) error { + if event.GetIndex() > 600 && terminateConnection.CAS(1, 0) == true { + log.Debugf("Terminating connection at event %v", event.GetIndex()) + return trace.ConnectionProblem(nil, "connection terminated") + } + return nil + }, + OnCreateAuditStream: func(ctx context.Context, sid session.ID, streamer events.Streamer) (events.Stream, error) { + stream, err := streamer.CreateAuditStream(ctx, sid) + assert.Nil(t, err) + streamCreated.Inc() + return stream, nil + }, + OnResumeAuditStream: func(ctx context.Context, sid session.ID, uploadID string, streamer events.Streamer) (events.Stream, error) { + return nil, trace.NotFound("stream not found") + }, + }) + assert.Nil(t, err) + return resumeTestTuple{ + streamer: callbackStreamer, + verify: func(t *testing.T, tc resumeTestCase) { + assert.Equal(t, 2, int(streamCreated.Load()), tc.name) + }, + } + }, + }, + { + name: "stream created when checkpoint is lost after failure", + retries: 1, + onRetry: func(t *testing.T, attempt int, uploader *Uploader) { + files, err := ioutil.ReadDir(uploader.cfg.ScanDir) + assert.Nil(t, err) + checkpointsDeleted := 0 + for i := range files { + fi := files[i] + if fi.IsDir() { + continue + } + if filepath.Ext(fi.Name()) == checkpointExt { + err := os.Remove(filepath.Join(uploader.cfg.ScanDir, fi.Name())) + assert.Nil(t, err) + log.Debugf("Deleted checkpoint file: %v.", fi.Name()) + checkpointsDeleted++ + } + } + assert.Equal(t, 1, checkpointsDeleted, "expected to delete checkpoint file") + }, + newTest: func(streamer events.Streamer) resumeTestTuple { + streamCreated := atomic.NewUint64(0) + terminateConnection := atomic.NewUint64(1) + streamResumed := atomic.NewUint64(0) + + callbackStreamer, err := events.NewCallbackStreamer(events.CallbackStreamerConfig{ + Inner: streamer, + OnEmitAuditEvent: func(ctx context.Context, sid session.ID, event events.AuditEvent) error { + if event.GetIndex() > 600 && terminateConnection.CAS(1, 0) == true { + log.Debugf("Terminating connection at event %v", event.GetIndex()) + return trace.ConnectionProblem(nil, "connection terminated") + } + return nil + }, + OnCreateAuditStream: func(ctx context.Context, sid session.ID, streamer events.Streamer) (events.Stream, error) { + stream, err := streamer.CreateAuditStream(ctx, sid) + assert.Nil(t, err) + streamCreated.Inc() + return stream, nil + }, + OnResumeAuditStream: func(ctx context.Context, sid session.ID, uploadID string, streamer events.Streamer) (events.Stream, error) { + stream, err := streamer.ResumeAuditStream(ctx, 
sid, uploadID) + assert.Nil(t, err) + streamResumed.Inc() + return stream, nil + }, + }) + assert.Nil(t, err) + return resumeTestTuple{ + streamer: callbackStreamer, + verify: func(t *testing.T, tc resumeTestCase) { + assert.Equal(t, 2, int(streamCreated.Load()), tc.name) + assert.Equal(t, 0, int(streamResumed.Load()), tc.name) + }, + } + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runResume(t, tc) + }) + } +} + +// runResume runs resume scenario based on the test case specification +func runResume(t *testing.T, testCase resumeTestCase) { + log.Debugf("Running test %q.", testCase.name) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + clock := clockwork.NewFakeClock() + eventsC := make(chan events.UploadEvent, 100) + memUploader := events.NewMemoryUploader(eventsC) + streamer, err := events.NewProtoStreamer(events.ProtoStreamerConfig{ + Uploader: memUploader, + MinUploadBytes: 1024, + }) + assert.Nil(t, err) + + test := testCase.newTest(streamer) + + scanDir, err := ioutil.TempDir("", "teleport-streams") + assert.Nil(t, err) + defer os.RemoveAll(scanDir) + + scanPeriod := 10 * time.Second + uploader, err := NewUploader(UploaderConfig{ + EventsC: eventsC, + Context: ctx, + ScanDir: scanDir, + ScanPeriod: scanPeriod, + Streamer: test.streamer, + Clock: clock, + }) + assert.Nil(t, err) + go uploader.Serve() + // wait until uploader blocks on the clock + clock.BlockUntil(1) + + defer uploader.Close() + + fileStreamer, err := NewStreamer(scanDir) + assert.Nil(t, err) + + inEvents := events.GenerateTestSession(events.SessionParams{PrintEvents: 1024}) + sid := inEvents[0].(events.SessionMetadataGetter).GetSessionID() + + emitStream(ctx, t, fileStreamer, inEvents) + + // initiate the scan by advancing clock past + // block period + clock.Advance(scanPeriod + time.Second) + + // wait for the upload failure + var event events.UploadEvent + select { + case event = <-eventsC: + assert.Equal(t, event.SessionID, sid) + assert.IsType(t, trace.ConnectionProblem(nil, "connection problem"), event.Error) + case <-ctx.Done(): + t.Fatalf("Timeout waiting for async upload, try `go test -v` to get more logs for details") + } + + for i := 0; i < testCase.retries; i++ { + if testCase.onRetry != nil { + testCase.onRetry(t, i, uploader) + } + clock.BlockUntil(1) + clock.Advance(scanPeriod + time.Second) + + // wait for upload success + select { + case event = <-eventsC: + assert.Equal(t, event.SessionID, sid) + if i == testCase.retries-1 { + assert.Nil(t, event.Error) + } else { + assert.IsType(t, trace.ConnectionProblem(nil, "connection problem"), event.Error) + } + case <-ctx.Done(): + t.Fatalf("Timeout waiting for async upload, try `go test -v` to get more logs for details") + } + } + + // read the upload and make sure the data is equal + outEvents := readStream(ctx, t, event.UploadID, memUploader) + + assert.Equal(t, inEvents, outEvents) + + // perform additional checks as defined by test case + test.verify(t, testCase) +} + +// emitStream creates and sends the session stream +func emitStream(ctx context.Context, t *testing.T, streamer events.Streamer, inEvents []events.AuditEvent) { + sid := inEvents[0].(events.SessionMetadataGetter).GetSessionID() + + stream, err := streamer.CreateAuditStream(ctx, session.ID(sid)) + assert.Nil(t, err) + for _, event := range inEvents { + err := stream.EmitAuditEvent(ctx, event) + assert.Nil(t, err) + } + err = stream.Complete(ctx) + assert.Nil(t, err) +} + +// readStream reads and decodes 
the audit stream from uploadID +func readStream(ctx context.Context, t *testing.T, uploadID string, uploader *events.MemoryUploader) []events.AuditEvent { + parts, err := uploader.GetParts(uploadID) + assert.Nil(t, err) + + var outEvents []events.AuditEvent + var reader *events.ProtoReader + for i, part := range parts { + if i == 0 { + reader = events.NewProtoReader(bytes.NewReader(part)) + } else { + err := reader.Reset(bytes.NewReader(part)) + assert.Nil(t, err) + } + out, err := reader.ReadAll(ctx) + assert.Nil(t, err, "part crash %#v", part) + outEvents = append(outEvents, out...) + } + return outEvents +} diff --git a/lib/events/filesessions/filestream.go b/lib/events/filesessions/filestream.go new file mode 100644 index 0000000000000..0f26024175f68 --- /dev/null +++ b/lib/events/filesessions/filestream.go @@ -0,0 +1,325 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filesessions + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/gravitational/teleport" + "github.com/gravitational/teleport/lib/events" + "github.com/gravitational/teleport/lib/session" + "github.com/gravitational/teleport/lib/utils" + + "github.com/gravitational/trace" + "github.com/pborman/uuid" +) + +// NewStreamer creates a streamer sending uploads to disk +func NewStreamer(dir string) (*events.ProtoStreamer, error) { + handler, err := NewHandler(Config{ + Directory: dir, + }) + if err != nil { + return nil, trace.Wrap(err) + } + return events.NewProtoStreamer(events.ProtoStreamerConfig{ + Uploader: handler, + MinUploadBytes: events.MaxProtoMessageSizeBytes * 2, + }) +} + +// CreateUpload creates a multipart upload +func (h *Handler) CreateUpload(ctx context.Context, sessionID session.ID) (*events.StreamUpload, error) { + start := time.Now() + defer func() { h.Infof("Upload created in %v.", time.Since(start)) }() + + if err := os.MkdirAll(h.uploadsPath(), teleport.PrivateDirMode); err != nil { + return nil, trace.ConvertSystemError(err) + } + + upload := events.StreamUpload{ + SessionID: sessionID, + ID: uuid.New(), + } + if err := upload.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + + if err := os.MkdirAll(h.uploadPath(upload), teleport.PrivateDirMode); err != nil { + return nil, trace.Wrap(err) + } + + return &upload, nil +} + +// UploadPart uploads part +func (h *Handler) UploadPart(ctx context.Context, upload events.StreamUpload, partNumber int64, partBody io.ReadSeeker) (*events.StreamPart, error) { + start := time.Now() + defer func() { + h.Debugf("UploadPart(%v) part(%v) uploaded in %v.", upload.ID, partNumber, time.Since(start)) + }() + + if err := checkUpload(upload); err != nil { + return nil, trace.Wrap(err) + } + + partPath := h.partPath(upload, partNumber) + file, err := os.OpenFile(partPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return nil, trace.ConvertSystemError(err) + } + defer file.Close() + + if _, err := io.Copy(file, partBody); 
err != nil { + if err := os.Remove(partPath); err != nil { + h.WithError(err).Warningf("Failed to remove file %v.", partPath) + } + return nil, trace.Wrap(err) + } + + return &events.StreamPart{Number: partNumber}, nil +} + +// CompleteUpload completes the upload +func (h *Handler) CompleteUpload(ctx context.Context, upload events.StreamUpload, parts []events.StreamPart) error { + start := time.Now() + defer func() { h.Debugf("UploadPart(%v) completed in %v.", upload.ID, time.Since(start)) }() + + if len(parts) == 0 { + return trace.BadParameter("need at least one part to complete the upload") + } + if err := checkUpload(upload); err != nil { + return trace.Wrap(err) + } + + // Parts must be sorted in PartNumber order. + sort.Slice(parts, func(i, j int) bool { + return parts[i].Number < parts[j].Number + }) + + uploadPath := h.path(upload.SessionID) + + // Prevent other processes from accessing this file until the write is completed + f, err := os.OpenFile(uploadPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return trace.ConvertSystemError(err) + } + if err := utils.FSTryWriteLock(f); err != nil { + return trace.Wrap(err) + } + defer f.Close() + defer utils.FSUnlock(f) + + files := make([]*os.File, 0, len(parts)) + readers := make([]io.Reader, 0, len(parts)) + + defer func() { + for i := 0; i < len(files); i++ { + if err := files[i].Close(); err != nil { + h.WithError(err).Errorf("Failed to close file %v", files[i].Name()) + } + } + }() + + for _, part := range parts { + partPath := h.partPath(upload, part.Number) + file, err := os.Open(partPath) + if err != nil { + return trace.ConvertSystemError(err) + } + files = append(files, file) + readers = append(readers, file) + } + + _, err = io.Copy(f, io.MultiReader(readers...)) + if err != nil { + return trace.Wrap(err) + } + + err = h.Config.OnBeforeComplete(ctx, upload) + if err != nil { + return trace.Wrap(err) + } + + err = os.RemoveAll(h.uploadRootPath(upload)) + if err != nil { + h.WithError(err).Errorf("Failed to remove upload %v.", upload.ID) + } + return nil +} + +// ListParts lists upload parts +func (h *Handler) ListParts(ctx context.Context, upload events.StreamUpload) ([]events.StreamPart, error) { + var parts []events.StreamPart + if err := checkUpload(upload); err != nil { + return nil, trace.Wrap(err) + } + err := filepath.Walk(h.uploadPath(upload), func(path string, info os.FileInfo, err error) error { + if err != nil { + err = trace.ConvertSystemError(err) + if trace.IsNotFound(err) { + return nil + } + return err + } + if info.IsDir() { + return nil + } + part, err := partFromFileName(path) + if err != nil { + h.WithError(err).Debugf("Skipping file %v.", path) + return nil + } + parts = append(parts, events.StreamPart{ + Number: part, + }) + return nil + }) + if err != nil { + return nil, trace.Wrap(err) + } + // Parts must be sorted in PartNumber order. + sort.Slice(parts, func(i, j int) bool { + return parts[i].Number < parts[j].Number + }) + return parts, nil +} + +// ListUploads lists uploads that have been initiated but not completed with +// earlier uploads returned first +func (h *Handler) ListUploads(ctx context.Context) ([]events.StreamUpload, error) { + var uploads []events.StreamUpload + + dirs, err := ioutil.ReadDir(h.uploadsPath()) + if err != nil { + err = trace.ConvertSystemError(err) + // The upload folder may not exist if there are no uploads yet. 
+ if trace.IsNotFound(err) { + return nil, nil + } + return nil, trace.Wrap(err) + } + + for _, dir := range dirs { + if !dir.IsDir() { + continue + } + uploadID := dir.Name() + if err := checkUploadID(uploadID); err != nil { + h.WithError(err).Warningf("Skipping upload %v with bad format.", uploadID) + continue + } + files, err := ioutil.ReadDir(filepath.Join(h.uploadsPath(), dir.Name())) + if err != nil { + return nil, trace.ConvertSystemError(err) + } + // expect just one subdirectory - session ID + if len(files) != 1 { + h.WithError(err).Warningf("Skipping upload %v, missing subdirectory.", uploadID) + continue + } + if !files[0].IsDir() { + h.WithError(err).Warningf("Skipping upload %v, not a directory.", uploadID) + continue + } + uploads = append(uploads, events.StreamUpload{ + SessionID: session.ID(filepath.Base(files[0].Name())), + ID: uploadID, + Initiated: dir.ModTime(), + }) + } + sort.Slice(uploads, func(i, j int) bool { + return uploads[i].Initiated.Before(uploads[j].Initiated) + }) + return uploads, nil +} + +func (h *Handler) uploadsPath() string { + return filepath.Join(h.Directory, uploadsDir) +} + +func (h *Handler) uploadRootPath(upload events.StreamUpload) string { + return filepath.Join(h.uploadsPath(), upload.ID) +} + +func (h *Handler) uploadPath(upload events.StreamUpload) string { + return filepath.Join(h.uploadRootPath(upload), string(upload.SessionID)) +} + +func (h *Handler) partPath(upload events.StreamUpload, partNumber int64) string { + return filepath.Join(h.uploadPath(upload), partFileName(partNumber)) +} + +func partFileName(partNumber int64) string { + return fmt.Sprintf("%v%v", partNumber, partExt) +} + +func partFromFileName(fileName string) (int64, error) { + base := filepath.Base(fileName) + if filepath.Ext(base) != partExt { + return -1, trace.BadParameter("expected extension %v, got %v", partExt, base) + } + numberString := strings.TrimSuffix(base, partExt) + partNumber, err := strconv.ParseInt(numberString, 10, 0) + if err != nil { + return -1, trace.Wrap(err) + } + return partNumber, nil +} + +// checkUpload checks that upload IDs are valid +// and in addition verifies that upload ID is a valid UUID +// to avoid file scanning by passing bogus upload ID file paths +func checkUpload(upload events.StreamUpload) error { + if err := upload.CheckAndSetDefaults(); err != nil { + return trace.Wrap(err) + } + if err := checkUploadID(upload.ID); err != nil { + return trace.Wrap(err) + } + return nil +} + +// checkUploadID checks that upload ID is a valid UUID +// to avoid path scanning or using local paths as upload IDs +func checkUploadID(uploadID string) error { + out := uuid.Parse(uploadID) + if out == nil { + return trace.BadParameter("bad format of upload ID") + } + return nil +} + +const ( + // uploadsDir is a directory with multipart uploads + uploadsDir = "multi" + // partExt is a part extension + partExt = ".part" + // tarExt is a suffix for file uploads + tarExt = ".tar" + // checkpointExt is a suffix for checkpoint extensions + checkpointExt = ".checkpoint" +) diff --git a/lib/events/filesessions/fileuploader.go b/lib/events/filesessions/fileuploader.go index 4fded70c7e5f3..1bba40df1457e 100644 --- a/lib/events/filesessions/fileuploader.go +++ b/lib/events/filesessions/fileuploader.go @@ -22,8 +22,10 @@ import ( "io" "os" "path/filepath" + "strings" "github.com/gravitational/teleport" + "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/session" "github.com/gravitational/teleport/lib/utils" @@ -35,6 +37,13 @@ 
import ( type Config struct { // Directory is a directory with files Directory string + // OnBeforeComplete can be used to inject failures during tests + OnBeforeComplete func(ctx context.Context, upload events.StreamUpload) error +} + +// nopBeforeComplete does nothing +func nopBeforeComplete(ctx context.Context, upload events.StreamUpload) error { + return nil } // CheckAndSetDefaults checks and sets default values of file handler config @@ -42,9 +51,12 @@ func (s *Config) CheckAndSetDefaults() error { if s.Directory == "" { return trace.BadParameter("missing parameter Directory") } - if !utils.IsDir(s.Directory) { + if utils.IsDir(s.Directory) == false { return trace.BadParameter("path %q does not exist or is not a directory", s.Directory) } + if s.OnBeforeComplete == nil { + s.OnBeforeComplete = nopBeforeComplete + } return nil } @@ -110,5 +122,18 @@ func (l *Handler) Upload(ctx context.Context, sessionID session.ID, reader io.Re } func (l *Handler) path(sessionID session.ID) string { - return filepath.Join(l.Directory, string(sessionID)+".tar") + return filepath.Join(l.Directory, string(sessionID)+tarExt) +} + +// sessionIDFromPath extracts session ID from the filename +func sessionIDFromPath(path string) (session.ID, error) { + base := filepath.Base(path) + if filepath.Ext(base) != tarExt { + return session.ID(""), trace.BadParameter("expected extension %v, got %v", tarExt, base) + } + sid := session.ID(strings.TrimSuffix(base, tarExt)) + if err := sid.Check(); err != nil { + return session.ID(""), trace.Wrap(err) + } + return sid, nil } diff --git a/lib/events/filesessions/fileuploader_test.go b/lib/events/filesessions/fileuploader_test.go index c301daedaf795..fa2f1dd4dbd6e 100644 --- a/lib/events/filesessions/fileuploader_test.go +++ b/lib/events/filesessions/fileuploader_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 Gravitational, Inc. +Copyright 2018-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,36 +18,57 @@ limitations under the License. 
package filesessions import ( + "context" + "io/ioutil" + "os" "testing" + "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/events/test" "github.com/gravitational/teleport/lib/utils" - "gopkg.in/check.v1" + "github.com/gravitational/trace" + "github.com/stretchr/testify/assert" + "go.uber.org/atomic" ) -func TestFile(t *testing.T) { check.TestingT(t) } +// TestStreams tests various streaming upload scenarios +func TestStreams(t *testing.T) { + utils.InitLoggerForTests(testing.Verbose()) -type FileSuite struct { - test.HandlerSuite -} - -var _ = check.Suite(&FileSuite{}) - -func (s *FileSuite) SetUpSuite(c *check.C) { - utils.InitLoggerForTests() + dir, err := ioutil.TempDir("", "teleport-streams") + assert.Nil(t, err) + defer os.RemoveAll(dir) - var err error - s.HandlerSuite.Handler, err = NewHandler(Config{ - Directory: c.MkDir(), + handler, err := NewHandler(Config{ + Directory: dir, }) - c.Assert(err, check.IsNil) -} + assert.Nil(t, err) + defer handler.Close() -func (s *FileSuite) TestUploadDownload(c *check.C) { - s.UploadDownload(c) -} + t.Run("Stream", func(t *testing.T) { + test.Stream(t, handler) + }) + t.Run("Resume", func(t *testing.T) { + completeCount := atomic.NewUint64(0) + handler, err := NewHandler(Config{ + Directory: dir, + OnBeforeComplete: func(ctx context.Context, upload events.StreamUpload) error { + if completeCount.Inc() <= 1 { + return trace.ConnectionProblem(nil, "simulate failure %v", completeCount.Load()) + } + return nil + }, + }) + assert.Nil(t, err) + defer handler.Close() -func (s *FileSuite) TestDownloadNotFound(c *check.C) { - s.DownloadNotFound(c) + test.StreamResumeManyParts(t, handler) + }) + t.Run("UploadDownload", func(t *testing.T) { + test.UploadDownload(t, handler) + }) + t.Run("DownloadNotFound", func(t *testing.T) { + test.DownloadNotFound(t, handler) + }) } diff --git a/lib/events/firestoreevents/firestoreevents.go b/lib/events/firestoreevents/firestoreevents.go index 442bb1e4dd790..eedcbe6ae1946 100644 --- a/lib/events/firestoreevents/firestoreevents.go +++ b/lib/events/firestoreevents/firestoreevents.go @@ -305,8 +305,8 @@ func New(cfg EventsConfig) (*Log, error) { return b, nil } -// EmitAuditEvent emits audit event -func (l *Log) EmitAuditEvent(ev events.Event, fields events.EventFields) error { +// EmitAuditEventLegacy emits audit event +func (l *Log) EmitAuditEventLegacy(ev events.Event, fields events.EventFields) error { sessionID := fields.GetString(events.SessionEventID) eventIndex := fields.GetInt(events.EventIndex) // no session id - global event gets a random uuid to get a good partition diff --git a/lib/events/forward.go b/lib/events/forward.go index 85cece2697496..1a0478c6d8256 100644 --- a/lib/events/forward.go +++ b/lib/events/forward.go @@ -117,8 +117,8 @@ func (l *Forwarder) Close() error { return l.sessionLogger.Finalize() } -// EmitAuditEvent emits audit event -func (l *Forwarder) EmitAuditEvent(event Event, fields EventFields) error { +// EmitAuditEventLegacy emits audit event +func (l *Forwarder) EmitAuditEventLegacy(event Event, fields EventFields) error { err := UpdateEventFields(event, fields, l.Clock, l.UID) if err != nil { return trace.Wrap(err) diff --git a/lib/events/gcssessions/gcshandler.go b/lib/events/gcssessions/gcshandler.go index 5c99d9ed08c44..faeca2c1940d5 100644 --- a/lib/events/gcssessions/gcshandler.go +++ b/lib/events/gcssessions/gcshandler.go @@ -1,6 +1,5 @@ -package gcssessions - /* +Copyright 2020 Gravitational, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,9 +12,10 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - */ +package gcssessions + import ( "context" "fmt" @@ -105,6 +105,12 @@ type Config struct { KMSKeyName string // Endpoint Endpoint string + // OnComposerRun is used for fault injection in tests + // runs (or doesn't run composer and returns error + OnComposerRun func(ctx context.Context, composer *storage.Composer) (*storage.ObjectAttrs, error) + // AfterObjectDelete is used for fault injection in tests + // runs (or doesn't run object delete) and returns error + AfterObjectDelete func(ctx context.Context, object *storage.ObjectHandle, error error) error } // SetFromURL sets values on the Config from the supplied URI @@ -134,19 +140,38 @@ func (cfg *Config) SetFromURL(url *url.URL) error { if projectIDParamString == "" { return trace.BadParameter("parameter %s with value '%s' is invalid", projectID, projectIDParamString) - } else { - cfg.ProjectID = projectIDParamString } + cfg.ProjectID = projectIDParamString if url.Host == "" { return trace.BadParameter("host should be set to the bucket name for recording storage") - } else { - cfg.Bucket = url.Host } + cfg.Bucket = url.Host return nil } +// CheckAndSetDefaults checks and sets default values +func (cfg *Config) CheckAndSetDefaults() error { + if cfg.OnComposerRun == nil { + cfg.OnComposerRun = composerRun + } + if cfg.AfterObjectDelete == nil { + cfg.AfterObjectDelete = afterObjectDelete + } + return nil +} + +// afterObjectDelete is a passthrough function to delete an object +func afterObjectDelete(ctx context.Context, object *storage.ObjectHandle, err error) error { + return nil +} + +// ComposerRun is a passthrough function that runs composer +func composerRun(ctx context.Context, composer *storage.Composer) (*storage.ObjectAttrs, error) { + return composer.Run(ctx) +} + // DefaultNewHandler returns a new handler with default GCS client settings derived from the config func DefaultNewHandler(cfg Config) (*Handler, error) { var args []option.ClientOption @@ -168,6 +193,9 @@ func DefaultNewHandler(cfg Config) (*Handler, error) { // NewHandler returns a new handler with specific context, cancelFunc, and client func NewHandler(ctx context.Context, cancelFunc context.CancelFunc, cfg Config, client *storage.Client) (*Handler, error) { + if err := cfg.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } h := &Handler{ Entry: log.WithFields(log.Fields{ trace.Component: teleport.Component(teleport.SchemeGCS), @@ -200,7 +228,7 @@ type Handler struct { clientCancel context.CancelFunc } -// Closer releases connection and resources associated with log if any +// Close releases connection and resources associated with log if any func (h *Handler) Close() error { h.clientCancel() return h.gcsClient.Close() @@ -210,7 +238,7 @@ func (h *Handler) Close() error { // and returns the target GCS bucket path in case of successful upload. func (h *Handler) Upload(ctx context.Context, sessionID session.ID, reader io.Reader) (string, error) { path := h.path(sessionID) - h.Logger.Debugf("uploading %s", path) + h.Logger.Debugf("Uploading %s.", path) // Make sure we don't overwrite an existing recording. 
_, err := h.gcsClient.Bucket(h.Config.Bucket).Object(path).Attrs(ctx) @@ -241,7 +269,7 @@ func (h *Handler) Upload(ctx context.Context, sessionID session.ID, reader io.Re // return trace.NotFound error is object is not found func (h *Handler) Download(ctx context.Context, sessionID session.ID, writerAt io.WriterAt) error { path := h.path(sessionID) - h.Logger.Debugf("downloading %s", path) + h.Logger.Debugf("Downloading %s.", path) writer, ok := writerAt.(io.Writer) if !ok { return trace.BadParameter("the provided writerAt is %T which does not implement io.Writer", writerAt) diff --git a/lib/events/gcssessions/gcshandler_test.go b/lib/events/gcssessions/gcshandler_test.go index e40e124e0cf15..3cc876c4a617d 100644 --- a/lib/events/gcssessions/gcshandler_test.go +++ b/lib/events/gcssessions/gcshandler_test.go @@ -21,53 +21,36 @@ import ( "fmt" "testing" - "github.com/fsouza/fake-gcs-server/fakestorage" "github.com/gravitational/teleport/lib/events/test" "github.com/gravitational/teleport/lib/utils" + + "github.com/fsouza/fake-gcs-server/fakestorage" "github.com/pborman/uuid" - "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" ) -func TestGCS(t *testing.T) { check.TestingT(t) } - -type GCSSuite struct { - handler *Handler - test.HandlerSuite - gcsServer *fakestorage.Server -} - -var _ = check.Suite(&GCSSuite{}) - -func (s *GCSSuite) SetUpSuite(c *check.C) { +// TestFakeStreams tests various streaming upload scenarios +// using fake GCS background +func TestFakeStreams(t *testing.T) { + utils.InitLoggerForTests(testing.Verbose()) server := *fakestorage.NewServer([]fakestorage.Object{}) - s.gcsServer = &server + defer server.Stop() ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() - utils.InitLoggerForTests() - - var err error - s.HandlerSuite.Handler, err = NewHandler(ctx, cancelFunc, Config{ + handler, err := NewHandler(ctx, cancelFunc, Config{ Endpoint: server.URL(), Bucket: fmt.Sprintf("teleport-test-%v", uuid.New()), }, server.Client()) - c.Assert(err, check.IsNil) -} - -func (s *GCSSuite) TestUploadDownload(c *check.C) { - s.UploadDownload(c) -} - -func (s *GCSSuite) TestDownloadNotFound(c *check.C) { - s.DownloadNotFound(c) -} - -func (s *GCSSuite) TearDownSuite(c *check.C) { - if s.gcsServer != nil { - s.gcsServer.Stop() - } - if s.handler != nil { - s.handler.Close() - } + assert.Nil(t, err) + defer handler.Close() + + t.Run("UploadDownload", func(t *testing.T) { + test.UploadDownload(t, handler) + }) + t.Run("DownloadNotFound", func(t *testing.T) { + test.DownloadNotFound(t, handler) + }) } diff --git a/lib/events/gcssessions/gcsstream.go b/lib/events/gcssessions/gcsstream.go new file mode 100644 index 0000000000000..e950470251ccf --- /dev/null +++ b/lib/events/gcssessions/gcsstream.go @@ -0,0 +1,381 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gcssessions + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/gravitational/teleport/lib/events" + "github.com/gravitational/teleport/lib/session" + + "cloud.google.com/go/storage" + "github.com/pborman/uuid" + "google.golang.org/api/iterator" + + "github.com/gravitational/trace" +) + +// CreateUpload creates a multipart upload +func (h *Handler) CreateUpload(ctx context.Context, sessionID session.ID) (*events.StreamUpload, error) { + upload := events.StreamUpload{ + ID: uuid.New(), + SessionID: sessionID, + Initiated: time.Now().UTC(), + } + if err := upload.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + + uploadPath := h.uploadPath(upload) + + h.Logger.Debugf("Creating upload at %s", uploadPath) + // Make sure we don't overwrite an existing upload + _, err := h.gcsClient.Bucket(h.Config.Bucket).Object(uploadPath).Attrs(ctx) + if err != storage.ErrObjectNotExist { + if err != nil { + return nil, convertGCSError(err) + } + return nil, trace.AlreadyExists("upload %v for session %q already exists in GCS", upload.ID, sessionID) + } + + writer := h.gcsClient.Bucket(h.Config.Bucket).Object(uploadPath).NewWriter(ctx) + start := time.Now() + _, err = io.Copy(writer, strings.NewReader(string(sessionID))) + // Always close the writer, even if upload failed. + closeErr := writer.Close() + if err == nil { + err = closeErr + } + uploadLatencies.Observe(time.Since(start).Seconds()) + uploadRequests.Inc() + if err != nil { + return nil, convertGCSError(err) + } + return &upload, nil +} + +// UploadPart uploads part +func (h *Handler) UploadPart(ctx context.Context, upload events.StreamUpload, partNumber int64, partBody io.ReadSeeker) (*events.StreamPart, error) { + if err := upload.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + + partPath := h.partPath(upload, partNumber) + writer := h.gcsClient.Bucket(h.Config.Bucket).Object(partPath).NewWriter(ctx) + start := time.Now() + _, err := io.Copy(writer, partBody) + // Always close the writer, even if upload failed. 
+ closeErr := writer.Close() + if err == nil { + err = closeErr + } + uploadLatencies.Observe(time.Since(start).Seconds()) + uploadRequests.Inc() + if err != nil { + return nil, convertGCSError(err) + } + return &events.StreamPart{Number: partNumber}, nil +} + +// CompleteUpload completes the upload +func (h *Handler) CompleteUpload(ctx context.Context, upload events.StreamUpload, parts []events.StreamPart) error { + if err := upload.CheckAndSetDefaults(); err != nil { + return trace.Wrap(err) + } + + // If the session has been already created, move to cleanup + sessionPath := h.path(upload.SessionID) + _, err := h.gcsClient.Bucket(h.Config.Bucket).Object(sessionPath).Attrs(ctx) + if err != storage.ErrObjectNotExist { + if err != nil { + return convertGCSError(err) + } + return h.cleanupUpload(ctx, upload) + } + + // Makes sure that upload has been properly initiated, + // checks the .upload file + uploadPath := h.uploadPath(upload) + bucket := h.gcsClient.Bucket(h.Config.Bucket) + _, err = bucket.Object(uploadPath).Attrs(ctx) + if err != nil { + return convertGCSError(err) + } + + objects := h.partsToObjects(upload, parts) + for len(objects) > maxParts { + h.Logger.Debugf("Got %v objects for upload %v, performing temp merge.", + len(objects), upload) + objectsToMerge := objects[:maxParts] + mergeID := hashOfNames(objectsToMerge) + mergePath := h.mergePath(upload, mergeID) + mergeObject := bucket.Object(mergePath) + composer := mergeObject.ComposerFrom(objectsToMerge...) + _, err = h.OnComposerRun(ctx, composer) + if err != nil { + return convertGCSError(err) + } + objects = append([]*storage.ObjectHandle{mergeObject}, objects[maxParts:]...) + } + composer := bucket.Object(sessionPath).ComposerFrom(objects...) + _, err = h.OnComposerRun(ctx, composer) + if err != nil { + return convertGCSError(err) + } + h.Logger.Debugf("Got %v objects for upload %v, performed merge.", + len(objects), upload) + return h.cleanupUpload(ctx, upload) +} + +// cleanupUpload iterates through all upload related objects +// and deletes them in parallel +func (h *Handler) cleanupUpload(ctx context.Context, upload events.StreamUpload) error { + prefixes := []string{ + h.partsPrefix(upload), + h.mergesPrefix(upload), + h.uploadPrefix(upload), + } + + bucket := h.gcsClient.Bucket(h.Config.Bucket) + var objects []*storage.ObjectHandle + for _, prefix := range prefixes { + i := bucket.Objects(ctx, &storage.Query{Prefix: prefix, Versions: false}) + for { + attrs, err := i.Next() + if err == iterator.Done { + break + } + if err != nil { + return convertGCSError(err) + } + objects = append(objects, bucket.Object(attrs.Name)) + } + } + + // batch delete objects to speed up the process + semCh := make(chan struct{}, maxParts) + errorsCh := make(chan error, maxParts) + for i := range objects { + select { + case semCh <- struct{}{}: + go func(object *storage.ObjectHandle) { + defer func() { <-semCh }() + err := h.AfterObjectDelete(ctx, object, object.Delete(ctx)) + select { + case errorsCh <- convertGCSError(err): + case <-ctx.Done(): + } + }(objects[i]) + case <-ctx.Done(): + return trace.ConnectionProblem(ctx.Err(), "context closed") + } + } + + var errors []error + for range objects { + select { + case err := <-errorsCh: + if !trace.IsNotFound(err) { + errors = append(errors, err) + } + case <-ctx.Done(): + return trace.ConnectionProblem(ctx.Err(), "context closed") + } + } + return trace.NewAggregate(errors...) 
+} + +func (h *Handler) partsToObjects(upload events.StreamUpload, parts []events.StreamPart) []*storage.ObjectHandle { + objects := make([]*storage.ObjectHandle, len(parts)) + bucket := h.gcsClient.Bucket(h.Config.Bucket) + for i := 0; i < len(parts); i++ { + objects[i] = bucket.Object(h.partPath(upload, parts[i].Number)) + } + return objects +} + +// ListParts lists upload parts +func (h *Handler) ListParts(ctx context.Context, upload events.StreamUpload) ([]events.StreamPart, error) { + if err := upload.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + i := h.gcsClient.Bucket(h.Config.Bucket).Objects(ctx, &storage.Query{ + Prefix: h.partsPrefix(upload), + }) + var parts []events.StreamPart + for { + attrs, err := i.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, convertGCSError(err) + } + // Skip entries that are not parts + if filepath.Ext(attrs.Name) != partExt { + continue + } + part, err := partFromPath(attrs.Name) + if err != nil { + return nil, trace.Wrap(err) + } + parts = append(parts, *part) + } + return parts, nil +} + +// ListUploads lists uploads that have been initiated but not completed with +// earlier uploads returned first +func (h *Handler) ListUploads(ctx context.Context) ([]events.StreamUpload, error) { + i := h.gcsClient.Bucket(h.Config.Bucket).Objects(ctx, &storage.Query{ + Prefix: h.uploadsPrefix(), + }) + var uploads []events.StreamUpload + for { + attrs, err := i.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, convertGCSError(err) + } + // Skip entries that are not uploads + if filepath.Ext(attrs.Name) != uploadExt { + continue + } + upload, err := uploadFromPath(attrs.Name) + if err != nil { + return nil, trace.Wrap(err) + } + upload.Initiated = attrs.Created + uploads = append(uploads, *upload) + } + return uploads, nil +} + +const ( + // uploadsKey is a key that holds all upload-related objects + uploadsKey = "uploads" + // partsKey is a key that holds all part-related objects + partsKey = "parts" + // mergesKey is a key that holds temp merges to work around + // the google max parts limit + mergesKey = "merges" + // partExt is a part extension + partExt = ".part" + // mergeExt is a merge extension + mergeExt = ".merge" + // uploadExt is an upload extension + uploadExt = ".upload" + // slash is a forward slash + slash = "/" + // Maximum parts per compose as set by + // https://cloud.google.com/storage/docs/composite-objects + maxParts = 32 +) + +// uploadsPrefix is "path/uploads" +func (h *Handler) uploadsPrefix() string { + return strings.TrimPrefix(filepath.Join(h.Path, uploadsKey), slash) +} + +// uploadPrefix is "path/uploads/<upload-id>" +func (h *Handler) uploadPrefix(upload events.StreamUpload) string { + return filepath.Join(h.uploadsPrefix(), upload.ID) +} + +// uploadPath is "path/uploads/<upload-id>/<session-id>.upload" +func (h *Handler) uploadPath(upload events.StreamUpload) string { + return filepath.Join(h.uploadPrefix(upload), string(upload.SessionID)) + uploadExt +} + +// partsPrefix is "path/parts/<upload-id>" +// this path is under a different tree from upload to make prefix +// iteration of uploads more efficient (that otherwise would have to +// scan and skip the parts that could be 5K parts per upload) +func (h *Handler) partsPrefix(upload events.StreamUpload) string { + return strings.TrimPrefix(filepath.Join(h.Path, partsKey, upload.ID), slash) +} + +// partPath is "path/parts/<upload-id>/<part-number>.part" +func (h *Handler) partPath(upload events.StreamUpload, partNumber int64) string { + return
filepath.Join(h.partsPrefix(upload), fmt.Sprintf("%v%v", partNumber, partExt)) +} + +// mergesPrefix is "path/merges/<upload-id>" +// this path is under a different tree from upload to make prefix +// iteration of uploads more efficient (that otherwise would have to +// scan and skip the parts that could be 5K parts per upload) +func (h *Handler) mergesPrefix(upload events.StreamUpload) string { + return strings.TrimPrefix(filepath.Join(h.Path, mergesKey, upload.ID), slash) +} + +// mergePath is "path/merges/<upload-id>/<merge-id>.merge" +func (h *Handler) mergePath(upload events.StreamUpload, mergeID string) string { + return filepath.Join(h.mergesPrefix(upload), fmt.Sprintf("%v%v", mergeID, mergeExt)) +} + +// hashOfNames creates an object with hash of names +// to avoid generating new objects for consecutive merge attempts +func hashOfNames(objects []*storage.ObjectHandle) string { + hash := sha256.New() + for _, object := range objects { + hash.Write([]byte(object.ObjectName())) + } + return hex.EncodeToString(hash.Sum(nil)) +} + +func uploadFromPath(path string) (*events.StreamUpload, error) { + dir, file := filepath.Split(path) + if filepath.Ext(file) != uploadExt { + return nil, trace.BadParameter("expected extension %v, got %v", uploadExt, file) + } + sessionID := session.ID(strings.TrimSuffix(file, uploadExt)) + if err := sessionID.Check(); err != nil { + return nil, trace.Wrap(err) + } + parts := strings.Split(dir, slash) + if len(parts) < 2 { + return nil, trace.BadParameter("expected format uploads/<upload-id>, got %v", dir) + } + uploadID := parts[len(parts)-1] + return &events.StreamUpload{ + SessionID: sessionID, + ID: uploadID, + }, nil +} + +func partFromPath(path string) (*events.StreamPart, error) { + base := filepath.Base(path) + if filepath.Ext(base) != partExt { + return nil, trace.BadParameter("expected extension %v, got %v", partExt, base) + } + numberString := strings.TrimSuffix(base, partExt) + partNumber, err := strconv.ParseInt(numberString, 10, 0) + if err != nil { + return nil, trace.Wrap(err) + } + return &events.StreamPart{Number: partNumber}, nil +} diff --git a/lib/events/gcssessions/gcsstream_test.go b/lib/events/gcssessions/gcsstream_test.go new file mode 100644 index 0000000000000..81bddbe0d2483 --- /dev/null +++ b/lib/events/gcssessions/gcsstream_test.go @@ -0,0 +1,116 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+ +*/ + +package gcssessions + +import ( + "context" + "fmt" + "net/url" + "os" + "testing" + + "github.com/gravitational/teleport" + "github.com/gravitational/teleport/lib/events/test" + "github.com/gravitational/teleport/lib/utils" + + "cloud.google.com/go/storage" + "github.com/gravitational/trace" + "github.com/stretchr/testify/assert" + "go.uber.org/atomic" +) + +// TestStreams tests various streaming upload scenarios +func TestStreams(t *testing.T) { + utils.InitLoggerForTests(testing.Verbose()) + + uri := os.Getenv(teleport.GCSTestURI) + if uri == "" { + t.Skip( + fmt.Sprintf("Skipping GCS tests, set env var %q, details here: https://gravitational.com/teleport/docs/gcp_guide/", + teleport.GCSTestURI)) + } + u, err := url.Parse(uri) + assert.Nil(t, err) + + config := Config{} + err = config.SetFromURL(u) + assert.NoError(t, err) + + handler, err := DefaultNewHandler(config) + assert.NoError(t, err) + defer handler.Close() + + // Stream with handler and many parts + t.Run("StreamManyParts", func(t *testing.T) { + test.StreamManyParts(t, handler) + }) + t.Run("UploadDownload", func(t *testing.T) { + test.UploadDownload(t, handler) + }) + t.Run("DownloadNotFound", func(t *testing.T) { + test.DownloadNotFound(t, handler) + }) + // This tests makes sure that resume works + // if the first attempt to compose object failed + t.Run("ResumeOnComposeFailure", func(t *testing.T) { + config := Config{} + err = config.SetFromURL(u) + assert.NoError(t, err) + + composeCount := atomic.NewUint64(0) + + config.OnComposerRun = func(ctx context.Context, composer *storage.Composer) (*storage.ObjectAttrs, error) { + if composeCount.Inc() <= 1 { + return nil, trace.ConnectionProblem(nil, "simulate timeout %v", composeCount.Load()) + } + return composer.Run(ctx) + } + + handler, err := DefaultNewHandler(config) + assert.NoError(t, err) + defer handler.Close() + + test.StreamResumeManyParts(t, handler) + }) + // This test makes sure that resume works + // if the attempt to delete the object on cleanup failed + t.Run("ResumeOnCleanupFailure", func(t *testing.T) { + config := Config{} + err = config.SetFromURL(u) + assert.NoError(t, err) + + deleteFailed := atomic.NewUint64(0) + + config.AfterObjectDelete = func(ctx context.Context, object *storage.ObjectHandle, err error) error { + if err != nil { + return err + } + // delete the object, but still simulate failure + if deleteFailed.CAS(0, 1) == true { + return trace.ConnectionProblem(nil, "simulate delete failure %v", deleteFailed.Load()) + } + return nil + } + + handler, err := DefaultNewHandler(config) + assert.NoError(t, err) + defer handler.Close() + + test.StreamResumeManyParts(t, handler) + }) +} diff --git a/lib/events/generate.go b/lib/events/generate.go new file mode 100644 index 0000000000000..0f987cf6da399 --- /dev/null +++ b/lib/events/generate.go @@ -0,0 +1,138 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package events + +import ( + "bytes" + "time" + + "github.com/jonboulle/clockwork" + "github.com/pborman/uuid" +) + +// SessionParams specifies optional parameters +// for generated session +type SessionParams struct { + // PrintEvents sets up print events count + PrintEvents int64 + // Clock is an optional clock setting start + // and offset time of the event + Clock clockwork.Clock + // ServerID is an optional server ID + ServerID string + // SessionID is an optional session ID to set + SessionID string +} + +// SetDefaults sets parameters defaults +func (p *SessionParams) SetDefaults() { + if p.Clock == nil { + p.Clock = clockwork.NewFakeClockAt( + time.Date(2020, 03, 30, 15, 58, 54, 561*int(time.Millisecond), time.UTC)) + } + if p.ServerID == "" { + p.ServerID = uuid.New() + } + if p.SessionID == "" { + p.SessionID = uuid.New() + } +} + +// GenerateTestSession generates test session events starting with session start +// event, adds printEvents events and returns the result. +func GenerateTestSession(params SessionParams) []AuditEvent { + params.SetDefaults() + sessionStart := SessionStart{ + Metadata: Metadata{ + Index: 0, + Type: SessionStartEvent, + ID: "36cee9e9-9a80-4c32-9163-3d9241cdac7a", + Code: SessionStartCode, + Time: params.Clock.Now().UTC(), + }, + ServerMetadata: ServerMetadata{ + ServerID: params.ServerID, + ServerLabels: map[string]string{ + "kernel": "5.3.0-42-generic", + "date": "Mon Mar 30 08:58:54 PDT 2020", + "group": "gravitational/devc", + }, + ServerHostname: "planet", + ServerNamespace: "default", + }, + SessionMetadata: SessionMetadata{ + SessionID: params.SessionID, + }, + UserMetadata: UserMetadata{ + User: "bob@example.com", + Login: "bob", + }, + ConnectionMetadata: ConnectionMetadata{ + LocalAddr: "127.0.0.1:3022", + RemoteAddr: "[::1]:37718", + }, + TerminalSize: "80:25", + } + + sessionEnd := SessionEnd{ + Metadata: Metadata{ + Index: 20, + Type: SessionEndEvent, + ID: "da455e0f-c27d-459f-a218-4e83b3db9426", + Code: SessionEndCode, + Time: params.Clock.Now().UTC().Add(time.Hour + time.Second + 7*time.Millisecond), + }, + ServerMetadata: ServerMetadata{ + ServerID: params.ServerID, + ServerNamespace: "default", + }, + SessionMetadata: SessionMetadata{ + SessionID: params.SessionID, + }, + UserMetadata: UserMetadata{ + User: "alice@example.com", + }, + EnhancedRecording: true, + Interactive: true, + Participants: []string{"alice@example.com"}, + StartTime: params.Clock.Now().UTC(), + EndTime: params.Clock.Now().UTC().Add(3*time.Hour + time.Second + 7*time.Millisecond), + } + + events := []AuditEvent{&sessionStart} + i := int64(0) + for i = 0; i < params.PrintEvents; i++ { + event := &SessionPrint{ + Metadata: Metadata{ + Index: i + 1, + Type: SessionPrintEvent, + Time: params.Clock.Now().UTC().Add(time.Minute + time.Duration(i)*time.Millisecond), + }, + ChunkIndex: i, + DelayMilliseconds: i, + Offset: i, + Data: bytes.Repeat([]byte("hello"), int(i%177+1)), + } + event.Bytes = int64(len(event.Data)) + event.Time = event.Time.Add(time.Duration(i) * time.Millisecond) + events = append(events, event) + } + i++ + sessionEnd.Metadata.Index = i + events = append(events, &sessionEnd) + return events +} diff --git a/lib/events/memsessions/memstream_test.go b/lib/events/memsessions/memstream_test.go new file mode 100644 index 0000000000000..3ce51ad33375d --- /dev/null +++ b/lib/events/memsessions/memstream_test.go @@ -0,0 +1,42 @@ +/* +Copyright 2020 Gravitational, Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +package memsessions + +import ( + "testing" + + "github.com/gravitational/teleport/lib/events" + "github.com/gravitational/teleport/lib/events/test" + "github.com/gravitational/teleport/lib/utils" +) + +// TestStreams tests various streaming upload scenarios +func TestStreams(t *testing.T) { + utils.InitLoggerForTests(testing.Verbose()) + + // Stream with handler and many parts + t.Run("StreamManyParts", func(t *testing.T) { + test.StreamManyParts(t, events.NewMemoryUploader()) + }) + t.Run("UploadDownload", func(t *testing.T) { + test.UploadDownload(t, events.NewMemoryUploader()) + }) + t.Run("DownloadNotFound", func(t *testing.T) { + test.DownloadNotFound(t, events.NewMemoryUploader()) + }) +} diff --git a/lib/events/mock.go b/lib/events/mock.go index 95c7cf72cb2e0..e6e1063e06f82 100644 --- a/lib/events/mock.go +++ b/lib/events/mock.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Gravitational, Inc. +Copyright 2017-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -69,10 +69,9 @@ func (d *MockAuditLog) Close() error { return nil } -// EmitAuditEvent is a mock that records even and fields inside a struct. -func (d *MockAuditLog) EmitAuditEvent(ev Event, fields EventFields) error { +// EmitAuditEventLegacy is a mock that records event and fields inside a struct. +func (d *MockAuditLog) EmitAuditEventLegacy(ev Event, fields EventFields) error { d.EmittedEvent = &EmittedEvent{ev, fields} - return nil } @@ -109,3 +108,63 @@ func (d *MockAuditLog) SearchSessionEvents(fromUTC, toUTC time.Time, limit int) func (d *MockAuditLog) Reset() { d.EmittedEvent = nil } + +// MockEmitter is emitter that stores last audit event +type MockEmitter struct { + mtx sync.RWMutex + lastEvent AuditEvent +} + +// CreateAuditStream creates a stream that discards all events +func (e *MockEmitter) CreateAuditStream(ctx context.Context, sid session.ID) (Stream, error) { + return e, nil +} + +// ResumeAuditStream resumes a stream that discards all events +func (e *MockEmitter) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (Stream, error) { + return e, nil +} + +// EmitAuditEvent emits audit event +func (e *MockEmitter) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + e.mtx.Lock() + defer e.mtx.Unlock() + e.lastEvent = event + return nil +} + +// LastEvent returns last emitted event +func (e *MockEmitter) LastEvent() AuditEvent { + e.mtx.RLock() + defer e.mtx.RUnlock() + return e.lastEvent +} + +// Reset resets state to zero values. 
+func (e *MockEmitter) Reset() { + e.mtx.Lock() + defer e.mtx.Unlock() + e.lastEvent = nil +} + +// Status returns a channel that always blocks +func (e *MockEmitter) Status() <-chan StreamStatus { + return nil +} + +// Done returns channel closed when streamer is closed +// should be used to detect sending errors +func (e *MockEmitter) Done() <-chan struct{} { + return nil +} + +// Close flushes non-uploaded flight stream data without marking +// the stream completed and closes the stream instance +func (e *MockEmitter) Close(ctx context.Context) error { + return nil +} + +// Complete does nothing +func (e *MockEmitter) Complete(ctx context.Context) error { + return nil +} diff --git a/lib/events/multilog.go b/lib/events/multilog.go index ae4ac1d00f638..f932b0d5ec7f3 100644 --- a/lib/events/multilog.go +++ b/lib/events/multilog.go @@ -54,11 +54,11 @@ func (m *MultiLog) Close() error { return trace.NewAggregate(errors...) } -// EmitAuditEvent emits audit event -func (m *MultiLog) EmitAuditEvent(event Event, fields EventFields) error { +// EmitAuditEventLegacy emits audit event +func (m *MultiLog) EmitAuditEventLegacy(event Event, fields EventFields) error { var errors []error for _, log := range m.loggers { - errors = append(errors, log.EmitAuditEvent(event, fields)) + errors = append(errors, log.EmitAuditEventLegacy(event, fields)) } return trace.NewAggregate(errors...) } diff --git a/lib/events/playback.go b/lib/events/playback.go new file mode 100644 index 0000000000000..563e9e192c264 --- /dev/null +++ b/lib/events/playback.go @@ -0,0 +1,287 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "archive/tar" + "context" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/gravitational/teleport/lib/session" + "github.com/gravitational/teleport/lib/utils" + + "github.com/gravitational/trace" + log "github.com/sirupsen/logrus" +) + +// Header returns information about playback +type Header struct { + // Tar detected tar format + Tar bool + // Proto is for proto format + Proto bool + // ProtoVersion is a version of the format, valid if Proto is true + ProtoVersion int64 +} + +// DetectFormat detects format by reading first bytes +// of the header. Callers should call Seek() +// to reuse reader after calling this function. 
+func DetectFormat(r io.ReadSeeker) (*Header, error) { + version := make([]byte, Int64Size) + _, err := io.ReadFull(r, version) + if err != nil { + return nil, trace.ConvertSystemError(err) + } + protocolVersion := binary.BigEndian.Uint64(version) + if protocolVersion == ProtoStreamV1 { + return &Header{ + Proto: true, + ProtoVersion: int64(protocolVersion), + }, nil + } + _, err = r.Seek(0, 0) + if err != nil { + return nil, trace.ConvertSystemError(err) + } + tr := tar.NewReader(r) + _, err = tr.Next() + if err != nil { + return nil, trace.ConvertSystemError(err) + } + return &Header{Tar: true}, nil +} + +// WriteForPlayback reads events from audit reader +// and writes them to the format optimized for playback +func WriteForPlayback(ctx context.Context, sid session.ID, reader AuditReader, dir string) error { + w := &PlaybackWriter{ + sid: sid, + reader: reader, + dir: dir, + eventIndex: -1, + } + defer func() { + if err := w.Close(); err != nil { + log.WithError(err).Warningf("Failed to close writer.") + } + }() + return w.Write(ctx) +} + +// PlaybackWriter reads messages until end of file +// and writes them to directory in compatibility playback format +type PlaybackWriter struct { + sid session.ID + dir string + reader AuditReader + indexFile *os.File + eventsFile *gzipWriter + chunksFile *gzipWriter + eventIndex int64 +} + +// Close closes all files +func (w *PlaybackWriter) Close() error { + if w.indexFile != nil { + w.indexFile.Close() + } + + if w.chunksFile != nil { + if err := w.chunksFile.Flush(); err != nil { + log.Warningf("Failed to flush chunks file: %v.", err) + } + + if err := w.chunksFile.Close(); err != nil { + log.Warningf("Failed closing chunks file: %v.", err) + } + } + + if w.eventsFile != nil { + if err := w.eventsFile.Flush(); err != nil { + log.Warningf("Failed to flush events file: %v.", err) + } + + if err := w.eventsFile.Close(); err != nil { + log.Warningf("Failed closing events file: %v.", err) + } + } + + return nil +} + +// Write writes the files in the format optimized for playback +func (w *PlaybackWriter) Write(ctx context.Context) error { + if err := w.openIndexFile(); err != nil { + return trace.Wrap(err) + } + for { + event, err := w.reader.Read(ctx) + if err != nil { + if err == io.EOF { + return nil + } + return trace.Wrap(err) + } + if err := w.writeEvent(event); err != nil { + return trace.Wrap(err) + } + } +} + +func (w *PlaybackWriter) writeEvent(event AuditEvent) error { + switch event.GetType() { + // Timing events for TTY playback go to both a chunks file (the raw bytes) as + // well as well as the events file (structured events). + case SessionPrintEvent: + return trace.Wrap(w.writeSessionPrintEvent(event)) + // Playback does not use enhanced events at the moment, + // so they are skipped + case SessionCommandEvent, SessionDiskEvent, SessionNetworkEvent: + return nil + // All other events get put into the general events file. These are events like + // session.join, session.end, etc. 
+ default: + return trace.Wrap(w.writeRegularEvent(event)) + } +} + +func (w *PlaybackWriter) writeSessionPrintEvent(event AuditEvent) error { + print, ok := event.(*SessionPrint) + if !ok { + return trace.BadParameter("expected session print event, got %T", event) + } + w.eventIndex++ + event.SetIndex(w.eventIndex) + if err := w.openEventsFile(0); err != nil { + return trace.Wrap(err) + } + if err := w.openChunksFile(0); err != nil { + return trace.Wrap(err) + } + data := print.Data + print.Data = nil + bytes, err := utils.FastMarshal(event) + if err != nil { + return trace.Wrap(err) + } + _, err = w.eventsFile.Write(append(bytes, '\n')) + if err != nil { + return trace.Wrap(err) + } + _, err = w.chunksFile.Write(data) + if err != nil { + return trace.Wrap(err) + } + return nil +} + +func (w *PlaybackWriter) writeRegularEvent(event AuditEvent) error { + w.eventIndex++ + event.SetIndex(w.eventIndex) + if err := w.openEventsFile(0); err != nil { + return trace.Wrap(err) + } + bytes, err := utils.FastMarshal(event) + if err != nil { + return trace.Wrap(err) + } + _, err = w.eventsFile.Write(append(bytes, '\n')) + if err != nil { + return trace.Wrap(err) + } + return nil +} + +func (w *PlaybackWriter) openIndexFile() error { + if w.indexFile != nil { + return nil + } + var err error + w.indexFile, err = os.OpenFile( + filepath.Join(w.dir, fmt.Sprintf("%v.index", w.sid.String())), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640) + if err != nil { + return trace.Wrap(err) + } + return nil +} + +func (w *PlaybackWriter) openEventsFile(eventIndex int64) error { + if w.eventsFile != nil { + return nil + } + eventsFileName := eventsFileName(w.dir, w.sid, "", eventIndex) + + // update the index file to write down that new events file has been created + data, err := utils.FastMarshal(indexEntry{ + FileName: filepath.Base(eventsFileName), + Type: fileTypeEvents, + Index: eventIndex, + }) + if err != nil { + return trace.Wrap(err) + } + + _, err = fmt.Fprintf(w.indexFile, "%v\n", string(data)) + if err != nil { + return trace.Wrap(err) + } + + // open new events file for writing + file, err := os.OpenFile(eventsFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640) + if err != nil { + return trace.Wrap(err) + } + w.eventsFile = newGzipWriter(file) + return nil +} + +func (w *PlaybackWriter) openChunksFile(offset int64) error { + if w.chunksFile != nil { + return nil + } + chunksFileName := chunksFileName(w.dir, w.sid, offset) + + // Update the index file to write down that new chunks file has been created. + data, err := utils.FastMarshal(indexEntry{ + FileName: filepath.Base(chunksFileName), + Type: fileTypeChunks, + Offset: offset, + }) + if err != nil { + return trace.Wrap(err) + } + + // index file will contain file name with extension .gz (assuming it was gzipped) + _, err = fmt.Fprintf(w.indexFile, "%v\n", string(data)) + if err != nil { + return trace.Wrap(err) + } + + // open the chunks file for writing, but because the file is written without + // compression, remove the .gz + file, err := os.OpenFile(chunksFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640) + if err != nil { + return trace.Wrap(err) + } + w.chunksFile = newGzipWriter(file) + return nil +} diff --git a/lib/events/recorder.go b/lib/events/recorder.go index 408397fdf074c..469c2119ea856 100644 --- a/lib/events/recorder.go +++ b/lib/events/recorder.go @@ -1,5 +1,5 @@ /* -Copyright 2015-2018 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -33,9 +33,8 @@ import ( // associated with every session. It forwards session stream to the audit log type SessionRecorder interface { io.Writer - io.Closer - // GetAuditLog returns audit log associated with this log - GetAuditLog() IAuditLog + Emitter + Close(ctx context.Context) error } // DiscardRecorder discards all writes diff --git a/lib/events/s3sessions/s3handler.go b/lib/events/s3sessions/s3handler.go index 934d6a1364379..bb9405cbd1710 100644 --- a/lib/events/s3sessions/s3handler.go +++ b/lib/events/s3sessions/s3handler.go @@ -158,7 +158,7 @@ type Handler struct { client *s3.S3 } -// Closer releases connection and resources associated with log if any +// Close releases connection and resources associated with log if any func (l *Handler) Close() error { return nil } @@ -285,6 +285,10 @@ func (l *Handler) path(sessionID session.ID) string { return strings.TrimPrefix(filepath.Join(l.Path, string(sessionID)+".tar"), "/") } +func (l *Handler) fromPath(path string) session.ID { + return session.ID(strings.TrimSuffix(filepath.Base(path), ".tar")) +} + // ensureBucket makes sure bucket exists, and if it does not, creates it func (h *Handler) ensureBucket() error { _, err := h.client.HeadBucket(&s3.HeadBucketInput{ diff --git a/lib/events/s3sessions/s3handler_test.go b/lib/events/s3sessions/s3handler_test.go index 8172bba243d3a..f5ec2a6524e7b 100644 --- a/lib/events/s3sessions/s3handler_test.go +++ b/lib/events/s3sessions/s3handler_test.go @@ -26,44 +26,30 @@ import ( "github.com/gravitational/teleport/lib/events/test" "github.com/gravitational/teleport/lib/utils" - "github.com/gravitational/trace" - "github.com/pborman/uuid" - "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" ) -func Test(t *testing.T) { check.TestingT(t) } +// TestStreams tests various streaming upload scenarios +func TestStreams(t *testing.T) { + utils.InitLoggerForTests(testing.Verbose()) -type S3Suite struct { - handler *Handler - test.HandlerSuite -} - -var _ = check.Suite(&S3Suite{}) - -func (s *S3Suite) SetUpSuite(c *check.C) { - utils.InitLoggerForTests() - - var err error - s.HandlerSuite.Handler, err = NewHandler(Config{ + handler, err := NewHandler(Config{ Region: "us-west-1", Path: "/test/", - Bucket: fmt.Sprintf("teleport-test-%v", uuid.New()), + Bucket: fmt.Sprintf("teleport-unit-tests"), }) - c.Assert(err, check.IsNil) -} + assert.Nil(t, err) -func (s *S3Suite) TestUploadDownload(c *check.C) { - s.UploadDownload(c) -} + defer handler.Close() -func (s *S3Suite) TestDownloadNotFound(c *check.C) { - s.DownloadNotFound(c) -} - -func (s *S3Suite) TearDownSuite(c *check.C) { - if s.handler != nil { - if err := s.handler.deleteBucket(); err != nil { - c.Fatalf("Failed to delete bucket: %#v", trace.DebugReport(err)) - } - } + // Stream with handler and many parts + t.Run("StreamSinglePart", func(t *testing.T) { + test.StreamSinglePart(t, handler) + }) + t.Run("UploadDownload", func(t *testing.T) { + test.UploadDownload(t, handler) + }) + t.Run("DownloadNotFound", func(t *testing.T) { + test.DownloadNotFound(t, handler) + }) } diff --git a/lib/events/s3sessions/s3handler_thirdparty_test.go b/lib/events/s3sessions/s3handler_thirdparty_test.go index 52da99bee1be4..25bbc02668516 100644 --- a/lib/events/s3sessions/s3handler_thirdparty_test.go +++ b/lib/events/s3sessions/s3handler_thirdparty_test.go @@ -20,6 +20,7 @@ package s3sessions import ( "fmt" "net/http/httptest" + "testing" 
"github.com/gravitational/teleport/lib/events/test" "github.com/gravitational/teleport/lib/utils" @@ -29,52 +30,43 @@ import ( "github.com/johannesboyne/gofakes3" "github.com/johannesboyne/gofakes3/backend/s3mem" "github.com/pborman/uuid" - "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" ) -type S3ThirdPartySuite struct { - backend gofakes3.Backend - faker *gofakes3.GoFakeS3 - server *httptest.Server - handler *Handler - test.HandlerSuite -} - -var _ = check.Suite(&S3ThirdPartySuite{}) - -func (s *S3ThirdPartySuite) SetUpSuite(c *check.C) { - utils.InitLoggerForTests() +// TestThirdpartyStreams tests various streaming upload scenarios +// implemented by third party backends using fake backend +func TestThirdpartyStreams(t *testing.T) { + utils.InitLoggerForTests(testing.Verbose()) - //fakes3 var timeSource gofakes3.TimeSource - s.backend = s3mem.New(s3mem.WithTimeSource(timeSource)) - s.faker = gofakes3.New(s.backend, gofakes3.WithLogger(gofakes3.GlobalLog())) - s.server = httptest.NewServer(s.faker.Server()) + backend := s3mem.New(s3mem.WithTimeSource(timeSource)) + faker := gofakes3.New(backend, gofakes3.WithLogger(gofakes3.GlobalLog())) + server := httptest.NewServer(faker.Server()) - var err error - s.HandlerSuite.Handler, err = NewHandler(Config{ + handler, err := NewHandler(Config{ Credentials: credentials.NewStaticCredentials("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""), Region: "us-west-1", Path: "/test/", Bucket: fmt.Sprintf("teleport-test-%v", uuid.New()), - Endpoint: s.server.URL, + Endpoint: server.URL, DisableServerSideEncryption: true, }) - c.Assert(err, check.IsNil) -} - -func (s *S3ThirdPartySuite) TestUploadDownload(c *check.C) { - s.UploadDownload(c) -} + assert.Nil(t, err) -func (s *S3ThirdPartySuite) TestDownloadNotFound(c *check.C) { - s.DownloadNotFound(c) -} - -func (s *S3ThirdPartySuite) TearDownSuite(c *check.C) { - if s.handler != nil { - if err := s.handler.deleteBucket(); err != nil { - c.Fatalf("Failed to delete bucket: %#v", trace.DebugReport(err)) + defer func() { + if err := handler.deleteBucket(); err != nil { + t.Fatalf("Failed to delete bucket: %#v", trace.DebugReport(err)) } - } + }() + + // Stream with handler and many parts + t.Run("StreamManyParts", func(t *testing.T) { + test.Stream(t, handler) + }) + t.Run("UploadDownload", func(t *testing.T) { + test.UploadDownload(t, handler) + }) + t.Run("DownloadNotFound", func(t *testing.T) { + test.DownloadNotFound(t, handler) + }) } diff --git a/lib/events/s3sessions/s3stream.go b/lib/events/s3sessions/s3stream.go new file mode 100644 index 0000000000000..6fcd26376ea39 --- /dev/null +++ b/lib/events/s3sessions/s3stream.go @@ -0,0 +1,186 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package s3sessions + +import ( + "context" + "io" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/gravitational/teleport/lib/defaults" + "github.com/gravitational/teleport/lib/events" + "github.com/gravitational/teleport/lib/session" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/gravitational/trace" +) + +// CreateUpload creates a multipart upload +func (h *Handler) CreateUpload(ctx context.Context, sessionID session.ID) (*events.StreamUpload, error) { + start := time.Now() + defer func() { h.Infof("Upload created in %v.", time.Since(start)) }() + + input := &s3.CreateMultipartUploadInput{ + Bucket: aws.String(h.Bucket), + Key: aws.String(h.path(sessionID)), + } + if !h.Config.DisableServerSideEncryption { + input.ServerSideEncryption = aws.String(s3.ServerSideEncryptionAwsKms) + } + + resp, err := h.client.CreateMultipartUploadWithContext(ctx, input) + if err != nil { + return nil, ConvertS3Error(err) + } + + return &events.StreamUpload{SessionID: sessionID, ID: *resp.UploadId}, nil +} + +// UploadPart uploads part +func (h *Handler) UploadPart(ctx context.Context, upload events.StreamUpload, partNumber int64, partBody io.ReadSeeker) (*events.StreamPart, error) { + start := time.Now() + defer func() { h.Infof("UploadPart(%v) part(%v) uploaded in %v.", upload.ID, partNumber, time.Since(start)) }() + + // This upload exceeded maximum number of supported parts, error now. + if partNumber > s3manager.MaxUploadParts { + return nil, trace.LimitExceeded( + "exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", s3manager.MaxUploadParts) + } + + params := &s3.UploadPartInput{ + Bucket: aws.String(h.Bucket), + UploadId: aws.String(upload.ID), + Key: aws.String(h.path(upload.SessionID)), + Body: partBody, + PartNumber: aws.Int64(partNumber), + } + resp, err := h.client.UploadPartWithContext(ctx, params) + if err != nil { + return nil, ConvertS3Error(err) + } + + return &events.StreamPart{ETag: *resp.ETag, Number: partNumber}, nil +} + +// CompleteUpload completes the upload +func (h *Handler) CompleteUpload(ctx context.Context, upload events.StreamUpload, parts []events.StreamPart) error { + start := time.Now() + defer func() { h.Infof("UploadPart(%v) completed in %v.", upload.ID, time.Since(start)) }() + + // Parts must be sorted in PartNumber order. 
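+	// (S3 rejects CompleteMultipartUpload requests whose parts are not listed
+	// in ascending part-number order, so the sort below is required.)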
+ sort.Slice(parts, func(i, j int) bool { + return parts[i].Number < parts[j].Number + }) + + completedParts := make([]*s3.CompletedPart, len(parts)) + for i := range parts { + completedParts[i] = &s3.CompletedPart{ + ETag: aws.String(parts[i].ETag), + PartNumber: aws.Int64(parts[i].Number), + } + } + + params := &s3.CompleteMultipartUploadInput{ + Bucket: aws.String(h.Bucket), + Key: aws.String(h.path(upload.SessionID)), + UploadId: aws.String(upload.ID), + MultipartUpload: &s3.CompletedMultipartUpload{Parts: completedParts}, + } + _, err := h.client.CompleteMultipartUploadWithContext(ctx, params) + if err != nil { + return ConvertS3Error(err) + } + return nil +} + +// ListParts lists upload parts +func (h *Handler) ListParts(ctx context.Context, upload events.StreamUpload) ([]events.StreamPart, error) { + var parts []events.StreamPart + var partNumberMarker *int64 + for i := 0; i < defaults.MaxIterationLimit; i++ { + re, err := h.client.ListParts(&s3.ListPartsInput{ + Bucket: aws.String(h.Bucket), + Key: aws.String(h.path(upload.SessionID)), + UploadId: aws.String(upload.ID), + PartNumberMarker: partNumberMarker, + }) + if err != nil { + return nil, ConvertS3Error(err) + } + for _, part := range re.Parts { + parts = append(parts, events.StreamPart{ + Number: *part.PartNumber, + ETag: *part.ETag, + }) + } + if !*re.IsTruncated { + break + } + partNumberMarker = re.PartNumberMarker + } + // Parts must be sorted in PartNumber order. + sort.Slice(parts, func(i, j int) bool { + return parts[i].Number < parts[j].Number + }) + return parts, nil +} + +// ListUploads lists uploads that have been initiated but not completed with +// earlier uploads returned first +func (h *Handler) ListUploads(ctx context.Context) ([]events.StreamUpload, error) { + var prefix *string + if h.Path != "" { + trimmed := strings.TrimPrefix(h.Path, "/") + prefix = &trimmed + } + var uploads []events.StreamUpload + var keyMarker *string + var uploadIDMarker *string + for i := 0; i < defaults.MaxIterationLimit; i++ { + input := &s3.ListMultipartUploadsInput{ + Bucket: aws.String(h.Bucket), + Prefix: prefix, + KeyMarker: keyMarker, + UploadIdMarker: uploadIDMarker, + } + re, err := h.client.ListMultipartUploads(input) + if err != nil { + return nil, ConvertS3Error(err) + } + for _, upload := range re.Uploads { + uploads = append(uploads, events.StreamUpload{ + ID: *upload.UploadId, + SessionID: h.fromPath(*upload.Key), + Initiated: *upload.Initiated, + }) + } + if !*re.IsTruncated { + break + } + keyMarker = re.KeyMarker + uploadIDMarker = re.UploadIdMarker + } + sort.Slice(uploads, func(i, j int) bool { + return uploads[i].Initiated.Before(uploads[j].Initiated) + }) + return uploads, nil +} diff --git a/lib/events/sessionlog.go b/lib/events/sessionlog.go index c6bc366af9018..5af7d773b04fe 100644 --- a/lib/events/sessionlog.go +++ b/lib/events/sessionlog.go @@ -593,7 +593,7 @@ type printEvent struct { // gzipWriter wraps file, on close close both gzip writer and file type gzipWriter struct { *gzip.Writer - file *os.File + inner io.WriteCloser } // Close closes gzip writer and file @@ -605,9 +605,9 @@ func (f *gzipWriter) Close() error { writerPool.Put(f.Writer) f.Writer = nil } - if f.file != nil { - errors = append(errors, f.file.Close()) - f.file = nil + if f.inner != nil { + errors = append(errors, f.inner.Close()) + f.inner = nil } return trace.NewAggregate(errors...) 
} @@ -623,15 +623,46 @@ var writerPool = sync.Pool{ }, } -func newGzipWriter(file *os.File) *gzipWriter { +func newGzipWriter(writer io.WriteCloser) *gzipWriter { g := writerPool.Get().(*gzip.Writer) - g.Reset(file) + g.Reset(writer) return &gzipWriter{ Writer: g, - file: file, + inner: writer, } } +// gzipReader wraps file, on close close both gzip writer and file +type gzipReader struct { + io.ReadCloser + inner io.Closer +} + +// Close closes file and gzip writer +func (f *gzipReader) Close() error { + var errors []error + if f.ReadCloser != nil { + errors = append(errors, f.ReadCloser.Close()) + f.ReadCloser = nil + } + if f.inner != nil { + errors = append(errors, f.inner.Close()) + f.inner = nil + } + return trace.NewAggregate(errors...) +} + +func newGzipReader(reader io.ReadCloser) (*gzipReader, error) { + gzReader, err := gzip.NewReader(reader) + if err != nil { + return nil, trace.Wrap(err) + } + return &gzipReader{ + ReadCloser: gzReader, + inner: reader, + }, nil +} + const ( // eventsSuffix is the suffix of the archive that contains session events. eventsSuffix = "events.gz" diff --git a/lib/events/stream.go b/lib/events/stream.go new file mode 100644 index 0000000000000..71c884546c435 --- /dev/null +++ b/lib/events/stream.go @@ -0,0 +1,1260 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "sort" + "sync" + "time" + + "github.com/gravitational/teleport/lib/defaults" + "github.com/gravitational/teleport/lib/session" + "github.com/gravitational/teleport/lib/utils" + + "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" + "github.com/pborman/uuid" + log "github.com/sirupsen/logrus" + "go.uber.org/atomic" +) + +const ( + // Int32Size is a constant for 32 bit integer byte size + Int32Size = 4 + + // Int64Size is a constant for 64 bit integer byte size + Int64Size = 8 + + // MaxProtoMessageSizeBytes is maximum protobuf marshaled message size + MaxProtoMessageSizeBytes = 64 * 1024 + + // MaxUploadParts is the maximum allowed number of parts in a multi-part upload + // on Amazon S3. + MaxUploadParts = 10000 + + // MinUploadPartSizeBytes is the minimum allowed part size when uploading a part to + // Amazon S3. 
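+	// It is 5 MiB; by default, non-final stream slices are zero-padded up to
+	// this size before upload (see slice.reader below).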
+ MinUploadPartSizeBytes = 1024 * 1024 * 5 + + // ReservedParts is the amount of parts reserved by default + ReservedParts = 100 + + // ProtoStreamV1 is a version of the binary protocol + ProtoStreamV1 = 1 + + // ProtoStreamV1PartHeaderSize is the size of the part of the protocol stream + // on disk format, it consists of + // * 8 bytes for the format version + // * 8 bytes for meaningful size of the part + // * 8 bytes for optional padding size at the end of the slice + ProtoStreamV1PartHeaderSize = Int64Size * 3 + + // ProtoStreamV1RecordHeaderSize is the size of the header + // of the record header, it consists of the record length + ProtoStreamV1RecordHeaderSize = Int32Size +) + +// ProtoStreamerConfig specifies configuration for the part +type ProtoStreamerConfig struct { + Uploader MultipartUploader + // MinUploadBytes submits upload when they have reached min bytes (could be more, + // but not less), due to the nature of gzip writer + MinUploadBytes int64 + // ConcurrentUploads sets concurrent uploads per stream + ConcurrentUploads int +} + +// CheckAndSetDefaults checks and sets streamer defaults +func (cfg *ProtoStreamerConfig) CheckAndSetDefaults() error { + if cfg.Uploader == nil { + return trace.BadParameter("missing parameter Uploader") + } + if cfg.MinUploadBytes == 0 { + cfg.MinUploadBytes = MinUploadPartSizeBytes + } + if cfg.ConcurrentUploads == 0 { + cfg.ConcurrentUploads = defaults.ConcurrentUploadsPerStream + } + return nil +} + +// NewProtoStreamer creates protobuf-based streams +func NewProtoStreamer(cfg ProtoStreamerConfig) (*ProtoStreamer, error) { + if err := cfg.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + return &ProtoStreamer{ + cfg: cfg, + // Min upload bytes + some overhead to prevent buffer growth (gzip writer is not precise) + bufferPool: utils.NewBufferSyncPool(cfg.MinUploadBytes + cfg.MinUploadBytes/3), + // MaxProtoMessage size + length of the message record + slicePool: utils.NewSliceSyncPool(MaxProtoMessageSizeBytes + ProtoStreamV1RecordHeaderSize), + }, nil +} + +// ProtoStreamer creates protobuf-based streams uploaded to the storage +// backends, for example S3 or GCS +type ProtoStreamer struct { + cfg ProtoStreamerConfig + bufferPool *utils.BufferSyncPool + slicePool *utils.SliceSyncPool +} + +// CreateAuditStreamForUpload creates audit stream for existing upload, +// this function is useful in tests +func (s *ProtoStreamer) CreateAuditStreamForUpload(ctx context.Context, sid session.ID, upload StreamUpload) (Stream, error) { + return NewProtoStream(ProtoStreamConfig{ + Upload: upload, + BufferPool: s.bufferPool, + SlicePool: s.slicePool, + Uploader: s.cfg.Uploader, + MinUploadBytes: s.cfg.MinUploadBytes, + ConcurrentUploads: s.cfg.ConcurrentUploads, + }) +} + +// CreateAuditStream creates audit stream and upload +func (s *ProtoStreamer) CreateAuditStream(ctx context.Context, sid session.ID) (Stream, error) { + upload, err := s.cfg.Uploader.CreateUpload(ctx, sid) + if err != nil { + return nil, trace.Wrap(err) + } + return s.CreateAuditStreamForUpload(ctx, sid, *upload) +} + +// ResumeAuditStream resumes the stream that has not been completed yet +func (s *ProtoStreamer) ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (Stream, error) { + // Note, that if the session ID does not match the upload ID, + // the request will fail + upload := StreamUpload{SessionID: sid, ID: uploadID} + parts, err := s.cfg.Uploader.ListParts(ctx, upload) + if err != nil { + return nil, trace.Wrap(err) + } + return 
NewProtoStream(ProtoStreamConfig{ + Upload: upload, + BufferPool: s.bufferPool, + SlicePool: s.slicePool, + Uploader: s.cfg.Uploader, + MinUploadBytes: s.cfg.MinUploadBytes, + CompletedParts: parts, + }) +} + +// ProtoStreamConfig configures proto stream +type ProtoStreamConfig struct { + // Upload is the upload this stream is handling + Upload StreamUpload + // Uploader handles upload to the storage + Uploader MultipartUploader + // BufferPool is a sync pool with buffers + BufferPool *utils.BufferSyncPool + // SlicePool is a sync pool with allocated slices + SlicePool *utils.SliceSyncPool + // MinUploadBytes submits upload when they have reached min bytes (could be more, + // but not less), due to the nature of gzip writer + MinUploadBytes int64 + // CompletedParts is a list of completed parts, used for resuming stream + CompletedParts []StreamPart + // InactivityFlushPeriod sets inactivity period + // after which streamer flushes the data to the uploader + // to avoid data loss + InactivityFlushPeriod time.Duration + // Clock is used to override time in tests + Clock clockwork.Clock + // ConcurrentUploads sets concurrent uploads per stream + ConcurrentUploads int +} + +// CheckAndSetDefaults checks and sets default values +func (cfg *ProtoStreamConfig) CheckAndSetDefaults() error { + if err := cfg.Upload.CheckAndSetDefaults(); err != nil { + return trace.Wrap(err) + } + if cfg.Uploader == nil { + return trace.BadParameter("missing parameter Uploader") + } + if cfg.BufferPool == nil { + return trace.BadParameter("missing parameter BufferPool") + } + if cfg.SlicePool == nil { + return trace.BadParameter("missing parameter SlicePool") + } + if cfg.MinUploadBytes == 0 { + return trace.BadParameter("missing parameter MinUploadBytes") + } + if cfg.InactivityFlushPeriod == 0 { + cfg.InactivityFlushPeriod = defaults.InactivityFlushPeriod + } + if cfg.ConcurrentUploads == 0 { + cfg.ConcurrentUploads = defaults.ConcurrentUploadsPerStream + } + if cfg.Clock == nil { + cfg.Clock = clockwork.NewRealClock() + } + return nil +} + +// NewProtoStream uploads session recordings to the protobuf format. +// +// The individual session stream is represented by continuous globally +// ordered sequence of events serialized to binary protobuf format. +// +// +// The stream is split into ordered slices of gzipped audit events. +// +// Each slice is composed of three parts: +// +// 1. Slice starts with 24 bytes version header +// +// * 8 bytes for the format version (used for future expansion) +// * 8 bytes for meaningful size of the part +// * 8 bytes for padding at the end of the slice (if present) +// +// 2. V1 body of the slice is gzipped protobuf messages in binary format. +// +// 3. Optional padding (if specified in the header), required +// to bring slices to minimum slice size. +// +// The slice size is determined by S3 multipart upload requirements: +// +// https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html +// +// This design allows the streamer to upload slices using S3-compatible APIs +// in parallel without buffering to disk. 
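+//
+// For example, a non-final slice whose gzipped body is smaller than the
+// minimum upload size is zero-padded up to that size; the header records both
+// the meaningful body size and the padding size, so the reader can skip the
+// padding on download.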
+// +func NewProtoStream(cfg ProtoStreamConfig) (*ProtoStream, error) { + if err := cfg.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + cancelCtx, cancel := context.WithCancel(context.Background()) + completeCtx, complete := context.WithCancel(context.Background()) + uploadsCtx, uploadsDone := context.WithCancel(context.Background()) + stream := &ProtoStream{ + cfg: cfg, + eventsCh: make(chan protoEvent), + + cancelCtx: cancelCtx, + cancel: cancel, + + completeCtx: completeCtx, + complete: complete, + completeType: atomic.NewUint32(completeTypeComplete), + completeMtx: &sync.RWMutex{}, + + uploadsCtx: uploadsCtx, + uploadsDone: uploadsDone, + + // Buffered channel gives consumers + // a chance to get an early status update. + statusCh: make(chan StreamStatus, 1), + } + + writer := &sliceWriter{ + proto: stream, + activeUploads: make(map[int64]*activeUpload), + completedUploadsC: make(chan *activeUpload, cfg.ConcurrentUploads), + semUploads: make(chan struct{}, cfg.ConcurrentUploads), + lastPartNumber: 0, + } + if len(cfg.CompletedParts) > 0 { + // skip 2 extra parts as a protection from accidental overwrites. + // the following is possible between processes 1 and 2 (P1 and P2) + // P1: * start stream S + // P1: * receive some data from stream S + // C: * disconnect from P1 + // P2: * resume stream, get all committed parts (0) and start writes + // P2: * write part 1 + // P1: * flush the data to part 1 before closure + // + // In this scenario stream data submitted by P1 flush will be lost + // unless resume will resume at part 2. + // + // On the other hand, it's ok if resume of P2 overwrites + // any data of P1, because it will replay non committed + // events, which could potentially lead to duplicate events. + writer.lastPartNumber = cfg.CompletedParts[len(cfg.CompletedParts)-1].Number + 1 + writer.completedParts = cfg.CompletedParts + } + go writer.receiveAndUpload() + return stream, nil +} + +// ProtoStream implements concurrent safe event emitter +// that uploads the parts in parallel to S3 +type ProtoStream struct { + cfg ProtoStreamConfig + + eventsCh chan protoEvent + + // cancelCtx is used to signal closure + cancelCtx context.Context + cancel context.CancelFunc + + // completeCtx is used to signal completion of the operation + completeCtx context.Context + complete context.CancelFunc + completeType *atomic.Uint32 + completeResult error + completeMtx *sync.RWMutex + + // uploadsCtx is used to signal that all uploads have been completed + uploadsCtx context.Context + // uploadsDone is a function signalling that uploads have completed + uploadsDone context.CancelFunc + + // statusCh sends updates on the stream status + statusCh chan StreamStatus +} + +const ( + // completeTypeComplete means that proto stream + // should complete all in flight uploads and complete the upload itself + completeTypeComplete = 0 + // completeTypeFlush means that proto stream + // should complete all in flight uploads but do not complete the upload + completeTypeFlush = 1 +) + +type protoEvent struct { + index int64 + oneof *OneOf +} + +func (s *ProtoStream) setCompleteResult(err error) { + s.completeMtx.Lock() + defer s.completeMtx.Unlock() + s.completeResult = err +} + +func (s *ProtoStream) getCompleteResult() error { + s.completeMtx.RLock() + defer s.completeMtx.RUnlock() + return s.completeResult +} + +// Done returns channel closed when streamer is closed +// should be used to detect sending errors +func (s *ProtoStream) Done() <-chan struct{} { + return s.cancelCtx.Done() 
+} + +// EmitAuditEvent emits a single audit event to the stream +func (s *ProtoStream) EmitAuditEvent(ctx context.Context, event AuditEvent) error { + oneof, err := ToOneOf(event) + if err != nil { + return trace.Wrap(err) + } + + messageSize := oneof.Size() + if messageSize > MaxProtoMessageSizeBytes { + return trace.BadParameter("record size %v exceeds max message size of %v bytes", messageSize, MaxProtoMessageSizeBytes) + } + + start := time.Now() + select { + case s.eventsCh <- protoEvent{index: event.GetIndex(), oneof: oneof}: + diff := time.Since(start) + if diff > 100*time.Millisecond { + log.Debugf("[SLOW] EmitAuditEvent took %v.", diff) + } + return nil + case <-s.cancelCtx.Done(): + return trace.ConnectionProblem(nil, "emitter is closed") + case <-s.completeCtx.Done(): + return trace.ConnectionProblem(nil, "emitter is completed") + case <-ctx.Done(): + return trace.ConnectionProblem(ctx.Err(), "context is closed") + } +} + +// Complete completes the upload, waits for completion and returns all allocated resources. +func (s *ProtoStream) Complete(ctx context.Context) error { + s.complete() + select { + // wait for all in-flight uploads to complete and stream to be completed + case <-s.uploadsCtx.Done(): + s.cancel() + return s.getCompleteResult() + case <-ctx.Done(): + return trace.ConnectionProblem(ctx.Err(), "context has cancelled before complete could succeed") + } +} + +// Status returns channel receiving updates about stream status +// last event index that was uploaded and upload ID +func (s *ProtoStream) Status() <-chan StreamStatus { + return s.statusCh +} + +// Close flushes non-uploaded flight stream data without marking +// the stream completed and closes the stream instance +func (s *ProtoStream) Close(ctx context.Context) error { + s.completeType.Store(completeTypeFlush) + s.complete() + select { + // wait for all in-flight uploads to complete and stream to be completed + case <-s.uploadsCtx.Done(): + return nil + case <-ctx.Done(): + return trace.ConnectionProblem(ctx.Err(), "context has cancelled before complete could succeed") + } +} + +// sliceWriter is a helper struct that coordinates +// writing slices and checkpointing +type sliceWriter struct { + proto *ProtoStream + // current is the current slice being written to + current *slice + // lastPartNumber is the last assigned part number + lastPartNumber int64 + // activeUploads tracks active uploads + activeUploads map[int64]*activeUpload + // completedUploadsC receives uploads that have been completed + completedUploadsC chan *activeUpload + // semUploads controls concurrent uploads that are in flight + semUploads chan struct{} + // completedParts is the list of completed parts + completedParts []StreamPart + // emptyHeader is used to write empty header + // to preserve some bytes + emptyHeader [ProtoStreamV1PartHeaderSize]byte +} + +func (w *sliceWriter) updateCompletedParts(part StreamPart, lastEventIndex int64) { + w.completedParts = append(w.completedParts, part) + w.trySendStreamStatusUpdate(lastEventIndex) +} + +func (w *sliceWriter) trySendStreamStatusUpdate(lastEventIndex int64) { + status := StreamStatus{ + UploadID: w.proto.cfg.Upload.ID, + LastEventIndex: lastEventIndex, + LastUploadTime: w.proto.cfg.Clock.Now().UTC(), + } + select { + case w.proto.statusCh <- status: + default: + } +} + +// receiveAndUpload receives and uploads serialized events +func (w *sliceWriter) receiveAndUpload() { + // on the start, send stream status with the upload ID and negative + // index so that remote party can get an 
upload ID + w.trySendStreamStatusUpdate(-1) + + clock := w.proto.cfg.Clock + + var lastEvent time.Time + var flushCh <-chan time.Time + for { + select { + case <-w.proto.cancelCtx.Done(): + // cancel stops all operations without waiting + return + case <-w.proto.completeCtx.Done(): + // if present, send remaining data for upload + if w.current != nil { + // mark that the current part is last (last parts are allowed to be + // smaller than the certain size, otherwise the padding + // have to be added (this is due to S3 API limits) + if w.proto.completeType.Load() == completeTypeComplete { + w.current.isLast = true + } + if err := w.startUploadCurrentSlice(); err != nil { + return + } + } + defer w.completeStream() + return + case upload := <-w.completedUploadsC: + part, err := upload.getPart() + if err != nil { + log.WithError(err).Error("Could not upload part after retrying, aborting.") + w.proto.cancel() + return + } + delete(w.activeUploads, part.Number) + w.updateCompletedParts(*part, upload.lastEventIndex) + case <-flushCh: + now := clock.Now().UTC() + inactivityPeriod := now.Sub(lastEvent) + if inactivityPeriod < 0 { + inactivityPeriod = 0 + } + if inactivityPeriod >= w.proto.cfg.InactivityFlushPeriod { + // inactivity period exceeded threshold, + // there is no need to schedule a timer until the next + // event occurs, set the timer channel to nil + flushCh = nil + if w.current != nil { + log.Debugf("Inactivity timer ticked at %v, inactivity period: %v exceeded threshold and have data. Flushing.", now, inactivityPeriod) + if err := w.startUploadCurrentSlice(); err != nil { + return + } + } else { + log.Debugf("Inactivity timer ticked at %v, inactivity period: %v exceeded threshold but have no data. Nothing to do.", now, inactivityPeriod) + } + } else { + log.Debugf("Inactivity timer ticked at %v, inactivity period: %v have not exceeded threshold. 
Set timer to tick after %v.", now, inactivityPeriod, w.proto.cfg.InactivityFlushPeriod-inactivityPeriod) + flushCh = clock.After(w.proto.cfg.InactivityFlushPeriod - inactivityPeriod) + } + case event := <-w.proto.eventsCh: + lastEvent = clock.Now().UTC() + // flush timer is set up only if any event was submitted + // after last flush or system start + if flushCh == nil { + flushCh = clock.After(w.proto.cfg.InactivityFlushPeriod) + } + if err := w.submitEvent(event); err != nil { + log.WithError(err).Error("Lost event.") + continue + } + if w.shouldUploadCurrentSlice() { + // this logic blocks the EmitAuditEvent in case if the + // upload has not completed and the current slice is out of capacity + if err := w.startUploadCurrentSlice(); err != nil { + return + } + } + } + } +} + +// shouldUploadCurrentSlice returns true when it's time to upload +// the current slice (it has reached upload bytes) +func (w *sliceWriter) shouldUploadCurrentSlice() bool { + return w.current.shouldUpload() +} + +// startUploadCurrentSlice starts uploading current slice +// and adds it to the waiting list +func (w *sliceWriter) startUploadCurrentSlice() error { + w.lastPartNumber++ + activeUpload, err := w.startUpload(w.lastPartNumber, w.current) + if err != nil { + return trace.Wrap(err) + } + w.activeUploads[w.lastPartNumber] = activeUpload + w.current = nil + return nil +} + +type bufferCloser struct { + *bytes.Buffer +} + +func (b *bufferCloser) Close() error { + return nil +} + +func (w *sliceWriter) newSlice() *slice { + buffer := w.proto.cfg.BufferPool.Get() + buffer.Reset() + // reserve bytes for version header + buffer.Write(w.emptyHeader[:]) + return &slice{ + proto: w.proto, + buffer: buffer, + writer: newGzipWriter(&bufferCloser{Buffer: buffer}), + } +} + +func (w *sliceWriter) submitEvent(event protoEvent) error { + if w.current == nil { + w.current = w.newSlice() + } + return w.current.emitAuditEvent(event) +} + +// completeStream waits for in-flight uploads to finish +// and completes the stream +func (w *sliceWriter) completeStream() { + defer w.proto.uploadsDone() + for range w.activeUploads { + select { + case upload := <-w.completedUploadsC: + part, err := upload.getPart() + if err != nil { + log.WithError(err).Warningf("Failed to upload part.") + continue + } + w.updateCompletedParts(*part, upload.lastEventIndex) + case <-w.proto.cancelCtx.Done(): + return + } + } + if w.proto.completeType.Load() == completeTypeComplete { + // part upload notifications could arrive out of order + sort.Slice(w.completedParts, func(i, j int) bool { + return w.completedParts[i].Number < w.completedParts[j].Number + }) + err := w.proto.cfg.Uploader.CompleteUpload(w.proto.cancelCtx, w.proto.cfg.Upload, w.completedParts) + w.proto.setCompleteResult(err) + if err != nil { + log.WithError(err).Warningf("Failed to complete upload.") + } + } +} + +// startUpload acquires upload semaphore and starts upload, returns error +// only if there is a critical error +func (w *sliceWriter) startUpload(partNumber int64, slice *slice) (*activeUpload, error) { + // acquire semaphore limiting concurrent uploads + select { + case w.semUploads <- struct{}{}: + case <-w.proto.cancelCtx.Done(): + return nil, trace.ConnectionProblem(w.proto.cancelCtx.Err(), "context is closed") + } + activeUpload := &activeUpload{ + partNumber: partNumber, + lastEventIndex: slice.lastEventIndex, + start: time.Now().UTC(), + } + + go func() { + defer func() { + if err := slice.Close(); err != nil { + log.WithError(err).Warningf("Failed to close slice.") + 
} + }() + + defer func() { + select { + case w.completedUploadsC <- activeUpload: + case <-w.proto.cancelCtx.Done(): + return + } + }() + + defer func() { + <-w.semUploads + }() + + var retry utils.Retry + for i := 0; i < defaults.MaxIterationLimit; i++ { + reader, err := slice.reader() + if err != nil { + activeUpload.setError(err) + return + } + part, err := w.proto.cfg.Uploader.UploadPart(w.proto.cancelCtx, w.proto.cfg.Upload, partNumber, reader) + if err == nil { + activeUpload.setPart(*part) + return + } + // upload is not found is not a transient error, so abort the operation + if errors.Is(trace.Unwrap(err), context.Canceled) || trace.IsNotFound(err) { + activeUpload.setError(err) + return + } + // retry is created on the first upload error + if retry == nil { + var rerr error + retry, rerr = utils.NewLinear(utils.LinearConfig{ + Step: defaults.NetworkRetryDuration, + Max: defaults.NetworkBackoffDuration, + }) + if rerr != nil { + activeUpload.setError(rerr) + return + } + } + retry.Inc() + if _, err := reader.Seek(0, 0); err != nil { + activeUpload.setError(err) + return + } + select { + case <-retry.After(): + log.WithError(err).Debugf("Part upload failed, retrying after backoff.") + case <-w.proto.cancelCtx.Done(): + return + } + } + }() + + return activeUpload, nil +} + +type activeUpload struct { + mtx sync.RWMutex + start time.Time + end time.Time + partNumber int64 + part *StreamPart + err error + lastEventIndex int64 +} + +func (a *activeUpload) setError(err error) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.end = time.Now().UTC() + a.err = err +} + +func (a *activeUpload) setPart(part StreamPart) { + a.mtx.Lock() + defer a.mtx.Unlock() + a.end = time.Now().UTC() + a.part = &part +} + +func (a *activeUpload) getPart() (*StreamPart, error) { + a.mtx.RLock() + defer a.mtx.RUnlock() + if a.err != nil { + return nil, trace.Wrap(a.err) + } + if a.part == nil { + return nil, trace.NotFound("part is not set") + } + return a.part, nil +} + +// slice contains serialized protobuf messages +type slice struct { + proto *ProtoStream + writer *gzipWriter + buffer *bytes.Buffer + isLast bool + lastEventIndex int64 +} + +// reader returns a reader for the bytes written, +// no writes should be done after this method is called +func (s *slice) reader() (io.ReadSeeker, error) { + if err := s.writer.Close(); err != nil { + return nil, trace.Wrap(err) + } + wroteBytes := int64(s.buffer.Len()) + var paddingBytes int64 + // non last slices should be at least min upload bytes (as limited by S3 API spec) + if !s.isLast && wroteBytes < s.proto.cfg.MinUploadBytes { + paddingBytes = s.proto.cfg.MinUploadBytes - wroteBytes + if _, err := s.buffer.ReadFrom(utils.NewRepeatReader(byte(0), int(paddingBytes))); err != nil { + return nil, trace.Wrap(err) + } + } + data := s.buffer.Bytes() + // when the slice was created, the first bytes were reserved + // for the protocol version number and size of the slice in bytes + binary.BigEndian.PutUint64(data[0:], ProtoStreamV1) + binary.BigEndian.PutUint64(data[Int64Size:], uint64(wroteBytes-ProtoStreamV1PartHeaderSize)) + binary.BigEndian.PutUint64(data[Int64Size*2:], uint64(paddingBytes)) + return bytes.NewReader(data), nil +} + +// Close closes buffer and returns all allocated resources +func (s *slice) Close() error { + err := s.writer.Close() + s.proto.cfg.BufferPool.Put(s.buffer) + s.buffer = nil + return trace.Wrap(err) +} + +// shouldUpload returns true if it's time to write the slice +// (set to true when it has reached the min slice in bytes) +func (s 
*slice) shouldUpload() bool { + return int64(s.buffer.Len()) >= s.proto.cfg.MinUploadBytes +} + +// emitAuditEvent emits a single audit event to the stream +func (s *slice) emitAuditEvent(event protoEvent) error { + bytes := s.proto.cfg.SlicePool.Get() + defer s.proto.cfg.SlicePool.Put(bytes) + + messageSize := event.oneof.Size() + recordSize := ProtoStreamV1RecordHeaderSize + messageSize + + if len(bytes) < recordSize { + return trace.BadParameter( + "error in buffer allocation, expected size to be >= %v, got %v", recordSize, len(bytes)) + } + binary.BigEndian.PutUint32(bytes, uint32(messageSize)) + _, err := event.oneof.MarshalTo(bytes[Int32Size:]) + if err != nil { + return trace.Wrap(err) + } + wroteBytes, err := s.writer.Write(bytes[:recordSize]) + if err != nil { + return trace.Wrap(err) + } + if wroteBytes != recordSize { + return trace.BadParameter("expected %v bytes to be written, got %v", recordSize, wroteBytes) + } + if event.index > s.lastEventIndex { + s.lastEventIndex = event.index + } + return nil +} + +// NewProtoReader returns a new proto reader with slice pool +func NewProtoReader(r io.Reader) *ProtoReader { + return &ProtoReader{ + reader: r, + lastIndex: -1, + } +} + +// AuditReader provides method to read +// audit events one by one +type AuditReader interface { + // Read reads audit events + Read(context.Context) (AuditEvent, error) +} + +const ( + // protoReaderStateInit is ready to start reading the next part + protoReaderStateInit = 0 + // protoReaderStateCurrent will read the data from the current part + protoReaderStateCurrent = iota + // protoReaderStateEOF indicates that reader has completed reading + // all parts + protoReaderStateEOF = iota + // protoReaderStateError indicates that reader has reached internal + // error and should close + protoReaderStateError = iota +) + +// ProtoReader reads protobuf encoding from reader +type ProtoReader struct { + gzipReader *gzipReader + padding int64 + reader io.Reader + sizeBytes [Int64Size]byte + messageBytes [MaxProtoMessageSizeBytes]byte + state int + error error + lastIndex int64 + stats ProtoReaderStats +} + +// ProtoReaderStats contains some reader statistics +type ProtoReaderStats struct { + // SkippedEvents is a counter with encountered + // events recorded several times or events + // that have been out of order as skipped + SkippedEvents int64 + // OutOfOrderEvents is a counter with events + // received out of order + OutOfOrderEvents int64 + // TotalEvents contains total amount of + // processed events (including duplicates) + TotalEvents int64 +} + +// ToFields returns a copy of the stats to be used as log fields +func (p ProtoReaderStats) ToFields() log.Fields { + return log.Fields{ + "skipped-events": p.SkippedEvents, + "out-of-order-events": p.OutOfOrderEvents, + "total-events": p.TotalEvents, + } +} + +// Close releases reader resources +func (r *ProtoReader) Close() error { + if r.gzipReader != nil { + return r.gzipReader.Close() + } + return nil +} + +// Reset sets reader to read from the new reader +// without resetting the stats, could be used +// to deduplicate the events +func (r *ProtoReader) Reset(reader io.Reader) error { + if r.error != nil { + return r.error + } + if r.gzipReader != nil { + if r.error = r.gzipReader.Close(); r.error != nil { + return trace.Wrap(r.error) + } + r.gzipReader = nil + } + r.reader = reader + r.state = protoReaderStateInit + return nil +} + +func (r *ProtoReader) setError(err error) error { + r.state = protoReaderStateError + r.error = err + return err +} + +// 
GetStats returns stats about processed events +func (r *ProtoReader) GetStats() ProtoReaderStats { + return r.stats +} + +// Read returns next event or io.EOF in case of the end of the parts +func (r *ProtoReader) Read(ctx context.Context) (AuditEvent, error) { + // periodic checks of context after fixed amount of iterations + // is an extra precaution to avoid + // accidental endless loop due to logic error crashing the system + // and allows ctx timeout to kick in if specified + var checkpointIteration int64 + for { + checkpointIteration++ + if checkpointIteration%defaults.MaxIterationLimit == 0 { + select { + case <-ctx.Done(): + if ctx.Err() != nil { + return nil, trace.Wrap(ctx.Err()) + } + return nil, trace.LimitExceeded("context has been cancelled") + default: + } + } + switch r.state { + case protoReaderStateEOF: + return nil, io.EOF + case protoReaderStateError: + return nil, r.error + case protoReaderStateInit: + // read the part header that consists of the protocol version + // and the part size (for the V1 version of the protocol) + _, err := io.ReadFull(r.reader, r.sizeBytes[:Int64Size]) + if err != nil { + // reached the end of the stream + if err == io.EOF { + r.state = protoReaderStateEOF + return nil, err + } + return nil, r.setError(trace.ConvertSystemError(err)) + } + protocolVersion := binary.BigEndian.Uint64(r.sizeBytes[:Int64Size]) + if protocolVersion != ProtoStreamV1 { + return nil, trace.BadParameter("unsupported protocol version %v", protocolVersion) + } + // read size of this gzipped part as encoded by V1 protocol version + _, err = io.ReadFull(r.reader, r.sizeBytes[:Int64Size]) + if err != nil { + return nil, r.setError(trace.ConvertSystemError(err)) + } + partSize := binary.BigEndian.Uint64(r.sizeBytes[:Int64Size]) + // read padding size (could be 0) + _, err = io.ReadFull(r.reader, r.sizeBytes[:Int64Size]) + if err != nil { + return nil, r.setError(trace.ConvertSystemError(err)) + } + r.padding = int64(binary.BigEndian.Uint64(r.sizeBytes[:Int64Size])) + gzipReader, err := newGzipReader(ioutil.NopCloser(io.LimitReader(r.reader, int64(partSize)))) + if err != nil { + return nil, r.setError(trace.Wrap(err)) + } + r.gzipReader = gzipReader + r.state = protoReaderStateCurrent + continue + // read the next version from the gzip reader + case protoReaderStateCurrent: + // the record consists of length of the protobuf encoded + // message and the message itself + _, err := io.ReadFull(r.gzipReader, r.sizeBytes[:Int32Size]) + if err != nil { + if err != io.EOF { + return nil, r.setError(trace.ConvertSystemError(err)) + } + // reached the end of the current part, but not necessarily + // the end of the stream + if err := r.gzipReader.Close(); err != nil { + return nil, r.setError(trace.ConvertSystemError(err)) + } + if r.padding != 0 { + skipped, err := io.CopyBuffer(ioutil.Discard, io.LimitReader(r.reader, r.padding), r.messageBytes[:]) + if err != nil { + return nil, r.setError(trace.ConvertSystemError(err)) + } + if skipped != r.padding { + return nil, r.setError(trace.BadParameter( + "data truncated, expected to read %v bytes, but got %v", r.padding, skipped)) + } + } + r.padding = 0 + r.gzipReader = nil + r.state = protoReaderStateInit + continue + } + messageSize := binary.BigEndian.Uint32(r.sizeBytes[:Int32Size]) + // zero message size indicates end of the part + // that sometimes is present in partially submitted parts + // that have to be filled with zeroes for parts smaller + // than minimum allowed size + if messageSize == 0 { + return nil, 
r.setError(trace.BadParameter("unexpected message size 0")) + } + _, err = io.ReadFull(r.gzipReader, r.messageBytes[:messageSize]) + if err != nil { + return nil, r.setError(trace.ConvertSystemError(err)) + } + oneof := OneOf{} + err = oneof.Unmarshal(r.messageBytes[:messageSize]) + if err != nil { + return nil, trace.Wrap(err) + } + event, err := FromOneOf(oneof) + if err != nil { + return nil, trace.Wrap(err) + } + r.stats.TotalEvents++ + if event.GetIndex() <= r.lastIndex { + r.stats.SkippedEvents++ + continue + } + if r.lastIndex > 0 && event.GetIndex() != r.lastIndex+1 { + r.stats.OutOfOrderEvents++ + } + r.lastIndex = event.GetIndex() + return event, nil + default: + return nil, trace.BadParameter("unsupported reader size") + } + } +} + +// ReadAll reads all events until EOF +func (r *ProtoReader) ReadAll(ctx context.Context) ([]AuditEvent, error) { + var events []AuditEvent + for { + event, err := r.Read(ctx) + if err != nil { + if err == io.EOF { + return events, nil + } + return nil, trace.Wrap(err) + } + events = append(events, event) + } +} + +// NewMemoryUploader returns a new memory uploader implementing multipart +// upload +func NewMemoryUploader(eventsC ...chan UploadEvent) *MemoryUploader { + up := &MemoryUploader{ + mtx: &sync.RWMutex{}, + uploads: make(map[string]*MemoryUpload), + objects: make(map[session.ID][]byte), + } + if len(eventsC) != 0 { + up.eventsC = eventsC[0] + } + return up +} + +// MemoryUploader uploads all bytes to memory, used in tests +type MemoryUploader struct { + mtx *sync.RWMutex + uploads map[string]*MemoryUpload + objects map[session.ID][]byte + eventsC chan UploadEvent +} + +// MemoryUpload is used in tests +type MemoryUpload struct { + // id is the upload ID + id string + // parts is the upload parts + parts map[int64][]byte + // sessionID is the session ID associated with the upload + sessionID session.ID + //completed specifies upload as completed + completed bool +} + +func (m *MemoryUploader) trySendEvent(event UploadEvent) { + if m.eventsC == nil { + return + } + select { + case m.eventsC <- event: + default: + } +} + +// CreateUpload creates a multipart upload +func (m *MemoryUploader) CreateUpload(ctx context.Context, sessionID session.ID) (*StreamUpload, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + upload := &StreamUpload{ + ID: uuid.New(), + SessionID: sessionID, + } + m.uploads[upload.ID] = &MemoryUpload{ + id: upload.ID, + sessionID: sessionID, + parts: make(map[int64][]byte), + } + return upload, nil +} + +// CompleteUpload completes the upload +func (m *MemoryUploader) CompleteUpload(ctx context.Context, upload StreamUpload, parts []StreamPart) error { + m.mtx.Lock() + defer m.mtx.Unlock() + log.Debugf("Complete %v with %v parts.", upload, len(parts)) + up, ok := m.uploads[upload.ID] + if !ok { + return trace.NotFound("upload not found") + } + if up.completed { + return trace.BadParameter("upload already completed") + } + // verify that all parts have been uploaded + var result []byte + partsSet := make(map[int64]bool, len(parts)) + for _, part := range parts { + partsSet[part.Number] = true + data, ok := up.parts[part.Number] + if !ok { + return trace.NotFound("part %v has not been uploaded", part.Number) + } + result = append(result, data...) 
+ } + // exclude parts that are not requested to be completed + for number := range up.parts { + if !partsSet[number] { + delete(up.parts, number) + } + } + m.objects[upload.SessionID] = result + up.completed = true + m.trySendEvent(UploadEvent{SessionID: string(upload.SessionID), UploadID: upload.ID}) + return nil +} + +// UploadPart uploads part and returns the part +func (m *MemoryUploader) UploadPart(ctx context.Context, upload StreamUpload, partNumber int64, partBody io.ReadSeeker) (*StreamPart, error) { + data, err := ioutil.ReadAll(partBody) + if err != nil { + return nil, trace.Wrap(err) + } + m.mtx.Lock() + defer m.mtx.Unlock() + up, ok := m.uploads[upload.ID] + if !ok { + return nil, trace.NotFound("upload is not found") + } + up.parts[partNumber] = data + return &StreamPart{Number: partNumber}, nil +} + +// ListUploads lists uploads that have been initated but not completed with +// earlier uploads returned first +func (m *MemoryUploader) ListUploads(ctx context.Context) ([]StreamUpload, error) { + m.mtx.RLock() + defer m.mtx.RUnlock() + out := make([]StreamUpload, 0, len(m.uploads)) + for id := range m.uploads { + out = append(out, StreamUpload{ + ID: id, + }) + } + return out, nil +} + +// GetParts returns upload parts uploaded up to date, sorted by part number +func (m *MemoryUploader) GetParts(uploadID string) ([][]byte, error) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + up, ok := m.uploads[uploadID] + if !ok { + return nil, trace.NotFound("upload is not found") + } + + partNumbers := make([]int64, 0, len(up.parts)) + sortedParts := make([][]byte, 0, len(up.parts)) + for partNumber := range up.parts { + partNumbers = append(partNumbers, partNumber) + } + sort.Slice(partNumbers, func(i, j int) bool { + return partNumbers[i] < partNumbers[j] + }) + for _, partNumber := range partNumbers { + sortedParts = append(sortedParts, up.parts[partNumber]) + } + return sortedParts, nil +} + +// ListParts returns all uploaded parts for the completed upload in sorted order +func (m *MemoryUploader) ListParts(ctx context.Context, upload StreamUpload) ([]StreamPart, error) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + up, ok := m.uploads[upload.ID] + if !ok { + return nil, trace.NotFound("upload %v is not found", upload.ID) + } + + partNumbers := make([]int64, 0, len(up.parts)) + sortedParts := make([]StreamPart, 0, len(up.parts)) + for partNumber := range up.parts { + partNumbers = append(partNumbers, partNumber) + } + sort.Slice(partNumbers, func(i, j int) bool { + return partNumbers[i] < partNumbers[j] + }) + for _, partNumber := range partNumbers { + sortedParts = append(sortedParts, StreamPart{Number: partNumber}) + } + return sortedParts, nil +} + +// Upload uploads session tarball and returns URL with uploaded file +// in case of success. 
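+// Together with the multipart methods above, this lets MemoryUploader stand in
+// for a MultipartHandler in tests.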
+func (m *MemoryUploader) Upload(ctx context.Context, sessionID session.ID, readCloser io.Reader) (string, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + _, ok := m.objects[sessionID] + if ok { + return "", trace.AlreadyExists("session %q already exists", sessionID) + } + data, err := ioutil.ReadAll(readCloser) + if err != nil { + return "", trace.ConvertSystemError(err) + } + m.objects[sessionID] = data + return string(sessionID), nil +} + +// Download downloads session tarball and writes it to writer +func (m *MemoryUploader) Download(ctx context.Context, sessionID session.ID, writer io.WriterAt) error { + m.mtx.RLock() + defer m.mtx.RUnlock() + + data, ok := m.objects[sessionID] + if !ok { + return trace.NotFound("session %q is not found", sessionID) + } + _, err := io.Copy(writer.(io.Writer), bytes.NewReader(data)) + if err != nil { + return trace.ConvertSystemError(err) + } + return nil +} diff --git a/lib/events/test/streamsuite.go b/lib/events/test/streamsuite.go new file mode 100644 index 0000000000000..238803507d631 --- /dev/null +++ b/lib/events/test/streamsuite.go @@ -0,0 +1,182 @@ +package test + +import ( + "context" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/gravitational/teleport/lib/events" + "github.com/gravitational/teleport/lib/session" + + "github.com/stretchr/testify/assert" +) + +// StreamParams configures parameters of a stream test suite +type StreamParams struct { + // PrintEvents is amount of print events to generate + PrintEvents int64 + // ConcurrentUploads is amount of concurrent uploads + ConcurrentUploads int + // MinUploadBytes is minimum required upload bytes + MinUploadBytes int64 +} + +// StreamSinglePart tests stream upload and subsequent download and reads the results +func StreamSinglePart(t *testing.T, handler events.MultipartHandler) { + StreamWithParameters(t, handler, StreamParams{ + PrintEvents: 1024, + MinUploadBytes: 1024 * 1024, + }) +} + +// Stream tests stream upload and subsequent download and reads the results +func Stream(t *testing.T, handler events.MultipartHandler) { + StreamWithParameters(t, handler, StreamParams{ + PrintEvents: 1024, + MinUploadBytes: 1024, + ConcurrentUploads: 2, + }) +} + +// StreamManyParts tests stream upload and subsequent download and reads the results +func StreamManyParts(t *testing.T, handler events.MultipartHandler) { + StreamWithParameters(t, handler, StreamParams{ + PrintEvents: 8192, + MinUploadBytes: 1024, + ConcurrentUploads: 64, + }) +} + +// StreamResumeManyParts tests stream upload, failure to complete, resuming +// and subsequent download and reads the results. 
+func StreamResumeManyParts(t *testing.T, handler events.MultipartHandler) { + StreamResumeWithParameters(t, handler, StreamParams{ + PrintEvents: 8192, + MinUploadBytes: 1024, + ConcurrentUploads: 64, + }) +} + +// StreamWithParameters tests stream upload and subsequent download and reads the results +func StreamWithParameters(t *testing.T, handler events.MultipartHandler, params StreamParams) { + ctx := context.TODO() + + inEvents := events.GenerateTestSession(events.SessionParams{PrintEvents: params.PrintEvents}) + sid := session.ID(inEvents[0].(events.SessionMetadataGetter).GetSessionID()) + + streamer, err := events.NewProtoStreamer(events.ProtoStreamerConfig{ + Uploader: handler, + MinUploadBytes: params.MinUploadBytes, + ConcurrentUploads: params.ConcurrentUploads, + }) + assert.Nil(t, err) + + stream, err := streamer.CreateAuditStream(ctx, sid) + assert.Nil(t, err) + + select { + case status := <-stream.Status(): + assert.Equal(t, status.LastEventIndex, int64(-1)) + case <-time.After(time.Second): + t.Fatalf("Timed out waiting for status update.") + } + + for _, event := range inEvents { + err := stream.EmitAuditEvent(ctx, event) + assert.Nil(t, err) + } + + err = stream.Complete(ctx) + assert.Nil(t, err) + + f, err := ioutil.TempFile("", string(sid)) + assert.Nil(t, err) + defer os.Remove(f.Name()) + defer f.Close() + + err = handler.Download(ctx, sid, f) + assert.Nil(t, err) + + _, err = f.Seek(0, 0) + assert.Nil(t, err) + + reader := events.NewProtoReader(f) + out, err := reader.ReadAll(ctx) + assert.Nil(t, err) + + stats := reader.GetStats() + assert.Equal(t, stats.SkippedEvents, int64(0)) + assert.Equal(t, stats.OutOfOrderEvents, int64(0)) + assert.Equal(t, stats.TotalEvents, int64(len(inEvents))) + + assert.Equal(t, inEvents, out) +} + +// StreamResumeWithParameters expects initial complete attempt to fail +// but subsequent resume to succeed +func StreamResumeWithParameters(t *testing.T, handler events.MultipartHandler, params StreamParams) { + ctx := context.TODO() + + inEvents := events.GenerateTestSession(events.SessionParams{PrintEvents: params.PrintEvents}) + sid := session.ID(inEvents[0].(events.SessionMetadataGetter).GetSessionID()) + + streamer, err := events.NewProtoStreamer(events.ProtoStreamerConfig{ + Uploader: handler, + MinUploadBytes: params.MinUploadBytes, + ConcurrentUploads: params.ConcurrentUploads, + }) + assert.Nil(t, err) + + upload, err := handler.CreateUpload(ctx, sid) + assert.Nil(t, err) + + stream, err := streamer.CreateAuditStreamForUpload(ctx, sid, *upload) + assert.Nil(t, err) + + for _, event := range inEvents { + err := stream.EmitAuditEvent(ctx, event) + assert.Nil(t, err) + } + + err = stream.Complete(ctx) + assert.NotNil(t, err, "First complete attempt should fail here.") + + stream, err = streamer.ResumeAuditStream(ctx, sid, upload.ID) + assert.Nil(t, err) + + // First update always starts with -1 and indicates + // that resume has been started successfully + select { + case status := <-stream.Status(): + assert.Equal(t, status.LastEventIndex, int64(-1)) + case <-time.After(time.Second): + t.Fatalf("Timed out waiting for status update.") + } + + err = stream.Complete(ctx) + assert.Nil(t, err, "Complete after resume should succeed") + + f, err := ioutil.TempFile("", string(sid)) + assert.Nil(t, err) + defer os.Remove(f.Name()) + defer f.Close() + + err = handler.Download(ctx, sid, f) + assert.Nil(t, err) + + _, err = f.Seek(0, 0) + assert.Nil(t, err) + + reader := events.NewProtoReader(f) + out, err := reader.ReadAll(ctx) + assert.Nil(t, 
err) + + stats := reader.GetStats() + assert.Equal(t, stats.SkippedEvents, int64(0)) + assert.Equal(t, stats.OutOfOrderEvents, int64(0)) + assert.Equal(t, stats.TotalEvents, int64(len(inEvents))) + + assert.Equal(t, inEvents, out) +} diff --git a/lib/events/test/suite.go b/lib/events/test/suite.go index 25abf70139910..9c0751203de71 100644 --- a/lib/events/test/suite.go +++ b/lib/events/test/suite.go @@ -1,5 +1,5 @@ /* -Copyright 2018 Gravitational, Inc. +Copyright 2018-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,7 +23,7 @@ import ( "encoding/json" "io/ioutil" "os" - "path/filepath" + "testing" "time" "github.com/gravitational/teleport/lib/defaults" @@ -32,47 +32,44 @@ import ( "github.com/gravitational/teleport/lib/session" "github.com/jonboulle/clockwork" + "github.com/stretchr/testify/assert" "gopkg.in/check.v1" ) -// HandlerSuite is a conformance test suite to verify external UploadHandlers -// behavior. -type HandlerSuite struct { - Handler events.UploadHandler -} - -func (s *HandlerSuite) UploadDownload(c *check.C) { +// UploadDownload tests uploads and downloads +func UploadDownload(t *testing.T, handler events.MultipartHandler) { val := "hello, how is it going? this is the uploaded file" id := session.NewID() - _, err := s.Handler.Upload(context.TODO(), id, bytes.NewBuffer([]byte(val))) - c.Assert(err, check.IsNil) + _, err := handler.Upload(context.TODO(), id, bytes.NewBuffer([]byte(val))) + assert.Nil(t, err) - dir := c.MkDir() - f, err := os.Create(filepath.Join(dir, string(id))) - c.Assert(err, check.IsNil) + f, err := ioutil.TempFile("", string(id)) + assert.Nil(t, err) + defer os.Remove(f.Name()) defer f.Close() - err = s.Handler.Download(context.TODO(), id, f) - c.Assert(err, check.IsNil) + err = handler.Download(context.TODO(), id, f) + assert.Nil(t, err) _, err = f.Seek(0, 0) - c.Assert(err, check.IsNil) + assert.Nil(t, err) data, err := ioutil.ReadAll(f) - c.Assert(err, check.IsNil) - c.Assert(string(data), check.Equals, val) + assert.Nil(t, err) + assert.Equal(t, string(data), val) } -func (s *HandlerSuite) DownloadNotFound(c *check.C) { +// DownloadNotFound tests handling of the scenario when download is not found +func DownloadNotFound(t *testing.T, handler events.MultipartHandler) { id := session.NewID() - dir := c.MkDir() - f, err := os.Create(filepath.Join(dir, string(id))) - c.Assert(err, check.IsNil) + f, err := ioutil.TempFile("", string(id)) + assert.Nil(t, err) + defer os.Remove(f.Name()) defer f.Close() - err = s.Handler.Download(context.TODO(), id, f) - fixtures.ExpectNotFound(c, err) + err = handler.Download(context.TODO(), id, f) + fixtures.AssertNotFound(t, err) } // EventsSuite is a conformance test suite to verify external event backends @@ -85,7 +82,7 @@ type EventsSuite struct { // SessionEventsCRUD covers session events func (s *EventsSuite) SessionEventsCRUD(c *check.C) { // Bob has logged in - err := s.Log.EmitAuditEvent(events.UserLocalLogin, events.EventFields{ + err := s.Log.EmitAuditEventLegacy(events.UserLocalLoginE, events.EventFields{ events.LoginMethod: events.LoginMethodSAML, events.AuthAttemptSuccess: true, events.EventUser: "bob", diff --git a/lib/events/uploader.go b/lib/events/uploader.go index b1c453efb53dc..1ed0124510688 100644 --- a/lib/events/uploader.go +++ b/lib/events/uploader.go @@ -45,10 +45,18 @@ type UploadHandler interface { Download(ctx context.Context, sessionID session.ID, writer io.WriterAt) error } +// 
MultipartHandler handles both multipart uploads and downloads +type MultipartHandler interface { + UploadHandler + MultipartUploader +} + // UploadEvent is emitted by uploader and is used in tests type UploadEvent struct { // SessionID is a session ID SessionID string + // UploadID specifies upload ID for a successful upload + UploadID string // Error is set in case if event resulted in error Error error } @@ -73,7 +81,7 @@ type UploaderConfig struct { AuditLog IAuditLog // EventsC is an event channel used to signal events // used in tests - EventsC chan *UploadEvent + EventsC chan UploadEvent } // CheckAndSetDefaults checks and sets default values of UploaderConfig @@ -211,7 +219,7 @@ func (u *Uploader) emitEvent(e UploadEvent) { return } select { - case u.EventsC <- &e: + case u.EventsC <- e: return default: u.Warningf("Skip send event on a blocked channel.") diff --git a/lib/events/writer.go b/lib/events/writer.go index 38a02d9634781..a855615625ad2 100644 --- a/lib/events/writer.go +++ b/lib/events/writer.go @@ -53,8 +53,8 @@ func (w *WriterLog) Close() error { return w.w.Close() } -// EmitAuditEvent emits audit event -func (w *WriterLog) EmitAuditEvent(event Event, fields EventFields) error { +// EmitAuditEventLegacy emits audit event +func (w *WriterLog) EmitAuditEventLegacy(event Event, fields EventFields) error { err := UpdateEventFields(event, fields, w.clock, w.newUID) if err != nil { log.Error(err) diff --git a/lib/fixtures/fixtures.go b/lib/fixtures/fixtures.go index 3862dfa0447dd..1e1294f5c209b 100644 --- a/lib/fixtures/fixtures.go +++ b/lib/fixtures/fixtures.go @@ -1,7 +1,9 @@ package fixtures import ( + "reflect" "runtime/debug" + "testing" "github.com/davecgh/go-spew/spew" "github.com/gravitational/trace" @@ -44,11 +46,119 @@ func ExpectLimitExceeded(c *check.C, err error) { c.Assert(trace.IsLimitExceeded(err), check.Equals, true, check.Commentf("expected LimitExceeded, got %T %v at %v", trace.Unwrap(err), err, string(debug.Stack()))) } +// AssertNotFound expects not found error +func AssertNotFound(t *testing.T, err error) { + if trace.IsNotFound(err) == false { + t.Fatalf("Expected NotFound, got %T %v at %v.", trace.Unwrap(err), err, string(debug.Stack())) + } +} + +// AssertBadParameter expects bad parameter error +func AssertBadParameter(t *testing.T, err error) { + if trace.IsBadParameter(err) == false { + t.Fatalf("Expected BadParameter, got %T %v at %v.", trace.Unwrap(err), err, string(debug.Stack())) + } +} + +// AssertCompareFailed expects compare failed error +func AssertCompareFailed(t *testing.T, err error) { + if trace.IsCompareFailed(err) == false { + t.Fatalf("Expected CompareFailed, got %T %v at %v.", trace.Unwrap(err), err, string(debug.Stack())) + } +} + +// AssertAccessDenied expects error to be access denied +func AssertAccessDenied(t *testing.T, err error) { + if trace.IsAccessDenied(err) == false { + t.Fatalf("Expected AccessDenied, got %T %v at %v.", trace.Unwrap(err), err, string(debug.Stack())) + } +} + +// AssertAlreadyExists expects already exists error +func AssertAlreadyExists(t *testing.T, err error) { + if trace.IsAlreadyExists(err) == false { + t.Fatalf("Expected AlreadyExists, got %T %v at %v.", trace.Unwrap(err), err, string(debug.Stack())) + } +} + +// AssertConnectionProblem expects connection problem error +func AssertConnectionProblem(t *testing.T, err error) { + if trace.IsConnectionProblem(err) == false { + t.Fatalf("Expected ConnectionProblem, got %T %v at %v.", trace.Unwrap(err), err, string(debug.Stack())) + } +} + // DeepCompare 
uses gocheck DeepEquals but provides nice diff if things are not equal func DeepCompare(c *check.C, a, b interface{}) { d := &spew.ConfigState{Indent: " ", DisableMethods: true, DisablePointerMethods: true, DisablePointerAddresses: true} - c.Assert(a, check.DeepEquals, b, check.Commentf("%v\nStack:\n%v\n", diff.Diff(d.Sdump(a), d.Sdump(b)), string(debug.Stack()))) + if !reflect.DeepEqual(a, b) { + c.Fatalf("Values are not equal\n%v\nStack:\n%v\n", diff.Diff(d.Sdump(a), d.Sdump(b)), string(debug.Stack())) + } +} + +// DeepCompareSlices compares two slices +func DeepCompareSlices(c *check.C, a, b interface{}) { + aval, bval := reflect.ValueOf(a), reflect.ValueOf(b) + if aval.Kind() != reflect.Slice { + c.Fatalf("%v is not a map, %T", a, a) + } + + if bval.Kind() != reflect.Slice { + c.Fatalf("%v is not a map, %T", b, b) + } + + if aval.Len() != bval.Len() { + c.Fatalf("slices have different length of %v and %v", aval.Len(), bval.Len()) + } + + for i := 0; i < aval.Len(); i++ { + DeepCompare(c, aval.Index(i).Interface(), bval.Index(i).Interface()) + } +} + +// DeepCompareMaps compares two maps +func DeepCompareMaps(c *check.C, a, b interface{}) { + aval, bval := reflect.ValueOf(a), reflect.ValueOf(b) + if aval.Kind() != reflect.Map { + c.Fatalf("%v is not a map, %T", a, a) + } + + if bval.Kind() != reflect.Map { + c.Fatalf("%v is not a map, %T", b, b) + } + + for _, k := range aval.MapKeys() { + vala := aval.MapIndex(k) + valb := bval.MapIndex(k) + + if !vala.IsValid() { + c.Fatalf("expected valid value for %v in %v", k.Interface(), a) + } + + if !valb.IsValid() { + c.Fatalf("key %v is found in %v, but not in %v", k.Interface(), a, b) + } + } + + for _, k := range bval.MapKeys() { + vala := aval.MapIndex(k) + valb := bval.MapIndex(k) + + if !valb.IsValid() { + c.Fatalf("expected valid value for %v in %v", k.Interface(), a) + } + + if !vala.IsValid() { + c.Fatalf("key %v is found in %v, but not in %v", k.Interface(), a, b) + } + + if reflect.ValueOf(vala.Interface()).Kind() == reflect.Map { + DeepCompareMaps(c, vala.Interface(), valb.Interface()) + } else { + DeepCompare(c, vala.Interface(), valb.Interface()) + } + } } const SAMLOktaAuthRequestID = `_4d84cad1-1c61-4e4f-8ab6-1358b8d0da77` diff --git a/lib/fixtures/keys.go b/lib/fixtures/keys.go index 3bf4e700af40c..56f9930ef6d96 100644 --- a/lib/fixtures/keys.go +++ b/lib/fixtures/keys.go @@ -1,5 +1,16 @@ package fixtures +import ( + "crypto/tls" + "crypto/x509" + "net/http" + "strings" + + "golang.org/x/net/http2" + + "github.com/gravitational/trace" +) + var PEMBytes = map[string][]byte{ "ecdsa": []byte(`-----BEGIN EC PRIVATE KEY----- MHcCAQEEINGWx0zo6fhJ/0EAfrPzVFyFC9s18lBt3cRoEDhS3ARooAoGCCqGSM49 @@ -34,3 +45,89 @@ MHeDg2Bs7/XZsIrn6vo7kXmQSoQKA8O2E7rYSigUayBKa/+5thbnjKlEP+slBzmp NFJgTFxC2o3mVBkQ/s6FeDl62hpMheCuO6jRYbZjsM2tUeAKORws -----END RSA PRIVATE KEY-----`), } + +// LocalhostCert is a PEM-encoded TLS cert with SAN IPs +// "127.0.0.1" and "[::1]", expiring at Jan 29 16:00:00 2084 GMT. 
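// (Illustrative aside, not part of this change.) The LocalTLSConfig helper
// further down in this file bundles this certificate and key into a ready-made
// test configuration. A caller would use it roughly like the sketch below,
// where handler is whatever http.Handler the test serves and t is the test's
// *testing.T; the exact wiring is an assumption for illustration only:
//
//	cfg, err := fixtures.LocalTLSConfig()
//	if err != nil {
//		t.Fatal(err)
//	}
//	lis, err := tls.Listen("tcp", "127.0.0.1:0", cfg.TLS) // serves the self-signed localhost cert
//	if err != nil {
//		t.Fatal(err)
//	}
//	go http.Serve(lis, handler)
//	client := cfg.NewClient() // trusts the self-signed CA via cfg.CertPool
//	resp, err := client.Get("https://" + lis.Addr().String())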
+// generated from src/crypto/tls: +// go run generate_cert.go --rsa-bits 1024 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +var LocalhostCert = []byte(`-----BEGIN CERTIFICATE----- +MIICEzCCAXygAwIBAgIQMIMChMLGrR+QvmQvpwAU6zANBgkqhkiG9w0BAQsFADAS +MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw +MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB +iQKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9SjY1bIw4 +iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZBl2+XsDul +rKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQABo2gwZjAO +BgNVHQ8BAf8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUw +AwEB/zAuBgNVHREEJzAlggtleGFtcGxlLmNvbYcEfwAAAYcQAAAAAAAAAAAAAAAA +AAAAATANBgkqhkiG9w0BAQsFAAOBgQCEcetwO59EWk7WiJsG4x8SY+UIAA+flUI9 +tyC4lNhbcF2Idq9greZwbYCqTTTr2XiRNSMLCOjKyI7ukPoPjo16ocHj+P3vZGfs +h1fIw3cSS2OolhloGw/XM6RWPWtPAlGykKLciQrBru5NAPvCMsb/I1DAceTiotQM +fblo6RBxUQ== +-----END CERTIFICATE-----`) + +// LocalhostKey is the private key for localhostCert. +var LocalhostKey = []byte(testingKey(`-----BEGIN RSA TESTING KEY----- +MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9 +SjY1bIw4iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZB +l2+XsDulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB +AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet +3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb +uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H +qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp +jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY +fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U +fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU +y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtIX +qyUBnu3X9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFeo +f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA== +-----END RSA TESTING KEY-----`)) + +func testingKey(s string) string { return strings.ReplaceAll(s, "TESTING KEY", "PRIVATE KEY") } + +// TLSConfig is TLS configuration for running local TLS tests +type TLSConfig struct { + // CertPool is a trusted certificate authority pool + // that consists of self signed cert + CertPool *x509.CertPool + // Certificate is a client x509 client cert + Certificate *x509.Certificate + // TLS is a TLS server configuration + TLS *tls.Config +} + +// NewClient creates a HTTP client +func (t *TLSConfig) NewClient() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: t.CertPool, + }, + }, + } +} + +// LocalTLSConfig returns local TLS config with self signed certificate +func LocalTLSConfig() (*TLSConfig, error) { + cert, err := tls.X509KeyPair(LocalhostCert, LocalhostKey) + if err != nil { + return nil, trace.Wrap(err) + } + + cfg := &tls.Config{ + NextProtos: []string{http2.NextProtoTLS, "http/1.1"}, + Certificates: []tls.Certificate{cert}, + } + + certificate, err := x509.ParseCertificate(cfg.Certificates[0].Certificate[0]) + if err != nil { + return nil, trace.Wrap(err) + } + certPool := x509.NewCertPool() + certPool.AddCert(certificate) + + return &TLSConfig{ + CertPool: certPool, + TLS: cfg, + Certificate: certificate, + }, nil +} diff --git a/lib/httplib/grpccreds.go b/lib/httplib/grpccreds.go new file mode 100644 index 0000000000000..5916d07d522a6 --- /dev/null +++ b/lib/httplib/grpccreds.go @@ -0,0 +1,106 @@ +/* +Copyright 2020 Gravitational, Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package httplib + +import ( + "context" + "crypto/tls" + "net" + "syscall" + + "google.golang.org/grpc/credentials" + + "github.com/gravitational/trace" +) + +// TLSCreds is the credentials required for authenticating a connection using TLS. +type TLSCreds struct { + // TLS configuration + Config *tls.Config +} + +// Info returns protocol info +func (c TLSCreds) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.Config.ServerName, + } +} + +// ClientHandshake callback is called to perform client handshake on the tls conn +func (c *TLSCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { + return nil, nil, trace.NotImplemented("client handshakes are not supported") +} + +// ServerHandshake callback is called to perform server TLS handshake +// this wrapper makes sure that the connection is already tls and +// handshake has been performed +func (c *TLSCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + tlsConn, ok := rawConn.(*tls.Conn) + if !ok { + return nil, nil, trace.BadParameter("expected TLS connection") + } + return WrapSyscallConn(rawConn, tlsConn), credentials.TLSInfo{State: tlsConn.ConnectionState()}, nil +} + +// Clone clones transport credentials +func (c *TLSCreds) Clone() credentials.TransportCredentials { + return &TLSCreds{ + Config: c.Config.Clone(), + } +} + +// OverrideServerName overrides server name in the TLS config +func (c *TLSCreds) OverrideServerName(serverNameOverride string) error { + c.Config.ServerName = serverNameOverride + return nil +} + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. 
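//
// Illustrative usage (a hypothetical caller, not part of this change): after
// wrapping, code that needs the underlying socket can still reach it through
// syscall.Conn, even though reads and writes go through the new connection:
//
//	wrapped := WrapSyscallConn(rawConn, tlsConn)
//	if sc, ok := wrapped.(syscall.Conn); ok {
//		rc, err := sc.SyscallConn()
//		if err == nil {
//			_ = rc.Control(func(fd uintptr) {
//				// inspect or tune the raw socket here, e.g. read SO_* options
//			})
//		}
//	}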
+func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/lib/kube/proxy/forwarder.go b/lib/kube/proxy/forwarder.go index 1330bffe87551..b1a6b684442d9 100644 --- a/lib/kube/proxy/forwarder.go +++ b/lib/kube/proxy/forwarder.go @@ -34,6 +34,7 @@ import ( "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/events" + "github.com/gravitational/teleport/lib/events/filesessions" "github.com/gravitational/teleport/lib/httplib" "github.com/gravitational/teleport/lib/reversetunnel" "github.com/gravitational/teleport/lib/services" @@ -75,8 +76,6 @@ type ForwarderConfig struct { // AccessPoint is a caching access point to auth server // for caching common requests to the backend AccessPoint auth.AccessPoint - // AuditLog is audit log to send events to - AuditLog events.IAuditLog // ServerID is a unique ID of a proxy server ServerID string // ClusterOverride if set, routes all requests @@ -376,8 +375,9 @@ func (f *Forwarder) setupContext(ctx auth.AuthContext, req *http.Request, isRemo if err != nil { return nil, trace.Wrap(err) } - if ctx.Identity.RouteToCluster != "" { - targetCluster, err = f.Tunnel.GetSite(ctx.Identity.RouteToCluster) + identity := ctx.Identity.GetIdentity() + if identity.RouteToCluster != "" { + targetCluster, err = f.Tunnel.GetSite(identity.RouteToCluster) if err != nil { return nil, trace.Wrap(err) } @@ -435,6 +435,31 @@ func (f *Forwarder) setupContext(ctx auth.AuthContext, req *http.Request, isRemo return authCtx, nil } +// newStreamer returns sync or async streamer based on the configuration +// of the server and the session, sync streamer sends the events +// directly to the auth server and blocks if the events can not be received, +// async streamer buffers the events to disk and uploads the events later +func (f *Forwarder) newStreamer(ctx *authContext) (events.Streamer, error) { + mode := ctx.clusterConfig.GetSessionRecording() + if services.IsRecordSync(mode) { + f.Debugf("Using sync streamer for session") + return f.Client, nil + } + f.Debugf("Using async streamer for session.") + dir := filepath.Join( + f.DataDir, teleport.LogsDir, teleport.ComponentUpload, + events.StreamingLogsDir, defaults.Namespace, + ) + fileStreamer, err := filesessions.NewStreamer(dir) + if err != nil { + return nil, trace.Wrap(err) + } + // TeeStreamer sends non-print and non disk events + // to the audit log in async mode, while buffering all + // events on disk for further upload at the end of the session + return events.NewTeeStreamer(fileStreamer, f.Client), nil +} + // exec forwards all exec requests to the target server, captures // all output from the session func (f *Forwarder) exec(ctx *authContext, w http.ResponseWriter, req *http.Request, p httprouter.Params) (interface{}, error) { @@ -455,47 +480,71 @@ func (f *Forwarder) exec(ctx *authContext, w http.ResponseWriter, req *http.Requ } var recorder events.SessionRecorder + var emitter events.Emitter sessionID := session.NewID() var err error if request.tty { + streamer, err := f.newStreamer(ctx) + if err != nil { + return nil, trace.Wrap(err) + } // create session recorder // get the audit log from the server and create a session recorder. this will // be a discard audit log if the proxy is in recording mode and a teleport // node so we don't create double recordings. 
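	// (Illustrative aside, not part of this change.) The streamer returned by
	// newStreamer above depends on the cluster's session recording mode: for
	// the sync modes, events go straight to the auth server, while the classic
	// async modes buffer events on disk under the data directory and upload
	// them once the session finishes. A rough sketch of the mapping:
	//
	//	services.IsRecordSync(services.RecordAtNode)      // false: async, disk-buffered
	//	services.IsRecordSync(services.RecordAtNodeSync)  // true:  sync, streamed to auth
	//	services.IsRecordSync(services.RecordAtProxySync) // true:  sync, streamed to auth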
- recorder, err = events.NewForwardRecorder(events.ForwardRecorderConfig{ - DataDir: filepath.Join(f.DataDir, teleport.LogsDir), - SessionID: sessionID, - Namespace: f.Namespace, - RecordSessions: ctx.clusterConfig.GetSessionRecording() != services.RecordOff, - Component: teleport.Component(teleport.ComponentSession, teleport.ComponentKube), - ForwardTo: f.AuditLog, + recorder, err = events.NewAuditWriter(events.AuditWriterConfig{ + // Audit stream is using server context, not session context, + // to make sure that session is uploaded even after it is closed + Context: f.Context, + Streamer: streamer, + Clock: f.Clock, + SessionID: sessionID, + ServerID: f.ServerID, + Namespace: f.Namespace, + RecordOutput: ctx.clusterConfig.GetSessionRecording() != services.RecordOff, + Component: teleport.Component(teleport.ComponentSession, teleport.ComponentKube), }) if err != nil { return nil, trace.Wrap(err) } - defer recorder.Close() + emitter = recorder + defer recorder.Close(f.Context) request.onResize = func(resize remotecommand.TerminalSize) { params := session.TerminalParams{ W: int(resize.Width), H: int(resize.Height), } // Build the resize event. - resizeEvent := events.EventFields{ - events.EventProtocol: events.EventProtocolKube, - events.EventType: events.ResizeEvent, - events.EventNamespace: f.Namespace, - events.SessionEventID: sessionID, - events.EventLogin: ctx.User.GetName(), - events.EventUser: ctx.User.GetName(), - events.TerminalSize: params.Serialize(), + resizeEvent := &events.Resize{ + Metadata: events.Metadata{ + Type: events.ResizeEvent, + Code: events.TerminalResizeCode, + }, + ConnectionMetadata: events.ConnectionMetadata{ + RemoteAddr: req.RemoteAddr, + Protocol: events.EventProtocolKube, + }, + ServerMetadata: events.ServerMetadata{ + ServerNamespace: f.Namespace, + }, + SessionMetadata: events.SessionMetadata{ + SessionID: string(sessionID), + }, + UserMetadata: events.UserMetadata{ + User: ctx.User.GetName(), + Login: ctx.User.GetName(), + }, + TerminalSize: params.Serialize(), } // Report the updated window size to the event log (this is so the sessions // can be replayed correctly). 
- if err := recorder.GetAuditLog().EmitAuditEvent(events.TerminalResize, resizeEvent); err != nil { - f.Warnf("Failed to emit terminal resize event: %v", err) + if err := recorder.EmitAuditEvent(f.Context, resizeEvent); err != nil { + f.WithError(err).Warn("Failed to emit terminal resize event.") } } + } else { + emitter = f.Client } sess, err := f.getOrCreateClusterSession(*ctx) @@ -513,21 +562,33 @@ func (f *Forwarder) exec(ctx *authContext, w http.ResponseWriter, req *http.Requ W: 100, H: 100, } - if err := recorder.GetAuditLog().EmitAuditEvent(events.SessionStart, events.EventFields{ - events.EventProtocol: events.EventProtocolKube, - events.EventNamespace: f.Namespace, - events.SessionEventID: string(sessionID), - events.SessionServerID: f.ServerID, - events.SessionServerHostname: sess.cluster.GetName(), - events.SessionServerAddr: sess.cluster.targetAddr, - events.SessionInteractive: true, - events.EventLogin: ctx.User.GetName(), - events.EventUser: ctx.User.GetName(), - events.LocalAddr: sess.cluster.targetAddr, - events.RemoteAddr: req.RemoteAddr, - events.TerminalSize: termParams.Serialize(), - }); err != nil { - f.Warnf("Failed to emit session start event: %v", err) + sessionStartEvent := &events.SessionStart{ + Metadata: events.Metadata{ + Type: events.SessionStartEvent, + Code: events.SessionStartCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: f.ServerID, + ServerNamespace: f.Namespace, + ServerHostname: sess.cluster.GetName(), + ServerAddr: sess.cluster.targetAddr, + }, + SessionMetadata: events.SessionMetadata{ + SessionID: string(sessionID), + }, + UserMetadata: events.UserMetadata{ + User: ctx.User.GetName(), + Login: ctx.User.GetName(), + }, + ConnectionMetadata: events.ConnectionMetadata{ + RemoteAddr: req.RemoteAddr, + LocalAddr: sess.cluster.targetAddr, + Protocol: events.EventProtocolKube, + }, + TerminalSize: termParams.Serialize(), + } + if err := emitter.EmitAuditEvent(f.Context, sessionStartEvent); err != nil { + f.WithError(err).Warn("Failed to emit event.") } } @@ -562,47 +623,72 @@ func (f *Forwarder) exec(ctx *authContext, w http.ResponseWriter, req *http.Requ } if request.tty { - // send an event indicating that this session has ended - if err := recorder.GetAuditLog().EmitAuditEvent(events.SessionEnd, events.EventFields{ - events.EventProtocol: events.EventProtocolKube, - events.SessionEventID: sessionID, - events.SessionServerID: f.ServerID, - events.SessionServerHostname: sess.cluster.GetName(), - events.SessionServerAddr: sess.cluster.targetAddr, - events.SessionInteractive: true, - events.SessionStartTime: sess.startTime, - events.SessionEndTime: time.Now().UTC(), + sessionEndEvent := &events.SessionEnd{ + Metadata: events.Metadata{ + Type: events.SessionEndEvent, + Code: events.SessionEndCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: f.ServerID, + ServerNamespace: f.Namespace, + }, + SessionMetadata: events.SessionMetadata{ + SessionID: string(sessionID), + }, + UserMetadata: events.UserMetadata{ + User: ctx.User.GetName(), + Login: ctx.User.GetName(), + }, + ConnectionMetadata: events.ConnectionMetadata{ + RemoteAddr: req.RemoteAddr, + LocalAddr: sess.cluster.targetAddr, + Protocol: events.EventProtocolKube, + }, + Interactive: true, // There can only be 1 participant, k8s sessions are not join-able. 
- events.SessionParticipants: []string{ctx.User.GetName()}, - events.EventUser: ctx.User.GetName(), - events.EventNamespace: f.Namespace, - }); err != nil { - f.Warnf("Failed to emit session end event: %v", err) + Participants: []string{ctx.User.GetName()}, + EndTime: time.Now().UTC(), + } + if err := emitter.EmitAuditEvent(f.Context, sessionEndEvent); err != nil { + f.WithError(err).Warn("Failed to emit session end event.") } } else { - f.Debugf("No tty, sending exec event.") // send an exec event - fields := events.EventFields{ - events.EventProtocol: events.EventProtocolKube, - events.ExecEventCommand: strings.Join(request.cmd, " "), - events.EventLogin: ctx.User.GetName(), - events.EventUser: ctx.User.GetName(), - events.LocalAddr: sess.cluster.targetAddr, - events.RemoteAddr: req.RemoteAddr, - events.EventNamespace: f.Namespace, + execEvent := &events.Exec{ + Metadata: events.Metadata{ + Type: events.ExecEvent, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: f.ServerID, + ServerNamespace: f.Namespace, + }, + SessionMetadata: events.SessionMetadata{ + SessionID: string(sessionID), + }, + UserMetadata: events.UserMetadata{ + User: ctx.User.GetName(), + Login: ctx.User.GetName(), + }, + ConnectionMetadata: events.ConnectionMetadata{ + RemoteAddr: req.RemoteAddr, + LocalAddr: sess.cluster.targetAddr, + Protocol: events.EventProtocolKube, + }, + CommandMetadata: events.CommandMetadata{ + Command: strings.Join(request.cmd, " "), + }, } if err != nil { - fields[events.ExecEventError] = err.Error() + execEvent.Code = events.ExecFailureCode + execEvent.Error = err.Error() if exitErr, ok := err.(utilexec.ExitError); ok && exitErr.Exited() { - fields[events.ExecEventCode] = fmt.Sprintf("%d", exitErr.ExitStatus()) - } - if err := f.AuditLog.EmitAuditEvent(events.ExecFailure, fields); err != nil { - f.Warnf("Failed to emit exec failure event: %v", err) + execEvent.ExitCode = fmt.Sprintf("%d", exitErr.ExitStatus()) } } else { - if err := f.AuditLog.EmitAuditEvent(events.Exec, fields); err != nil { - f.Warnf("Failed to emit exec failure event: %v", err) - } + execEvent.Code = events.ExecCode + } + if err := emitter.EmitAuditEvent(f.Context, execEvent); err != nil { + f.WithError(err).Warn("Failed to emit event.") } } @@ -632,20 +718,30 @@ func (f *Forwarder) portForward(ctx *authContext, w http.ResponseWriter, req *ht } onPortForward := func(addr string, success bool) { - event := events.PortForward + portForward := &events.PortForward{ + Metadata: events.Metadata{ + Type: events.PortForwardEvent, + Code: events.PortForwardCode, + }, + UserMetadata: events.UserMetadata{ + Login: ctx.User.GetName(), + User: ctx.User.GetName(), + }, + ConnectionMetadata: events.ConnectionMetadata{ + LocalAddr: sess.cluster.targetAddr, + RemoteAddr: req.RemoteAddr, + Protocol: events.EventProtocolKube, + }, + Addr: addr, + Status: events.Status{ + Success: success, + }, + } if !success { - event = events.PortForwardFailure + portForward.Code = events.PortForwardFailureCode } - if err := f.AuditLog.EmitAuditEvent(event, events.EventFields{ - events.EventProtocol: events.EventProtocolKube, - events.PortForwardAddr: addr, - events.PortForwardSuccess: success, - events.EventLogin: ctx.User.GetName(), - events.EventUser: ctx.User.GetName(), - events.LocalAddr: sess.cluster.targetAddr, - events.RemoteAddr: req.RemoteAddr, - }); err != nil { - f.Warnf("Failed to emit port-forward audit event: %v", err) + if err := f.Client.EmitAuditEvent(f.Context, portForward); err != nil { + f.WithError(err).Warn("Failed to emit 
event.") } } @@ -875,8 +971,8 @@ func (s *clusterSession) monitorConn(conn net.Conn, err error) (net.Conn, error) Context: ctx, TeleportUser: s.User.GetName(), ServerID: s.parent.ServerID, - Audit: s.parent.AuditLog, Entry: s.parent.Entry, + Emitter: s.parent.Client, }) if err != nil { tc.Close() @@ -1110,7 +1206,8 @@ func (f *Forwarder) requestCertificate(ctx authContext) (*tls.Config, error) { // Note: ctx.Identity can potentially have temporary roles granted via // workflow API. Always use the Subject() method to preserve the roles from // caller's certificate. - subject, err := ctx.Identity.Subject() + identity := ctx.Identity.GetIdentity() + subject, err := identity.Subject() if err != nil { return nil, trace.Wrap(err) } diff --git a/lib/kube/proxy/forwarder_test.go b/lib/kube/proxy/forwarder_test.go index 5d00719c9e286..e002eb607b578 100644 --- a/lib/kube/proxy/forwarder_test.go +++ b/lib/kube/proxy/forwarder_test.go @@ -55,14 +55,14 @@ func (s ForwarderSuite) TestRequestCertificate(c *check.C) { }, AuthContext: auth.AuthContext{ User: user, - Identity: tlsca.Identity{ + Identity: auth.WrapIdentity(tlsca.Identity{ Username: "bob", Groups: []string{"group a", "group b"}, Usage: []string{"usage a", "usage b"}, Principals: []string{"principal a", "principal b"}, KubernetesGroups: []string{"k8s group a", "k8s group b"}, Traits: map[string][]string{"trait a": []string{"b", "c"}}, - }, + }), }, } @@ -83,7 +83,7 @@ func (s ForwarderSuite) TestRequestCertificate(c *check.C) { c.Assert(err, check.IsNil) idFromCSR, err := tlsca.FromSubject(csr.Subject, time.Time{}) c.Assert(err, check.IsNil) - c.Assert(*idFromCSR, check.DeepEquals, ctx.Identity) + c.Assert(*idFromCSR, check.DeepEquals, ctx.Identity.GetIdentity()) } func (s ForwarderSuite) TestGetClusterSession(c *check.C) { @@ -263,7 +263,7 @@ func (s ForwarderSuite) TestAuthenticate(c *check.C) { authCtx := auth.AuthContext{ User: user, Checker: roles, - Identity: tlsca.Identity{RouteToCluster: tt.routeToCluster}, + Identity: auth.WrapIdentity(tlsca.Identity{RouteToCluster: tt.routeToCluster}), } authz := mockAuthorizer{ctx: &authCtx} if tt.authzErr { @@ -446,14 +446,14 @@ func (s ForwarderSuite) TestNewClusterSession(c *check.C) { authCtx := authContext{ AuthContext: auth.AuthContext{ User: user, - Identity: tlsca.Identity{ + Identity: auth.WrapIdentity(tlsca.Identity{ Username: "bob", Groups: []string{"group a", "group b"}, Usage: []string{"usage a", "usage b"}, Principals: []string{"principal a", "principal b"}, KubernetesGroups: []string{"k8s group a", "k8s group b"}, Traits: map[string][]string{"trait a": []string{"b", "c"}}, - }, + }), }, cluster: cluster{ RemoteSite: mockRemoteSite{name: "local"}, @@ -475,14 +475,14 @@ func (s ForwarderSuite) TestNewClusterSession(c *check.C) { authCtx = authContext{ AuthContext: auth.AuthContext{ User: user, - Identity: tlsca.Identity{ + Identity: auth.WrapIdentity(tlsca.Identity{ Username: "bob", Groups: []string{"group a", "group b"}, Usage: []string{"usage a", "usage b"}, Principals: []string{"principal a", "principal b"}, KubernetesGroups: []string{"k8s group a", "k8s group b"}, Traits: map[string][]string{"trait a": []string{"b", "c"}}, - }, + }), }, cluster: cluster{ RemoteSite: mockRemoteSite{name: "local"}, @@ -503,14 +503,14 @@ func (s ForwarderSuite) TestNewClusterSession(c *check.C) { authCtx = authContext{ AuthContext: auth.AuthContext{ User: user, - Identity: tlsca.Identity{ + Identity: auth.WrapIdentity(tlsca.Identity{ Username: "bob", Groups: []string{"group a", "group b"}, Usage: 
[]string{"usage a", "usage b"}, Principals: []string{"principal a", "principal b"}, KubernetesGroups: []string{"k8s group a", "k8s group b"}, Traits: map[string][]string{"trait a": []string{"b", "c"}}, - }, + }), }, cluster: cluster{ RemoteSite: mockRemoteSite{name: "remote"}, diff --git a/lib/multiplexer/multiplexer_test.go b/lib/multiplexer/multiplexer_test.go index 7ae0258d0853c..303a936718f8d 100644 --- a/lib/multiplexer/multiplexer_test.go +++ b/lib/multiplexer/multiplexer_test.go @@ -32,395 +32,492 @@ import ( "golang.org/x/crypto/ssh" + "github.com/gravitational/teleport/lib/fixtures" + "github.com/gravitational/teleport/lib/httplib" + "github.com/gravitational/teleport/lib/multiplexer/test" "github.com/gravitational/teleport/lib/sshutils" "github.com/gravitational/teleport/lib/utils" - "gopkg.in/check.v1" -) - -func Test(t *testing.T) { check.TestingT(t) } - -type MuxSuite struct { - signer ssh.Signer -} - -var _ = fmt.Printf -var _ = check.Suite(&MuxSuite{}) - -func (s *MuxSuite) SetUpSuite(c *check.C) { - var err error - - utils.InitLoggerForTests() - - _, s.signer, err = utils.CreateCertificate("foo", ssh.HostCert) - c.Assert(err, check.IsNil) -} + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" -// TestMultiplexing tests basic use case of multiplexing TLS -// and SSH on the same listener socket -func (s *MuxSuite) TestMultiplexing(c *check.C) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - c.Assert(err, check.IsNil) - - mux, err := New(Config{ - Listener: listener, - EnableProxyProtocol: true, - }) - c.Assert(err, check.IsNil) - go mux.Serve() - defer mux.Close() - - backend1 := &httptest.Server{ - Listener: mux.TLS(), - Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "backend 1") - }), - }, - } - backend1.StartTLS() - defer backend1.Close() - - called := false - sshHandler := sshutils.NewChanHandlerFunc(func(_ context.Context, _ *sshutils.ConnectionContext, nch ssh.NewChannel) { - called = true - err := nch.Reject(ssh.Prohibited, "nothing to see here") - c.Assert(err, check.IsNil) - }) - - srv, err := sshutils.NewServer( - "test", - utils.NetAddr{AddrNetwork: "tcp", Addr: "localhost:0"}, - sshHandler, - []ssh.Signer{s.signer}, - sshutils.AuthMethods{Password: pass("abc123")}, - ) - c.Assert(err, check.IsNil) - go srv.Serve(mux.SSH()) - defer srv.Close() - clt, err := ssh.Dial("tcp", listener.Addr().String(), &ssh.ClientConfig{ - Auth: []ssh.AuthMethod{ssh.Password("abc123")}, - Timeout: time.Second, - HostKeyCallback: ssh.FixedHostKey(s.signer.PublicKey()), - }) - c.Assert(err, check.IsNil) - defer clt.Close() - - // call new session to initiate opening new channel - _, err = clt.NewSession() - c.Assert(err, check.NotNil) - // make sure the channel handler was called OK - c.Assert(called, check.Equals, true) - - client := testClient(backend1) - re, err := client.Get(backend1.URL) - c.Assert(err, check.IsNil) - defer re.Body.Close() - bytes, err := ioutil.ReadAll(re.Body) - c.Assert(err, check.IsNil) - c.Assert(string(bytes), check.Equals, "backend 1") - - // Close mux, new requests should fail - mux.Close() - mux.Wait() - - // use new client to use new connection pool - client = testClient(backend1) - re, err = client.Get(backend1.URL) - if err == nil { - re.Body.Close() - } - c.Assert(err, check.NotNil) -} - -// TestProxy tests Proxy line support protocol -func (s *MuxSuite) TestProxy(c *check.C) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - c.Assert(err, check.IsNil) + 
"github.com/stretchr/testify/assert" +) - mux, err := New(Config{ - Listener: listener, - EnableProxyProtocol: true, +// TestMux tests multiplexing protocols +// using the same listener. +func TestMux(t *testing.T) { + utils.InitLoggerForTests(testing.Verbose()) + + _, signer, err := utils.CreateCertificate("foo", ssh.HostCert) + assert.Nil(t, err) + + // TestMux tests basic use case of multiplexing TLS + // and SSH on the same listener socket + t.Run("TLSSSH", func(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + assert.Nil(t, err) + + mux, err := New(Config{ + Listener: listener, + EnableProxyProtocol: true, + }) + assert.Nil(t, err) + go mux.Serve() + defer mux.Close() + + backend1 := &httptest.Server{ + Listener: mux.TLS(), + Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "backend 1") + }), + }, + } + backend1.StartTLS() + defer backend1.Close() + + called := false + sshHandler := sshutils.NewChanHandlerFunc(func(_ context.Context, _ *sshutils.ConnectionContext, nch ssh.NewChannel) { + called = true + err := nch.Reject(ssh.Prohibited, "nothing to see here") + assert.Nil(t, err) + }) + + srv, err := sshutils.NewServer( + "test", + utils.NetAddr{AddrNetwork: "tcp", Addr: "localhost:0"}, + sshHandler, + []ssh.Signer{signer}, + sshutils.AuthMethods{Password: pass("abc123")}, + ) + assert.Nil(t, err) + go srv.Serve(mux.SSH()) + defer srv.Close() + clt, err := ssh.Dial("tcp", listener.Addr().String(), &ssh.ClientConfig{ + Auth: []ssh.AuthMethod{ssh.Password("abc123")}, + Timeout: time.Second, + HostKeyCallback: ssh.FixedHostKey(signer.PublicKey()), + }) + assert.Nil(t, err) + defer clt.Close() + + // call new session to initiate opening new channel + _, err = clt.NewSession() + assert.NotNil(t, err) + // make sure the channel handler was called OK + assert.Equal(t, called, true) + + client := testClient(backend1) + re, err := client.Get(backend1.URL) + assert.Nil(t, err) + defer re.Body.Close() + bytes, err := ioutil.ReadAll(re.Body) + assert.Nil(t, err) + assert.Equal(t, string(bytes), "backend 1") + + // Close mux, new requests should fail + mux.Close() + mux.Wait() + + // use new client to use new connection pool + client = testClient(backend1) + re, err = client.Get(backend1.URL) + if err == nil { + re.Body.Close() + } + assert.NotNil(t, err) }) - c.Assert(err, check.IsNil) - go mux.Serve() - defer mux.Close() - - backend1 := &httptest.Server{ - Listener: mux.TLS(), - Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, r.RemoteAddr) - }), - }, - } - backend1.StartTLS() - defer backend1.Close() - - remoteAddr := net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000} - proxyLine := ProxyLine{ - Protocol: TCP4, - Source: remoteAddr, - Destination: net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 9000}, - } - parsedURL, err := url.Parse(backend1.URL) - c.Assert(err, check.IsNil) - - conn, err := net.Dial("tcp", parsedURL.Host) - c.Assert(err, check.IsNil) - defer conn.Close() - // send proxy line first before establishing TLS connection - _, err = fmt.Fprint(conn, proxyLine.String()) - c.Assert(err, check.IsNil) - - // upgrade connection to TLS - tlsConn := tls.Client(conn, clientConfig(backend1)) - defer tlsConn.Close() - - // make sure the TLS call succeeded and we got remote address - // correctly - out, err := utils.RoundtripWithConn(tlsConn) - c.Assert(err, check.IsNil) - c.Assert(out, check.Equals, remoteAddr.String()) -} - -// TestDisabledProxy 
makes sure the connection gets dropped -// when Proxy line support protocol is turned off -func (s *MuxSuite) TestDisabledProxy(c *check.C) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - c.Assert(err, check.IsNil) + // ProxyLine tests proxy line protocol + t.Run("ProxyLine", func(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + assert.Nil(t, err) + + mux, err := New(Config{ + Listener: listener, + EnableProxyProtocol: true, + }) + assert.Nil(t, err) + go mux.Serve() + defer mux.Close() + + backend1 := &httptest.Server{ + Listener: mux.TLS(), + Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, r.RemoteAddr) + }), + }, + } + backend1.StartTLS() + defer backend1.Close() + + remoteAddr := net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000} + proxyLine := ProxyLine{ + Protocol: TCP4, + Source: remoteAddr, + Destination: net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 9000}, + } - mux, err := New(Config{ - Listener: listener, - EnableProxyProtocol: false, + parsedURL, err := url.Parse(backend1.URL) + assert.Nil(t, err) + + conn, err := net.Dial("tcp", parsedURL.Host) + assert.Nil(t, err) + defer conn.Close() + // send proxy line first before establishing TLS connection + _, err = fmt.Fprint(conn, proxyLine.String()) + assert.Nil(t, err) + + // upgrade connection to TLS + tlsConn := tls.Client(conn, clientConfig(backend1)) + defer tlsConn.Close() + + // make sure the TLS call succeeded and we got remote address + // correctly + out, err := utils.RoundtripWithConn(tlsConn) + assert.Nil(t, err) + assert.Equal(t, out, remoteAddr.String()) }) - c.Assert(err, check.IsNil) - go mux.Serve() - defer mux.Close() - - backend1 := &httptest.Server{ - Listener: mux.TLS(), - Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, r.RemoteAddr) - }), - }, - } - backend1.StartTLS() - defer backend1.Close() - - remoteAddr := net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000} - proxyLine := ProxyLine{ - Protocol: TCP4, - Source: remoteAddr, - Destination: net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 9000}, - } - - parsedURL, err := url.Parse(backend1.URL) - c.Assert(err, check.IsNil) - conn, err := net.Dial("tcp", parsedURL.Host) - c.Assert(err, check.IsNil) - defer conn.Close() - // send proxy line first before establishing TLS connection - _, err = fmt.Fprint(conn, proxyLine.String()) - c.Assert(err, check.IsNil) + // TestDisabledProxy makes sure the connection gets dropped + // when Proxy line support protocol is turned off + t.Run("DisabledProxy", func(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + assert.Nil(t, err) + + mux, err := New(Config{ + Listener: listener, + EnableProxyProtocol: false, + }) + assert.Nil(t, err) + go mux.Serve() + defer mux.Close() + + backend1 := &httptest.Server{ + Listener: mux.TLS(), + Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, r.RemoteAddr) + }), + }, + } + backend1.StartTLS() + defer backend1.Close() + + remoteAddr := net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000} + proxyLine := ProxyLine{ + Protocol: TCP4, + Source: remoteAddr, + Destination: net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 9000}, + } - // upgrade connection to TLS - tlsConn := tls.Client(conn, clientConfig(backend1)) - defer tlsConn.Close() + parsedURL, err := url.Parse(backend1.URL) + assert.Nil(t, err) - // make sure the TLS call failed - _, err = 
utils.RoundtripWithConn(tlsConn) - c.Assert(err, check.NotNil) -} + conn, err := net.Dial("tcp", parsedURL.Host) + assert.Nil(t, err) + defer conn.Close() + // send proxy line first before establishing TLS connection + _, err = fmt.Fprint(conn, proxyLine.String()) + assert.Nil(t, err) -// TestTimeout tests client timeout - client dials, but writes nothing -// make sure server hangs up -func (s *MuxSuite) TestTimeout(c *check.C) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - c.Assert(err, check.IsNil) + // upgrade connection to TLS + tlsConn := tls.Client(conn, clientConfig(backend1)) + defer tlsConn.Close() - config := Config{ - Listener: listener, - ReadDeadline: time.Millisecond, - EnableProxyProtocol: true, - } - mux, err := New(config) - c.Assert(err, check.IsNil) - go mux.Serve() - defer mux.Close() - - backend1 := &httptest.Server{ - Listener: mux.TLS(), - Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, r.RemoteAddr) - }), - }, - } - backend1.StartTLS() - defer backend1.Close() + // make sure the TLS call failed + _, err = utils.RoundtripWithConn(tlsConn) + assert.NotNil(t, err) + }) - parsedURL, err := url.Parse(backend1.URL) - c.Assert(err, check.IsNil) + // Timeout tests client timeout - client dials, but writes nothing + // make sure server hangs up + t.Run("Timeout", func(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + assert.Nil(t, err) - conn, err := net.Dial("tcp", parsedURL.Host) - c.Assert(err, check.IsNil) - defer conn.Close() + config := Config{ + Listener: listener, + ReadDeadline: time.Millisecond, + EnableProxyProtocol: true, + } + mux, err := New(config) + assert.Nil(t, err) + go mux.Serve() + defer mux.Close() + + backend1 := &httptest.Server{ + Listener: mux.TLS(), + Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, r.RemoteAddr) + }), + }, + } + backend1.StartTLS() + defer backend1.Close() - time.Sleep(config.ReadDeadline + 5*time.Millisecond) - // upgrade connection to TLS - tlsConn := tls.Client(conn, clientConfig(backend1)) - defer tlsConn.Close() + parsedURL, err := url.Parse(backend1.URL) + assert.Nil(t, err) - // roundtrip should fail on the timeout - _, err = utils.RoundtripWithConn(tlsConn) - c.Assert(err, check.NotNil) -} + conn, err := net.Dial("tcp", parsedURL.Host) + assert.Nil(t, err) + defer conn.Close() -// TestUnknownProtocol make sure that multiplexer closes connection -// with unknown protocol -func (s *MuxSuite) TestUnknownProtocol(c *check.C) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - c.Assert(err, check.IsNil) + time.Sleep(config.ReadDeadline + 5*time.Millisecond) + // upgrade connection to TLS + tlsConn := tls.Client(conn, clientConfig(backend1)) + defer tlsConn.Close() - mux, err := New(Config{ - Listener: listener, - EnableProxyProtocol: true, + // roundtrip should fail on the timeout + _, err = utils.RoundtripWithConn(tlsConn) + assert.NotNil(t, err) }) - c.Assert(err, check.IsNil) - go mux.Serve() - defer mux.Close() - - conn, err := net.Dial("tcp", listener.Addr().String()) - c.Assert(err, check.IsNil) - defer conn.Close() - - // try plain HTTP - _, err = fmt.Fprintf(conn, "GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n") - c.Assert(err, check.IsNil) - - // connection should be closed - _, err = conn.Read(make([]byte, 1)) - c.Assert(err, check.Equals, io.EOF) -} - -// TestDisableSSH disables SSH -func (s *MuxSuite) TestDisableSSH(c *check.C) { - listener, err := 
net.Listen("tcp", "127.0.0.1:0") - c.Assert(err, check.IsNil) - mux, err := New(Config{ - Listener: listener, - EnableProxyProtocol: true, - DisableSSH: true, + // UnknownProtocol make sure that multiplexer closes connection + // with unknown protocol + t.Run("UnknownProtocol", func(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + assert.Nil(t, err) + + mux, err := New(Config{ + Listener: listener, + EnableProxyProtocol: true, + }) + assert.Nil(t, err) + go mux.Serve() + defer mux.Close() + + conn, err := net.Dial("tcp", listener.Addr().String()) + assert.Nil(t, err) + defer conn.Close() + + // try plain HTTP + _, err = fmt.Fprintf(conn, "GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n") + assert.Nil(t, err) + + // connection should be closed + _, err = conn.Read(make([]byte, 1)) + assert.Equal(t, err, io.EOF) }) - c.Assert(err, check.IsNil) - go mux.Serve() - defer mux.Close() - - backend1 := &httptest.Server{ - Listener: mux.TLS(), - Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "backend 1") - }), - }, - } - backend1.StartTLS() - defer backend1.Close() - _, err = ssh.Dial("tcp", listener.Addr().String(), &ssh.ClientConfig{ - Auth: []ssh.AuthMethod{ssh.Password("abc123")}, - Timeout: time.Second, - HostKeyCallback: ssh.FixedHostKey(s.signer.PublicKey()), + // DisableSSH disables SSH + t.Run("DisableSSH", func(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + assert.Nil(t, err) + + mux, err := New(Config{ + Listener: listener, + EnableProxyProtocol: true, + DisableSSH: true, + }) + assert.Nil(t, err) + go mux.Serve() + defer mux.Close() + + backend1 := &httptest.Server{ + Listener: mux.TLS(), + Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "backend 1") + }), + }, + } + backend1.StartTLS() + defer backend1.Close() + + _, err = ssh.Dial("tcp", listener.Addr().String(), &ssh.ClientConfig{ + Auth: []ssh.AuthMethod{ssh.Password("abc123")}, + Timeout: time.Second, + HostKeyCallback: ssh.FixedHostKey(signer.PublicKey()), + }) + assert.NotNil(t, err) + + // TLS requests will succeed + client := testClient(backend1) + re, err := client.Get(backend1.URL) + assert.Nil(t, err) + defer re.Body.Close() + bytes, err := ioutil.ReadAll(re.Body) + assert.Nil(t, err) + assert.Equal(t, string(bytes), "backend 1") + + // Close mux, new requests should fail + mux.Close() + mux.Wait() + + // use new client to use new connection pool + client = testClient(backend1) + re, err = client.Get(backend1.URL) + if err == nil { + re.Body.Close() + } + assert.NotNil(t, err) }) - c.Assert(err, check.NotNil) - - // TLS requests will succeed - client := testClient(backend1) - re, err := client.Get(backend1.URL) - c.Assert(err, check.IsNil) - defer re.Body.Close() - bytes, err := ioutil.ReadAll(re.Body) - c.Assert(err, check.IsNil) - c.Assert(string(bytes), check.Equals, "backend 1") - - // Close mux, new requests should fail - mux.Close() - mux.Wait() - - // use new client to use new connection pool - client = testClient(backend1) - re, err = client.Get(backend1.URL) - if err == nil { - re.Body.Close() - } - c.Assert(err, check.NotNil) -} -// TestDisableTLS tests scenario with disabled TLS -func (s *MuxSuite) TestDisableTLS(c *check.C) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - c.Assert(err, check.IsNil) + // TestDisableTLS tests scenario with disabled TLS + t.Run("DisableTLS", func(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + 
assert.Nil(t, err) + + mux, err := New(Config{ + Listener: listener, + EnableProxyProtocol: true, + DisableTLS: true, + }) + assert.Nil(t, err) + go mux.Serve() + defer mux.Close() + + backend1 := &httptest.Server{ + Listener: mux.TLS(), + Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "backend 1") + }), + }, + } + backend1.StartTLS() + defer backend1.Close() + + called := false + sshHandler := sshutils.NewChanHandlerFunc(func(_ context.Context, _ *sshutils.ConnectionContext, nch ssh.NewChannel) { + called = true + err := nch.Reject(ssh.Prohibited, "nothing to see here") + assert.Nil(t, err) + }) + + srv, err := sshutils.NewServer( + "test", + utils.NetAddr{AddrNetwork: "tcp", Addr: "localhost:0"}, + sshHandler, + []ssh.Signer{signer}, + sshutils.AuthMethods{Password: pass("abc123")}, + ) + assert.Nil(t, err) + go srv.Serve(mux.SSH()) + defer srv.Close() + clt, err := ssh.Dial("tcp", listener.Addr().String(), &ssh.ClientConfig{ + Auth: []ssh.AuthMethod{ssh.Password("abc123")}, + Timeout: time.Second, + HostKeyCallback: ssh.FixedHostKey(signer.PublicKey()), + }) + assert.Nil(t, err) + defer clt.Close() + + // call new session to initiate opening new channel + _, err = clt.NewSession() + assert.NotNil(t, err) + // make sure the channel handler was called OK + assert.Equal(t, called, true) + + client := testClient(backend1) + re, err := client.Get(backend1.URL) + if err == nil { + re.Body.Close() + } + assert.NotNil(t, err) - mux, err := New(Config{ - Listener: listener, - EnableProxyProtocol: true, - DisableTLS: true, - }) - c.Assert(err, check.IsNil) - go mux.Serve() - defer mux.Close() - - backend1 := &httptest.Server{ - Listener: mux.TLS(), - Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "backend 1") - }), - }, - } - backend1.StartTLS() - defer backend1.Close() - - called := false - sshHandler := sshutils.NewChanHandlerFunc(func(_ context.Context, _ *sshutils.ConnectionContext, nch ssh.NewChannel) { - called = true - err := nch.Reject(ssh.Prohibited, "nothing to see here") - c.Assert(err, check.IsNil) + // Close mux, new requests should fail + mux.Close() + mux.Wait() }) - srv, err := sshutils.NewServer( - "test", - utils.NetAddr{AddrNetwork: "tcp", Addr: "localhost:0"}, - sshHandler, - []ssh.Signer{s.signer}, - sshutils.AuthMethods{Password: pass("abc123")}, - ) - c.Assert(err, check.IsNil) - go srv.Serve(mux.SSH()) - defer srv.Close() - clt, err := ssh.Dial("tcp", listener.Addr().String(), &ssh.ClientConfig{ - Auth: []ssh.AuthMethod{ssh.Password("abc123")}, - Timeout: time.Second, - HostKeyCallback: ssh.FixedHostKey(s.signer.PublicKey()), + // NextProto tests multiplexing using NextProto selector + t.Run("NextProto", func(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + assert.Nil(t, err) + + mux, err := New(Config{ + Listener: listener, + EnableProxyProtocol: true, + }) + assert.Nil(t, err) + go mux.Serve() + defer mux.Close() + + cfg, err := fixtures.LocalTLSConfig() + assert.Nil(t, err) + + tlsLis, err := NewTLSListener(TLSListenerConfig{ + Listener: tls.NewListener(mux.TLS(), cfg.TLS), + }) + assert.Nil(t, err) + go tlsLis.Serve() + + opts := []grpc.ServerOption{ + grpc.Creds(&httplib.TLSCreds{ + Config: cfg.TLS, + })} + s := grpc.NewServer(opts...) 
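		// Reader's note (not part of the original test): in this setup the
		// TLSListener splits accepted connections by the ALPN protocol
		// negotiated during the TLS handshake. Connections that negotiated
		// "h2" are handed to tlsLis.HTTP2() and served by this gRPC server,
		// while HTTP/1.1 and no-ALPN connections are handed to tlsLis.HTTP()
		// and reach the plain HTTP handler registered below.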
+ test.RegisterPingerServer(s, &server{}) + + errCh := make(chan error, 2) + + go func() { + errCh <- s.Serve(tlsLis.HTTP2()) + }() + + httpServer := http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "http backend") + }), + } + go func() { + err := httpServer.Serve(tlsLis.HTTP()) + if err == nil || err == http.ErrServerClosed { + errCh <- nil + return + } + errCh <- err + }() + + url := fmt.Sprintf("https://%s", listener.Addr()) + client := cfg.NewClient() + re, err := client.Get(url) + assert.Nil(t, err) + defer re.Body.Close() + bytes, err := ioutil.ReadAll(re.Body) + assert.Nil(t, err) + assert.Equal(t, string(bytes), "http backend") + + creds := credentials.NewClientTLSFromCert(cfg.CertPool, "") + + // Set up a connection to the server. + conn, err := grpc.Dial(listener.Addr().String(), grpc.WithTransportCredentials(creds), grpc.WithBlock()) + assert.Nil(t, err) + defer conn.Close() + + gclient := test.NewPingerClient(conn) + + out, err := gclient.Ping(context.TODO(), &test.Request{}) + assert.Nil(t, err) + assert.Equal(t, out.GetPayload(), "grpc backend") + + // Close mux, new requests should fail + mux.Close() + mux.Wait() + + // use new client to use new connection pool + client = cfg.NewClient() + re, err = client.Get(url) + if err == nil { + re.Body.Close() + } + assert.NotNil(t, err) + + httpServer.Close() + s.Stop() + // wait for both servers to finish + for i := 0; i < 2; i++ { + err := <-errCh + assert.Nil(t, err) + } }) - c.Assert(err, check.IsNil) - defer clt.Close() - - // call new session to initiate opening new channel - _, err = clt.NewSession() - c.Assert(err, check.NotNil) - // make sure the channel handler was called OK - c.Assert(called, check.Equals, true) - - client := testClient(backend1) - re, err := client.Get(backend1.URL) - if err == nil { - re.Body.Close() - } - c.Assert(err, check.NotNil) +} + +// server is used to implement test.PingerServer +type server struct { +} - // Close mux, new requests should fail - mux.Close() - mux.Wait() +func (s *server) Ping(ctx context.Context, req *test.Request) (*test.Response, error) { + return &test.Response{Payload: "grpc backend"}, nil } // clientConfig returns tls client config from test http server diff --git a/lib/multiplexer/ping.proto b/lib/multiplexer/ping.proto new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/lib/multiplexer/test/ping.pb.go b/lib/multiplexer/test/ping.pb.go new file mode 100644 index 0000000000000..7f794aefd4eb8 --- /dev/null +++ b/lib/multiplexer/test/ping.pb.go @@ -0,0 +1,577 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ping.proto + +package test + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Request struct { + Payload string `protobuf:"bytes,1,opt,name=Payload,proto3" json:"Payload,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_ping_f8a9b627ed078775, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(dst, src) +} +func (m *Request) XXX_Size() int { + return m.Size() +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + +type Response struct { + Payload string `protobuf:"bytes,1,opt,name=Payload,proto3" json:"Payload,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_ping_f8a9b627ed078775, []int{1} +} +func (m *Response) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(dst, src) +} +func (m *Response) XXX_Size() int { + return m.Size() +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Response proto.InternalMessageInfo + +func (m *Response) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + +func init() { + proto.RegisterType((*Request)(nil), "test.Request") + proto.RegisterType((*Response)(nil), "test.Response") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Pinger service + +type PingerClient interface { + Ping(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) +} + +type pingerClient struct { + cc *grpc.ClientConn +} + +func NewPingerClient(cc *grpc.ClientConn) PingerClient { + return &pingerClient{cc} +} + +func (c *pingerClient) Ping(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) { + out := new(Response) + err := c.cc.Invoke(ctx, "/test.Pinger/Ping", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Pinger service + +type PingerServer interface { + Ping(context.Context, *Request) (*Response, error) +} + +func RegisterPingerServer(s *grpc.Server, srv PingerServer) { + s.RegisterService(&_Pinger_serviceDesc, srv) +} + +func _Pinger_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PingerServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/test.Pinger/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PingerServer).Ping(ctx, req.(*Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Pinger_serviceDesc = grpc.ServiceDesc{ + ServiceName: "test.Pinger", + HandlerType: (*PingerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _Pinger_Ping_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ping.proto", +} + +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Payload) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPing(dAtA, i, uint64(len(m.Payload))) + i += copy(dAtA[i:], m.Payload) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Response) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Response) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Payload) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPing(dAtA, i, uint64(len(m.Payload))) + i += copy(dAtA[i:], m.Payload) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintPing(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Request) Size() (n int) { + var l int + _ = l + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovPing(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Response) Size() (n int) { + var l int + _ = l + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovPing(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovPing(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPing(x uint64) (n int) { + return sovPing(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPing + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPing(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPing + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Response) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Response: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPing + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPing(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPing + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPing(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPing + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPing + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPing + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPing + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPing + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPing(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPing = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPing = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("ping.proto", fileDescriptor_ping_f8a9b627ed078775) } + +var fileDescriptor_ping_f8a9b627ed078775 = []byte{ + // 141 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xc8, 0xcc, 0x4b, + 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x29, 0x49, 0x2d, 0x2e, 0x51, 0x52, 0xe6, 0x62, + 0x0f, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x92, 0xe0, 0x62, 0x0f, 0x48, 0xac, 0xcc, 0xc9, + 0x4f, 0x4c, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x82, 0x71, 0x95, 0x54, 0xb8, 0x38, 0x82, + 0x52, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x71, 0xab, 0x32, 0xd2, 0xe7, 0x62, 0x0b, 0xc8, 0xcc, + 0x4b, 0x4f, 0x2d, 0x12, 0x52, 0xe5, 0x62, 0x01, 0xb1, 0x84, 0x78, 0xf5, 0x40, 0x76, 0xe8, 0x41, + 0x2d, 0x90, 0xe2, 0x83, 0x71, 0x21, 0x46, 0x39, 0x09, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, + 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x33, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0x9d, 0x66, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x3f, 0x92, 0x85, 0xa8, 0x00, 0x00, 0x00, +} diff --git a/lib/multiplexer/test/ping.proto b/lib/multiplexer/test/ping.proto new file mode 100644 index 0000000000000..41a0ea49c6a3f --- /dev/null +++ b/lib/multiplexer/test/ping.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; +package test; + +message Request { string Payload = 1; } + +message Response { string Payload = 1; } + +// Pinger is a service used in tests +service Pinger { rpc Ping(Request) returns (Response); } diff --git a/lib/multiplexer/tls.go b/lib/multiplexer/tls.go new file 
mode 100644 index 0000000000000..7a807f1c8ffa8 --- /dev/null +++ b/lib/multiplexer/tls.go @@ -0,0 +1,189 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multiplexer + +import ( + "context" + "crypto/tls" + "io" + "net" + "sync/atomic" + "time" + + "github.com/gravitational/teleport" + "github.com/gravitational/teleport/lib/defaults" + + "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" + log "github.com/sirupsen/logrus" + "golang.org/x/net/http2" +) + +// TLSListenerConfig specifies listener configuration +type TLSListenerConfig struct { + // Listener is the listener returning *tls.Conn + // connections on Accept + Listener net.Listener + // ID is an identifier used for debugging purposes + ID string + // ReadDeadline is a connection read deadline, + // set to defaults.ReadHeadersTimeout if unspecified + ReadDeadline time.Duration + // Clock is a clock to override in tests, set to real time clock + // by default + Clock clockwork.Clock +} + +// CheckAndSetDefaults verifies configuration and sets defaults +func (c *TLSListenerConfig) CheckAndSetDefaults() error { + if c.Listener == nil { + return trace.BadParameter("missing parameter Listener") + } + if c.ReadDeadline == 0 { + c.ReadDeadline = defaults.ReadHeadersTimeout + } + if c.Clock == nil { + c.Clock = clockwork.NewRealClock() + } + return nil +} + +// NewTLSListener returns a new TLS listener +func NewTLSListener(cfg TLSListenerConfig) (*TLSListener, error) { + if err := cfg.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + context, cancel := context.WithCancel(context.TODO()) + return &TLSListener{ + log: log.WithFields(log.Fields{ + trace.Component: teleport.Component("mxtls", cfg.ID), + }), + cfg: cfg, + http2Listener: newListener(context, cfg.Listener.Addr()), + httpListener: newListener(context, cfg.Listener.Addr()), + cancel: cancel, + context: context, + }, nil +} + +// TLSListener wraps tls.Listener and detects negotiated protocol +// (assuming it's either http/1.1 or http/2) +// and forwards the appropriate responses to either HTTP/1.1 or HTTP/2 +// listeners +type TLSListener struct { + log *log.Entry + cfg TLSListenerConfig + http2Listener *Listener + httpListener *Listener + cancel context.CancelFunc + context context.Context + isClosed int32 +} + +// HTTP2 returns HTTP2 listener +func (l *TLSListener) HTTP2() net.Listener { + return l.http2Listener +} + +// HTTP returns HTTP listener +func (l *TLSListener) HTTP() net.Listener { + return l.httpListener +} + +// Serve accepts and forwards tls.Conn connections +func (l *TLSListener) Serve() error { + backoffTimer := time.NewTicker(5 * time.Second) + defer backoffTimer.Stop() + for { + conn, err := l.cfg.Listener.Accept() + if err == nil { + tlsConn, ok := conn.(*tls.Conn) + if !ok { + conn.Close() + log.Errorf("Expected tls.Conn, got %T, internal usage error.", conn) + continue + } + go l.detectAndForward(tlsConn) + continue + } + if atomic.LoadInt32(&l.isClosed) == 1 { + return 
trace.ConnectionProblem(nil, "listener is closed") + } + select { + case <-backoffTimer.C: + case <-l.context.Done(): + return trace.ConnectionProblem(nil, "listener is closed") + } + } +} + +func (l *TLSListener) detectAndForward(conn *tls.Conn) { + err := conn.SetReadDeadline(l.cfg.Clock.Now().Add(l.cfg.ReadDeadline)) + if err != nil { + l.log.WithError(err).Debugf("Failed to set connection deadline.") + conn.Close() + return + } + if err := conn.Handshake(); err != nil { + if trace.Unwrap(err) != io.EOF { + l.log.WithError(err).Warning("Handshake failed.") + } + conn.Close() + return + } + + err = conn.SetReadDeadline(time.Time{}) + if err != nil { + l.log.WithError(err).Warning("Failed to reset read deadline") + conn.Close() + return + } + + switch conn.ConnectionState().NegotiatedProtocol { + case http2.NextProtoTLS: + select { + case l.http2Listener.connC <- conn: + case <-l.context.Done(): + conn.Close() + return + } + case teleport.HTTPNextProtoTLS, "": + select { + case l.httpListener.connC <- conn: + case <-l.context.Done(): + conn.Close() + return + } + default: + conn.Close() + l.log.WithError(err).Errorf("unsupported protocol: %v", conn.ConnectionState().NegotiatedProtocol) + return + } +} + +// Close closes the listener. +// Any blocked Accept operations will be unblocked and return errors. +func (l *TLSListener) Close() error { + defer l.cancel() + atomic.StoreInt32(&l.isClosed, 1) + return l.cfg.Listener.Close() +} + +// Addr returns the listener's network address. +func (l *TLSListener) Addr() net.Addr { + return l.cfg.Listener.Addr() +} diff --git a/lib/multiplexer/wrappers.go b/lib/multiplexer/wrappers.go index 83c3c8d3e7a87..ee70afaf4471b 100644 --- a/lib/multiplexer/wrappers.go +++ b/lib/multiplexer/wrappers.go @@ -60,7 +60,7 @@ func newListener(parent context.Context, addr net.Addr) *Listener { context, cancel := context.WithCancel(parent) return &Listener{ addr: addr, - connC: make(chan *Conn), + connC: make(chan net.Conn), cancel: cancel, context: context, } @@ -70,7 +70,7 @@ func newListener(parent context.Context, addr net.Addr) *Listener { // connections from multiplexer based on the connection type type Listener struct { addr net.Addr - connC chan *Conn + connC chan net.Conn cancel context.CancelFunc context context.Context } diff --git a/lib/reversetunnel/localsite.go b/lib/reversetunnel/localsite.go index 1923a335168f1..c089576a3bfcc 100644 --- a/lib/reversetunnel/localsite.go +++ b/lib/reversetunnel/localsite.go @@ -169,7 +169,7 @@ func (s *localSite) Dial(params DialParams) (net.Conn, error) { if err != nil { return nil, trace.Wrap(err) } - if clusterConfig.GetSessionRecording() == services.RecordAtProxy { + if services.IsRecordAtProxy(clusterConfig.GetSessionRecording()) { return s.dialWithAgent(params) } @@ -235,6 +235,8 @@ func (s *localSite) dialWithAgent(params DialParams) (net.Conn, error) { Address: params.Address, UseTunnel: useTunnel, HostUUID: s.srv.ID, + Emitter: s.srv.Config.Emitter, + ParentContext: s.srv.Context, } remoteServer, err := forward.New(serverConfig) if err != nil { diff --git a/lib/reversetunnel/remotesite.go b/lib/reversetunnel/remotesite.go index e253557732e04..baeb406a1e717 100644 --- a/lib/reversetunnel/remotesite.go +++ b/lib/reversetunnel/remotesite.go @@ -508,7 +508,7 @@ func (s *remoteSite) Dial(params DialParams) (net.Conn, error) { // If the proxy is in recording mode use the agent to dial and build a // in-memory forwarding server. 
- if clusterConfig.GetSessionRecording() == services.RecordAtProxy { + if services.IsRecordAtProxy(clusterConfig.GetSessionRecording()) { return s.dialWithAgent(params) } return s.DialTCP(params) @@ -577,6 +577,8 @@ func (s *remoteSite) dialWithAgent(params DialParams) (net.Conn, error) { UseTunnel: targetConn.UseTunnel(), FIPS: s.srv.FIPS, HostUUID: s.srv.ID, + Emitter: s.srv.Config.Emitter, + ParentContext: s.srv.Context, } remoteServer, err := forward.New(serverConfig) if err != nil { diff --git a/lib/reversetunnel/srv.go b/lib/reversetunnel/srv.go index 1889f0c600d34..8b8b27bc8c8d2 100644 --- a/lib/reversetunnel/srv.go +++ b/lib/reversetunnel/srv.go @@ -1,5 +1,5 @@ /* -Copyright 2015 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -28,6 +28,7 @@ import ( "github.com/gravitational/teleport" "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/defaults" + "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/limiter" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/sshca" @@ -183,6 +184,9 @@ type Config struct { // FIPS means Teleport was started in a FedRAMP/FIPS 140-2 compliant // configuration. FIPS bool + + // Emitter is event emitter + Emitter events.StreamEmitter } // CheckAndSetDefaults checks parameters and sets default values @@ -202,6 +206,9 @@ func (cfg *Config) CheckAndSetDefaults() error { if cfg.DataDir == "" { return trace.BadParameter("missing parameter DataDir") } + if cfg.Emitter == nil { + return trace.BadParameter("missing parameter Emitter") + } if cfg.Context == nil { cfg.Context = context.TODO() } diff --git a/lib/service/cfg.go b/lib/service/cfg.go index 9c6de592108b8..83cc35175395e 100644 --- a/lib/service/cfg.go +++ b/lib/service/cfg.go @@ -155,7 +155,7 @@ type Config struct { // UploadEventsC is a channel for upload events // used in tests - UploadEventsC chan *events.UploadEvent `json:"-"` + UploadEventsC chan events.UploadEvent `json:"-"` // FileDescriptors is an optional list of file descriptors for the process // to inherit and use for listeners, used for in-process updates. diff --git a/lib/service/service.go b/lib/service/service.go index 25fd33349969d..822e447acca49 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -1,5 +1,5 @@ /* -Copyright 2015-2019 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -261,7 +261,7 @@ type keyPairKey struct { // processIndex is an internal process index // to help differentiate between two different teleport processes // during in-process reload. 
-var processID int32 = 0 +var processID int32 func nextProcessID() int32 { return atomic.AddInt32(&processID, 1) @@ -767,10 +767,27 @@ func adminCreds() (*int, *int, error) { // initUploadHandler initializes upload handler based on the config settings, // currently the only upload handler supported is S3 -// the call can return trace.NotFOund if no upload handler is setup -func initUploadHandler(auditConfig services.AuditConfig) (events.UploadHandler, error) { +// the call can return trace.NotFound if no upload handler is setup +func initUploadHandler(auditConfig services.AuditConfig, dataDir string) (events.MultipartHandler, error) { if auditConfig.AuditSessionsURI == "" { - return nil, trace.NotFound("no upload handler is setup") + recordsDir := filepath.Join(dataDir, events.RecordsDir) + if err := os.MkdirAll(recordsDir, teleport.SharedDirMode); err != nil { + return nil, trace.ConvertSystemError(err) + } + handler, err := filesessions.NewHandler(filesessions.Config{ + Directory: recordsDir, + }) + if err != nil { + return nil, trace.Wrap(err) + } + wrapper, err := events.NewLegacyHandler(events.LegacyHandlerConfig{ + Handler: handler, + Dir: dataDir, + }) + if err != nil { + return nil, trace.Wrap(err) + } + return wrapper, nil } uri, err := utils.ParseSessionsURI(auditConfig.AuditSessionsURI) if err != nil { @@ -924,6 +941,9 @@ func (process *TeleportProcess) initAuthService() error { } process.backend = b + var emitter events.Emitter + var streamer events.Streamer + var uploadHandler events.MultipartHandler // create the audit log, which will be consuming (and recording) all events // and recording all sessions. if cfg.Auth.NoAudit { @@ -934,6 +954,8 @@ func (process *TeleportProcess) initAuthService() error { "turned off. This is dangerous, you will not be able to view audit events " + "or save and playback recorded sessions." process.Warn(warningMessage) + discard := events.NewDiscardEmitter() + emitter, streamer = discard, discard } else { // check if session recording has been disabled. note, we will continue // logging audit events, we just won't record sessions. @@ -947,18 +969,27 @@ func (process *TeleportProcess) initAuthService() error { } auditConfig := cfg.Auth.ClusterConfig.GetAuditConfig() - uploadHandler, err := initUploadHandler(auditConfig) + uploadHandler, err := initUploadHandler( + auditConfig, filepath.Join(cfg.DataDir, teleport.LogsDir)) if err != nil { if !trace.IsNotFound(err) { return trace.Wrap(err) } } + streamer, err = events.NewProtoStreamer(events.ProtoStreamerConfig{ + Uploader: uploadHandler, + }) + if err != nil { + return trace.Wrap(err) + } // initialize external loggers. may return (nil, nil) if no // external loggers have been defined. 
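A minimal sketch (editor's illustration, not part of this change) of the default wiring initUploadHandler now falls back to when AuditSessionsURI is unset: recordings are streamed through a ProtoStreamer whose uploads land in a local filesessions handler. Only calls that appear in this diff are used; the package, function name, and directory are illustrative, and import paths follow those used elsewhere in the change.

// Editor's sketch; names and directory are illustrative.
package streamersketch

import (
	"github.com/gravitational/teleport/lib/events"
	"github.com/gravitational/teleport/lib/events/filesessions"

	"github.com/gravitational/trace"
)

// newFileBackedStreamer builds a streamer that writes protobuf session
// recordings into dir, e.g. <data_dir>/log/records.
func newFileBackedStreamer(dir string) (events.Streamer, error) {
	handler, err := filesessions.NewHandler(filesessions.Config{
		Directory: dir,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	streamer, err := events.NewProtoStreamer(events.ProtoStreamerConfig{
		Uploader: handler,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return streamer, nil
}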
externalLog, err := initExternalLog(auditConfig) if err != nil { - return trace.Wrap(err) + if !trace.IsNotFound(err) { + return trace.Wrap(err) + } } auditServiceConfig := events.AuditLogConfig{ @@ -973,10 +1004,49 @@ func (process *TeleportProcess) initAuthService() error { if err != nil { return trace.Wrap(err) } - process.auditLog, err = events.NewAuditLog(auditServiceConfig) + localLog, err := events.NewAuditLog(auditServiceConfig) if err != nil { return trace.Wrap(err) } + process.auditLog = localLog + if externalLog != nil { + externalEmitter, ok := externalLog.(events.Emitter) + if !ok { + return trace.BadParameter("expected emitter, but %T does not emit", externalLog) + } + emitter = externalEmitter + } else { + emitter = localLog + } + } + + // Upload completer is responsible for checking for initiated but abandoned + // session uploads and completing them + var uploadCompleter *events.UploadCompleter + if uploadHandler != nil { + uploadCompleter, err = events.NewUploadCompleter(events.UploadCompleterConfig{ + Uploader: uploadHandler, + Component: teleport.ComponentAuth, + }) + if err != nil { + return trace.Wrap(err) + } + } + + checkingEmitter, err := events.NewCheckingEmitter(events.CheckingEmitterConfig{ + Inner: events.NewMultiEmitter(events.NewLoggingEmitter(), emitter), + Clock: process.Clock, + }) + if err != nil { + return trace.Wrap(err) + } + + checkingStreamer, err := events.NewCheckingStreamer(events.CheckingStreamerConfig{ + Inner: streamer, + Clock: process.Clock, + }) + if err != nil { + return trace.Wrap(err) } // first, create the AuthServer @@ -1006,6 +1076,8 @@ func (process *TeleportProcess) initAuthService() error { AuditLog: process.auditLog, CipherSuites: cfg.CipherSuites, CASigningAlg: cfg.CASignatureAlgorithm, + Emitter: checkingEmitter, + Streamer: events.NewReportingStreamer(checkingStreamer, process.Config.UploadEventsC), }) if err != nil { return trace.Wrap(err) @@ -1034,6 +1106,7 @@ func (process *TeleportProcess) initAuthService() error { SessionService: sessionService, Authorizer: authorizer, AuditLog: process.auditLog, + Emitter: checkingEmitter, } var authCache auth.AuthCache @@ -1063,16 +1136,6 @@ func (process *TeleportProcess) initAuthService() error { if err != nil { return trace.Wrap(err) } - tlsServer, err := auth.NewTLSServer(auth.TLSServerConfig{ - TLS: tlsConfig, - APIConfig: *apiConf, - LimiterConfig: cfg.Auth.Limiter, - AccessPoint: authCache, - Component: teleport.Component(teleport.ComponentAuth, process.id), - }) - if err != nil { - return trace.Wrap(err) - } // auth server listens on SSH and TLS, reusing the same socket listener, err := process.importOrCreateListener(listenerAuthSSH, cfg.Auth.SSHAddr.Addr) if err != nil { @@ -1094,13 +1157,25 @@ func (process *TeleportProcess) initAuthService() error { return trace.Wrap(err) } go mux.Serve() + tlsServer, err := auth.NewTLSServer(auth.TLSServerConfig{ + TLS: tlsConfig, + APIConfig: *apiConf, + LimiterConfig: cfg.Auth.Limiter, + AccessPoint: authCache, + Component: teleport.Component(teleport.ComponentAuth, process.id), + ID: process.id, + Listener: mux.TLS(), + }) + if err != nil { + return trace.Wrap(err) + } process.RegisterCriticalFunc("auth.tls", func() error { utils.Consolef(cfg.Console, teleport.ComponentAuth, "Auth service %s:%s is starting on %v.", teleport.Version, teleport.Gitref, cfg.Auth.SSHAddr.Addr) // since tlsServer.Serve is a blocking call, we emit this even right before // the service has started process.BroadcastEvent(Event{Name: AuthTLSReady, Payload: nil}) - 
err := tlsServer.Serve(mux.TLS()) + err := tlsServer.Serve() if err != nil && err != http.ErrServerClosed { log.Warningf("TLS server exited with error: %v.", err) } @@ -1219,6 +1294,9 @@ func (process *TeleportProcess) initAuthService() error { ctx := payloadContext(payload) warnOnErr(tlsServer.Shutdown(ctx)) } + if uploadCompleter != nil { + warnOnErr(uploadCompleter.Close()) + } log.Info("Exited.") }) return nil @@ -1498,6 +1576,22 @@ func (process *TeleportProcess) initSSH() error { cfg.SSH.Addr = *defaults.SSHServerListenAddr() } + emitter, err := events.NewCheckingEmitter(events.CheckingEmitterConfig{ + Inner: events.NewMultiEmitter(events.NewLoggingEmitter(), conn.Client), + Clock: process.Clock, + }) + if err != nil { + return trace.Wrap(err) + } + + streamer, err := events.NewCheckingStreamer(events.CheckingStreamerConfig{ + Inner: conn.Client, + Clock: process.Clock, + }) + if err != nil { + return trace.Wrap(err) + } + s, err = regular.New(cfg.SSH.Addr, cfg.Hostname, []ssh.Signer{conn.ServerIdentity.KeySigner}, @@ -1507,7 +1601,7 @@ func (process *TeleportProcess) initSSH() error { process.proxyPublicAddr(), regular.SetLimiter(limiter), regular.SetShell(cfg.SSH.Shell), - regular.SetAuditLog(conn.Client), + regular.SetEmitter(&events.StreamerAndEmitter{Emitter: emitter, Streamer: streamer}), regular.SetSessionServer(conn.Client), regular.SetLabels(cfg.SSH.Labels, cfg.SSH.CmdLabels), regular.SetNamespace(namespace), @@ -1660,28 +1754,39 @@ func (process *TeleportProcess) initUploaderService(accessPoint auth.AccessPoint if err != nil { return trace.Wrap(err) } - // prepare dirs for uploader - path := []string{process.Config.DataDir, teleport.LogsDir, teleport.ComponentUpload, events.SessionLogsDir, defaults.Namespace} - for i := 1; i < len(path); i++ { - dir := filepath.Join(path[:i+1]...) - log.Infof("Creating directory %v.", dir) - err := os.Mkdir(dir, 0755) - err = trace.ConvertSystemError(err) - if err != nil { - if !trace.IsAlreadyExists(err) { - return trace.Wrap(err) - } - } - if uid != nil && gid != nil { - log.Infof("Setting directory %v owner to %v:%v.", dir, *uid, *gid) - err := os.Chown(dir, *uid, *gid) + streamingDir := []string{process.Config.DataDir, teleport.LogsDir, teleport.ComponentUpload, events.StreamingLogsDir, defaults.Namespace} + paths := [][]string{ + // DELETE IN (5.1.0) + // this directory will no longer be used after migration to 5.1.0 + []string{process.Config.DataDir, teleport.LogsDir, teleport.ComponentUpload, events.SessionLogsDir, defaults.Namespace}, + // This directory will remain to be used after migration to 5.1.0 + streamingDir, + } + for _, path := range paths { + for i := 1; i < len(path); i++ { + dir := filepath.Join(path[:i+1]...) 
+ log.Infof("Creating directory %v.", dir) + err := os.Mkdir(dir, 0755) + err = trace.ConvertSystemError(err) if err != nil { - return trace.ConvertSystemError(err) + if !trace.IsAlreadyExists(err) { + return trace.Wrap(err) + } + } + if uid != nil && gid != nil { + log.Infof("Setting directory %v owner to %v:%v.", dir, *uid, *gid) + err := os.Chown(dir, *uid, *gid) + if err != nil { + return trace.ConvertSystemError(err) + } } } } + // DELETE IN (5.1.0) + // this uploader was superseded by filesessions.Uploader, + // see below uploader, err := events.NewUploader(events.UploaderConfig{ DataDir: filepath.Join(process.Config.DataDir, teleport.LogsDir), Namespace: defaults.Namespace, @@ -1705,6 +1810,32 @@ func (process *TeleportProcess) initUploaderService(accessPoint auth.AccessPoint warnOnErr(uploader.Stop()) log.Infof("Exited.") }) + + // This uploader supersedes the events.Uploader above, + // that is kept for backwards compatibility purposes for one release. + // Delete this comment once the uploader above is phased out. + fileUploader, err := filesessions.NewUploader(filesessions.UploaderConfig{ + ScanDir: filepath.Join(streamingDir...), + Streamer: accessPoint, + EventsC: process.Config.UploadEventsC, + }) + if err != nil { + return trace.Wrap(err) + } + process.RegisterFunc("fileuploader.service", func() error { + err := fileUploader.Serve() + if err != nil { + log.WithError(err).Errorf("File uploader server exited with error.") + } + return nil + }) + + process.onExit("fileuploader.shutdown", func(payload interface{}) { + log.Infof("File uploader is shutting down.") + warnOnErr(fileUploader.Close()) + log.Infof("File uploader has shut down.") + }) + return nil } @@ -1906,6 +2037,9 @@ func (process *TeleportProcess) initProxy() error { err := process.initProxyEndpoint(conn) if err != nil { + if conn.Client != nil { + warnOnErr(conn.Client.Close()) + } return trace.Wrap(err) } @@ -2063,6 +2197,22 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { trace.Component: teleport.Component(teleport.ComponentReverseTunnelServer, process.id), }) + emitter, err := events.NewCheckingEmitter(events.CheckingEmitterConfig{ + Inner: events.NewMultiEmitter(events.NewLoggingEmitter(), conn.Client), + Clock: process.Clock, + }) + if err != nil { + return trace.Wrap(err) + } + + streamer, err := events.NewCheckingStreamer(events.CheckingStreamerConfig{ + Inner: conn.Client, + Clock: process.Clock, + }) + if err != nil { + return trace.Wrap(err) + } + // register SSH reverse tunnel server that accepts connections // from remote teleport nodes var tsrv reversetunnel.Server @@ -2092,6 +2242,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { DataDir: process.Config.DataDir, PollingPeriod: process.Config.PollingPeriod, FIPS: cfg.FIPS, + Emitter: &events.StreamerAndEmitter{Emitter: emitter, Streamer: streamer}, }) if err != nil { return trace.Wrap(err) @@ -2198,7 +2349,6 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { regular.SetLimiter(proxyLimiter), regular.SetProxyMode(tsrv), regular.SetSessionServer(conn.Client), - regular.SetAuditLog(conn.Client), regular.SetCiphers(cfg.Ciphers), regular.SetKEXAlgorithms(cfg.KEXAlgorithms), regular.SetMACAlgorithms(cfg.MACAlgorithms), @@ -2212,6 +2362,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { process.BroadcastEvent(Event{Name: TeleportOKEvent, Payload: teleport.ComponentProxy}) } }), + regular.SetEmitter(&events.StreamerAndEmitter{Emitter: emitter, 
Streamer: streamer}), ) if err != nil { return trace.Wrap(err) @@ -2276,7 +2427,6 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { Client: conn.Client, DataDir: cfg.DataDir, AccessPoint: accessPoint, - AuditLog: conn.Client, ServerID: cfg.HostUUID, ClusterOverride: cfg.Proxy.Kube.ClusterOverride, KubeconfigPath: cfg.Proxy.Kube.KubeconfigPath, @@ -2346,6 +2496,11 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { warnOnErr(webHandler.Close()) } } + // Close client after graceful shutdown has been completed, + // to make sure in flight streams are not terminated, + if conn.Client != nil { + warnOnErr(conn.Client.Close()) + } log.Infof("Exited.") }) if err := process.initUploaderService(accessPoint, conn.Client); err != nil { diff --git a/lib/services/clusterconfig.go b/lib/services/clusterconfig.go index 9e93f43dbba0e..32a725a57faa1 100644 --- a/lib/services/clusterconfig.go +++ b/lib/services/clusterconfig.go @@ -171,8 +171,26 @@ const ( // RecordOff is used to disable session recording completely. RecordOff string = "off" + + // RecordAtNodeSync enables the nodes to stream sessions in sync mode + // to the auth server + RecordAtNodeSync string = "node-sync" + + // RecordAtProxySync enables the recording proxy which intercepts and records + // all sessions, streams the records synchronously + RecordAtProxySync string = "proxy-sync" ) +// IsRecordAtProxy returns true if recording is sync or async at proxy +func IsRecordAtProxy(mode string) bool { + return mode == RecordAtProxy || mode == RecordAtProxySync +} + +// IsRecordSync returns true if recording is sync or async for proxy or node +func IsRecordSync(mode string) bool { + return mode == RecordAtProxySync || mode == RecordAtNodeSync +} + const ( // HostKeyCheckYes is the default. The proxy will check the host key of the // target node it connects to. @@ -361,7 +379,7 @@ func (c *ClusterConfigV3) CheckAndSetDefaults() error { } // check if the recording type is valid - all := []string{RecordAtNode, RecordAtProxy, RecordOff} + all := []string{RecordAtNode, RecordAtProxy, RecordAtNodeSync, RecordAtProxySync, RecordOff} ok := utils.SliceContainsStr(all, c.Spec.SessionRecording) if !ok { return trace.BadParameter("session_recording must either be: %v", strings.Join(all, ",")) diff --git a/lib/services/role.go b/lib/services/role.go index e8e460a55a1ca..9ece3f2bdd455 100644 --- a/lib/services/role.go +++ b/lib/services/role.go @@ -1451,7 +1451,7 @@ func ExtractFromCertificate(access UserGetter, cert *ssh.Certificate) ([]string, // ExtractFromIdentity will extract roles and traits from the *x509.Certificate // which Teleport passes along as a *tlsca.Identity. If roles and traits do not // exist in the certificates, they are extracted from the backend. -func ExtractFromIdentity(access UserGetter, identity *tlsca.Identity) ([]string, wrappers.Traits, error) { +func ExtractFromIdentity(access UserGetter, identity tlsca.Identity) ([]string, wrappers.Traits, error) { // For legacy certificates, fetch roles and traits from the services.User // object in the backend. if missingIdentity(identity) { @@ -1501,7 +1501,7 @@ func isFormatOld(cert *ssh.Certificate) bool { // missingIdentity returns true if the identity is missing or the identity // has no roles or traits. 
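A minimal sketch (editor's illustration, not part of this change) of how the helpers added to clusterconfig.go above classify the five accepted session_recording modes; the program only exercises constants and functions defined in this diff or already present in lib/services.

// Editor's sketch, not part of the diff.
package main

import (
	"fmt"

	"github.com/gravitational/teleport/lib/services"
)

func main() {
	modes := []string{
		services.RecordAtNode,      // "node"
		services.RecordAtNodeSync,  // "node-sync"
		services.RecordAtProxy,     // "proxy"
		services.RecordAtProxySync, // "proxy-sync"
		services.RecordOff,         // "off"
	}
	for _, m := range modes {
		fmt.Printf("%-12s recordAtProxy=%-5v syncStreaming=%v\n",
			m, services.IsRecordAtProxy(m), services.IsRecordSync(m))
	}
}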
-func missingIdentity(identity *tlsca.Identity) bool { +func missingIdentity(identity tlsca.Identity) bool { if len(identity.Groups) == 0 || len(identity.Traits) == 0 { return true } diff --git a/lib/services/role_test.go b/lib/services/role_test.go index b95bc359fc921..faf90e07dc743 100644 --- a/lib/services/role_test.go +++ b/lib/services/role_test.go @@ -1527,7 +1527,7 @@ func (s *RoleSuite) TestExtractFrom(c *C) { roles, traits, err = ExtractFromIdentity(&userGetter{ roles: origRoles, traits: origTraits, - }, identity) + }, *identity) c.Assert(err, IsNil) c.Assert(roles, DeepEquals, origRoles) c.Assert(traits, DeepEquals, origTraits) @@ -1547,7 +1547,7 @@ func (s *RoleSuite) TestExtractFrom(c *C) { roles, traits, err = ExtractFromIdentity(&userGetter{ roles: origRoles, traits: origTraits, - }, identity) + }, *identity) c.Assert(err, IsNil) c.Assert(roles, DeepEquals, origRoles) c.Assert(traits, DeepEquals, origTraits) @@ -1586,7 +1586,7 @@ func (s *RoleSuite) TestExtractFromLegacy(c *C) { roles, traits, err = ExtractFromIdentity(&userGetter{ roles: origRoles, traits: origTraits, - }, identity) + }, *identity) c.Assert(err, IsNil) c.Assert(roles, DeepEquals, origRoles) c.Assert(traits, DeepEquals, origTraits) @@ -1607,7 +1607,7 @@ func (s *RoleSuite) TestExtractFromLegacy(c *C) { roles, traits, err = ExtractFromIdentity(&userGetter{ roles: newRoles, traits: newTraits, - }, identity) + }, *identity) c.Assert(err, IsNil) c.Assert(roles, DeepEquals, newRoles) c.Assert(traits, DeepEquals, newTraits) diff --git a/lib/srv/authhandlers.go b/lib/srv/authhandlers.go index 7dcc10b70823f..fbb6b56c3180b 100644 --- a/lib/srv/authhandlers.go +++ b/lib/srv/authhandlers.go @@ -45,8 +45,8 @@ type AuthHandlers struct { // Component is the type of SSH server (node, proxy, or recording proxy). Component string - // AuditLog is the service used to access Audit Log. - AuditLog events.IAuditLog + // Emitter is event emitter + Emitter events.Emitter // AccessPoint is used to access the Auth Server. 
AccessPoint auth.AccessPoint @@ -108,18 +108,29 @@ func (h *AuthHandlers) CheckPortForward(addr string, ctx *ServerContext) error { systemErrorMessage := fmt.Sprintf("port forwarding not allowed by role set: %v", ctx.Identity.RoleSet) userErrorMessage := "port forwarding not allowed" - // emit port forward failure event - if err := h.AuditLog.EmitAuditEvent(events.PortForwardFailure, events.EventFields{ - events.PortForwardAddr: addr, - events.PortForwardSuccess: false, - events.PortForwardErr: systemErrorMessage, - events.EventLogin: ctx.Identity.Login, - events.EventUser: ctx.Identity.TeleportUser, - events.LocalAddr: ctx.ServerConn.LocalAddr().String(), - events.RemoteAddr: ctx.ServerConn.RemoteAddr().String(), + // Emit port forward failure event + if err := h.Emitter.EmitAuditEvent(h.Server.Context(), &events.PortForward{ + Metadata: events.Metadata{ + Type: events.PortForwardEvent, + Code: events.PortForwardFailureCode, + }, + UserMetadata: events.UserMetadata{ + Login: ctx.Identity.Login, + User: ctx.Identity.TeleportUser, + }, + ConnectionMetadata: events.ConnectionMetadata{ + LocalAddr: ctx.ServerConn.LocalAddr().String(), + RemoteAddr: ctx.ServerConn.RemoteAddr().String(), + }, + Addr: addr, + Status: events.Status{ + Success: false, + Error: systemErrorMessage, + }, }); err != nil { - h.Warnf("Failed to emit port forward deny audit event: %v", err) + h.WithError(err).Warn("Failed to emit port forward deny audit event.") } + h.Warnf("Port forwarding request denied: %v.", systemErrorMessage) return trace.AccessDenied(userErrorMessage) @@ -162,14 +173,25 @@ func (h *AuthHandlers) UserKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s // only failed attempts are logged right now recordFailedLogin := func(err error) { - fields := events.EventFields{ - events.EventUser: teleportUser, - events.AuthAttemptSuccess: false, - events.AuthAttemptErr: err.Error(), - } - log.Warnf("failed login attempt %#v", fields) - if err := h.AuditLog.EmitAuditEvent(events.AuthAttemptFailure, fields); err != nil { - log.Warnf("Failed to emit failed login audit event: %v", err) + if err := h.Emitter.EmitAuditEvent(h.Server.Context(), &events.AuthAttempt{ + Metadata: events.Metadata{ + Type: events.AuthAttemptEvent, + Code: events.AuthAttemptFailureCode, + }, + UserMetadata: events.UserMetadata{ + Login: conn.User(), + User: teleportUser, + }, + ConnectionMetadata: events.ConnectionMetadata{ + LocalAddr: conn.LocalAddr().String(), + RemoteAddr: conn.RemoteAddr().String(), + }, + Status: events.Status{ + Success: false, + Error: err.Error(), + }, + }); err != nil { + h.WithError(err).Warn("Failed to emit failed login audit event.") } } diff --git a/lib/srv/ctx.go b/lib/srv/ctx.go index 347c4c6af45cd..716384f8c9d18 100644 --- a/lib/srv/ctx.go +++ b/lib/srv/ctx.go @@ -69,6 +69,10 @@ func init() { // Server is regular or forwarding SSH server. type Server interface { + // Emitter allows server to emit audit events and create + // event streams for recording sessions + events.StreamEmitter + // ID is the unique ID of the server. ID() string @@ -89,12 +93,6 @@ type Server interface { // startup is allowed. PermitUserEnvironment() bool - // EmitAuditEvent emits an Audit Event to the Auth Server. - EmitAuditEvent(events.Event, events.EventFields) - - // GetAuditLog returns the Audit Log for this cluster. - GetAuditLog() events.IAuditLog - // GetAccessPoint returns an auth.AccessPoint for this cluster. 
GetAccessPoint() auth.AccessPoint @@ -119,6 +117,9 @@ type Server interface { // GetBPF returns the BPF service used for enhanced session recording. GetBPF() bpf.BPF + + // Context returns server shutdown context + Context() context.Context } // IdentityContext holds all identity information associated with the user @@ -240,6 +241,9 @@ type ServerContext struct { // on client inactivity, set to 0 if not setup clientIdleTimeout time.Duration + // cancelContext signals closure to all outstanding operations + cancelContext context.Context + // cancel is called whenever server context is closed cancel context.CancelFunc @@ -287,7 +291,7 @@ func NewServerContext(ctx context.Context, parent *sshutils.ConnectionContext, s return nil, nil, trace.Wrap(err) } - ctx, cancel := context.WithCancel(ctx) + cancelContext, cancel := context.WithCancel(ctx) child := &ServerContext{ ConnectionContext: parent, @@ -300,6 +304,7 @@ func NewServerContext(ctx context.Context, parent *sshutils.ConnectionContext, s ClusterConfig: clusterConfig, Identity: identityContext, clientIdleTimeout: identityContext.RoleSet.AdjustClientIdleTimeout(clusterConfig.GetClientIdleTimeout()), + cancelContext: cancelContext, cancel: cancel, } @@ -333,12 +338,12 @@ func NewServerContext(ctx context.Context, parent *sshutils.ConnectionContext, s Clock: child.srv.GetClock(), Tracker: child, Conn: child.ServerConn, - Context: ctx, + Context: cancelContext, TeleportUser: child.Identity.TeleportUser, Login: child.Identity.Login, ServerID: child.srv.ID(), - Audit: child.srv.GetAuditLog(), Entry: child.Entry, + Emitter: child.srv, }) if err != nil { child.Close() @@ -517,7 +522,7 @@ func (c *ServerContext) reportStats(conn utils.Stater) { if c.GetServer().Component() == teleport.ComponentProxy { return } - if c.ClusterConfig.GetSessionRecording() == services.RecordAtProxy && + if services.IsRecordAtProxy(c.ClusterConfig.GetSessionRecording()) && c.GetServer().Component() == teleport.ComponentNode { return } @@ -529,23 +534,34 @@ func (c *ServerContext) reportStats(conn utils.Stater) { // below, that is because the connection is held from the perspective of // the server not the client, but the logs are from the perspective of the // client. 
- eventFields := events.EventFields{ - events.DataTransmitted: rxBytes, - events.DataReceived: txBytes, - events.SessionServerID: c.GetServer().HostUUID(), - events.EventLogin: c.Identity.Login, - events.EventUser: c.Identity.TeleportUser, - events.RemoteAddr: c.ServerConn.RemoteAddr().String(), - events.EventIndex: events.SessionDataIndex, + sessionDataEvent := &events.SessionData{ + Metadata: events.Metadata{ + Index: events.SessionDataIndex, + Type: events.SessionDataEvent, + Code: events.SessionDataCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: c.GetServer().HostUUID(), + ServerNamespace: c.GetServer().GetNamespace(), + }, + UserMetadata: events.UserMetadata{ + User: c.Identity.TeleportUser, + Login: c.Identity.Login, + }, + ConnectionMetadata: events.ConnectionMetadata{ + RemoteAddr: c.ServerConn.RemoteAddr().String(), + }, + BytesTransmitted: rxBytes, + BytesReceived: txBytes, } if !c.srv.UseTunnel() { - eventFields[events.LocalAddr] = c.ServerConn.LocalAddr().String() + sessionDataEvent.ConnectionMetadata.LocalAddr = c.ServerConn.LocalAddr().String() } if c.session != nil { - eventFields[events.SessionEventID] = c.session.id + sessionDataEvent.SessionMetadata.SessionID = string(c.session.id) } - if err := c.GetServer().GetAuditLog().EmitAuditEvent(events.SessionData, eventFields); err != nil { - c.Warnf("Failed to emit SessionData audit event: %v", err) + if err := c.GetServer().EmitAuditEvent(c.GetServer().Context(), sessionDataEvent); err != nil { + c.WithError(err).Warn("Failed to emit session data event.") } // Emit TX and RX bytes to their respective Prometheus counters. @@ -572,6 +588,12 @@ func (c *ServerContext) Close() error { return nil } +// CancelContext is a context associated with server context, +// closed whenever this server context is closed +func (c *ServerContext) CancelContext() context.Context { + return c.cancelContext +} + // CancelFunc gets the context.CancelFunc associated with // this context. Not a substitute for calling the // ServerContext.Close method. diff --git a/lib/srv/exec.go b/lib/srv/exec.go index 451b288ea60e0..be170688a5923 100644 --- a/lib/srv/exec.go +++ b/lib/srv/exec.go @@ -1,5 +1,5 @@ /* -Copyright 2015 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -93,7 +93,7 @@ func NewExecRequest(ctx *ServerContext, command string) (Exec, error) { // When in recording mode, return an *remoteExec which will execute the // command on a remote host. This is used by in-memory forwarding nodes. - if ctx.ClusterConfig.GetSessionRecording() == services.RecordAtProxy { + if services.IsRecordAtProxy(ctx.ClusterConfig.GetSessionRecording()) == true { return &remoteExec{ ctx: ctx, command: command, @@ -354,33 +354,40 @@ func (r *remoteExec) PID() int { } func emitExecAuditEvent(ctx *ServerContext, cmd string, execErr error) { - // Report the result of this exec event to the audit logger. - auditLog := ctx.srv.GetAuditLog() - if auditLog == nil { - log.Warnf("No audit log") - return + // Create common fields for event. + serverMeta := events.ServerMetadata{ + ServerID: ctx.srv.HostUUID(), + ServerNamespace: ctx.srv.GetNamespace(), } - var event events.Event + sessionMeta := events.SessionMetadata{} + if ctx.session != nil { + sessionMeta.SessionID = string(ctx.session.id) + } - // Create common fields for event. 
- fields := events.EventFields{ - events.EventUser: ctx.Identity.TeleportUser, - events.EventLogin: ctx.Identity.Login, - events.LocalAddr: ctx.ServerConn.LocalAddr().String(), - events.RemoteAddr: ctx.ServerConn.RemoteAddr().String(), - events.EventNamespace: ctx.srv.GetNamespace(), + userMeta := events.UserMetadata{ + User: ctx.Identity.TeleportUser, + Login: ctx.Identity.Login, + } + + connectionMeta := events.ConnectionMetadata{ + RemoteAddr: ctx.ServerConn.RemoteAddr().String(), + LocalAddr: ctx.ServerConn.LocalAddr().String(), + } + + commandMeta := events.CommandMetadata{ + Command: cmd, // Due to scp being inherently vulnerable to command injection, always // make sure the full command and exit code is recorded for accountability. // For more details, see the following. // // https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=327019 // https://bugzilla.mindrot.org/show_bug.cgi?id=1998 - events.ExecEventCode: strconv.Itoa(exitCode(execErr)), - events.ExecEventCommand: cmd, + ExitCode: strconv.Itoa(exitCode(execErr)), } + if execErr != nil { - fields[events.ExecEventError] = execErr.Error() + commandMeta.Error = execErr.Error() } // Parse the exec command to find out if it was SCP or not. @@ -392,33 +399,55 @@ func emitExecAuditEvent(ctx *ServerContext, cmd string, execErr error) { // Update appropriate fields based off if the request was SCP or not. if isSCP { - fields[events.SCPPath] = path - fields[events.SCPAction] = action + scpEvent := &events.SCP{ + Metadata: events.Metadata{ + Type: events.SCPEvent, + }, + ServerMetadata: serverMeta, + SessionMetadata: sessionMeta, + UserMetadata: userMeta, + ConnectionMetadata: connectionMeta, + CommandMetadata: commandMeta, + Path: path, + Action: action, + } + switch action { case events.SCPActionUpload: if execErr != nil { - event = events.SCPUploadFailure + scpEvent.Code = events.SCPUploadFailureCode } else { - event = events.SCPUpload + scpEvent.Code = events.SCPUploadCode } case events.SCPActionDownload: if execErr != nil { - event = events.SCPDownloadFailure + scpEvent.Code = events.SCPDownloadFailureCode } else { - event = events.SCPDownload + scpEvent.Code = events.SCPDownloadCode } } + if err := ctx.srv.EmitAuditEvent(ctx.srv.Context(), scpEvent); err != nil { + log.WithError(err).Warn("Failed to emit scp event.") + } } else { + execEvent := &events.Exec{ + Metadata: events.Metadata{ + Type: events.ExecEvent, + }, + ServerMetadata: serverMeta, + SessionMetadata: sessionMeta, + UserMetadata: userMeta, + ConnectionMetadata: connectionMeta, + CommandMetadata: commandMeta, + } if execErr != nil { - event = events.ExecFailure + execEvent.Code = events.ExecFailureCode } else { - event = events.Exec + execEvent.Code = events.ExecCode + } + if err := ctx.srv.EmitAuditEvent(ctx.srv.Context(), execEvent); err != nil { + log.WithError(err).Warn("Failed to emit exec event.") } - } - - // Emit the event. - if err := auditLog.EmitAuditEvent(event, fields); err != nil { - log.Warnf("Failed to emit exec audit event: %v", err) } } diff --git a/lib/srv/exec_test.go b/lib/srv/exec_test.go index 8a54b76b4350c..94c00321245e8 100644 --- a/lib/srv/exec_test.go +++ b/lib/srv/exec_test.go @@ -209,7 +209,7 @@ func (s *ExecSuite) TestLoginDefsParser(c *check.C) { // TestEmitExecAuditEvent make sure the full command and exit code for a // command is always recorded. 
func (s *ExecSuite) TestEmitExecAuditEvent(c *check.C) { - fakeLog, ok := s.ctx.srv.GetAuditLog().(*fakeLog) + fakeServer, ok := s.ctx.srv.(*fakeServer) c.Assert(ok, check.Equals, true) var tests = []struct { @@ -242,8 +242,9 @@ func (s *ExecSuite) TestEmitExecAuditEvent(c *check.C) { } for _, tt := range tests { emitExecAuditEvent(s.ctx, tt.inCommand, tt.inError) - c.Assert(fakeLog.lastEvent.GetString(events.ExecEventCommand), check.Equals, tt.outCommand) - c.Assert(fakeLog.lastEvent.GetString(events.ExecEventCode), check.Equals, tt.outCode) + execEvent := fakeServer.LastEvent().(*events.Exec) + c.Assert(execEvent.Command, check.Equals, tt.outCommand) + c.Assert(execEvent.ExitCode, check.Equals, tt.outCode) } } @@ -409,11 +410,16 @@ func (f *fakeTerminal) SetTermType(string) { // fakeServer is stub for tests type fakeServer struct { - auditLog events.IAuditLog + auditLog events.IAuditLog + events.MockEmitter accessPoint auth.AccessPoint id string } +func (f *fakeServer) Context() context.Context { + return context.TODO() +} + func (f *fakeServer) ID() string { return f.id } @@ -438,13 +444,6 @@ func (f *fakeServer) PermitUserEnvironment() bool { return true } -func (s *fakeServer) EmitAuditEvent(events.Event, events.EventFields) { -} - -func (f *fakeServer) GetAuditLog() events.IAuditLog { - return f.auditLog -} - func (f *fakeServer) GetAccessPoint() auth.AccessPoint { return f.accessPoint } @@ -479,12 +478,10 @@ func (f *fakeServer) GetBPF() bpf.BPF { // fakeLog is used in tests to obtain the last event emit to the Audit Log. type fakeLog struct { - lastEvent events.EventFields } -func (a *fakeLog) EmitAuditEvent(e events.Event, f events.EventFields) error { - a.lastEvent = f - return nil +func (a *fakeLog) EmitAuditEventLegacy(e events.Event, f events.EventFields) error { + return trace.NotImplemented("not implemented") } func (a *fakeLog) PostSessionSlice(s events.SessionSlice) error { diff --git a/lib/srv/forward/sshserver.go b/lib/srv/forward/sshserver.go index d26de1c7fd5e9..b5c83e6662a86 100644 --- a/lib/srv/forward/sshserver.go +++ b/lib/srv/forward/sshserver.go @@ -104,6 +104,9 @@ type Server struct { // to the client. hostCertificate ssh.Signer + // StreamEmitter points to the auth service and emits audit events + events.StreamEmitter + // authHandlers are common authorization and authentication handlers shared // by the regular and forwarding server. authHandlers *srv.AuthHandlers @@ -128,7 +131,6 @@ type Server struct { macAlgorithms []string authClient auth.ClientI - auditLog events.IAuditLog authService auth.AccessPoint sessionRegistry *srv.SessionRegistry sessionServer session.Service @@ -139,6 +141,14 @@ type Server struct { // hostUUID is the UUID of the underlying proxy that the forwarding server // is running in. hostUUID string + + // closeContext and closeCancel are used to signal to the outside + // world that this server is closed + closeContext context.Context + closeCancel context.CancelFunc + + // parentContext is used to signal server closure + parentContext context.Context } // ServerConfig is the configuration needed to create an instance of a Server. @@ -181,6 +191,13 @@ type ServerConfig struct { // HostUUID is the UUID of the underlying proxy that the forwarding server // is running in. HostUUID string + + // Emitter is audit events emitter + Emitter events.StreamEmitter + + // ParentContext is a parent context, used to signal global + // closure + ParentContext context.Context } // CheckDefaults makes sure all required parameters are passed in. 
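A minimal sketch (editor's illustration, not part of this change) in the spirit of the MockEmitter the tests above embed in fakeServer: an emitter that captures the most recent typed event so a test can assert on its fields, the way TestEmitExecAuditEvent now reads fakeServer.LastEvent(). It assumes the events.Emitter interface is EmitAuditEvent(ctx context.Context, event events.AuditEvent) error, as the call sites in this diff suggest; the package and type names are illustrative.

// Editor's sketch; package and type names are illustrative.
package emittersketch

import (
	"context"
	"sync"

	"github.com/gravitational/teleport/lib/events"
)

// captureEmitter records the most recent audit event it was asked to emit.
type captureEmitter struct {
	mu   sync.Mutex
	last events.AuditEvent
}

// EmitAuditEvent stores the event instead of sending it anywhere.
func (c *captureEmitter) EmitAuditEvent(ctx context.Context, event events.AuditEvent) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.last = event
	return nil
}

// LastEvent returns the last emitted event for assertions in tests.
func (c *captureEmitter) LastEvent() events.AuditEvent {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.last
}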
@@ -209,7 +226,12 @@ func (s *ServerConfig) CheckDefaults() error { if s.Clock == nil { s.Clock = clockwork.NewRealClock() } - + if s.Emitter == nil { + return trace.BadParameter("missing parameter Emitter") + } + if s.ParentContext == nil { + s.ParentContext = context.TODO() + } return nil } @@ -246,12 +268,13 @@ func New(c ServerConfig) (*Server, error) { useTunnel: c.UseTunnel, address: c.Address, authClient: c.AuthClient, - auditLog: c.AuthClient, authService: c.AuthClient, sessionServer: c.AuthClient, dataDir: c.DataDir, clock: c.Clock, hostUUID: c.HostUUID, + StreamEmitter: c.Emitter, + parentContext: c.ParentContext, } // Set the ciphers, KEX, and MACs that the in-memory server will send to the @@ -273,9 +296,9 @@ func New(c ServerConfig) (*Server, error) { }), Server: s, Component: teleport.ComponentForwardingNode, - AuditLog: c.AuthClient, AccessPoint: c.AuthClient, FIPS: c.FIPS, + Emitter: c.Emitter, } // Common term handlers. @@ -283,9 +306,19 @@ func New(c ServerConfig) (*Server, error) { SessionRegistry: s.sessionRegistry, } + // Create a close context that is used internally to signal when the server + // is closing and for any blocking goroutines to unblock. + s.closeContext, s.closeCancel = context.WithCancel(c.ParentContext) + return s, nil } +// Context returns parent context, used to signal +// that parent server has been closed +func (s *Server) Context() context.Context { + return s.parentContext +} + // GetDataDir returns server local storage func (s *Server) GetDataDir() string { return s.dataDir @@ -318,29 +351,12 @@ func (s *Server) Component() string { return teleport.ComponentForwardingNode } -// EmitAuditEvent sends an event to the Audit Log. -func (s *Server) EmitAuditEvent(event events.Event, fields events.EventFields) { - auditLog := s.GetAuditLog() - if auditLog != nil { - if err := auditLog.EmitAuditEvent(event, fields); err != nil { - s.log.Error(err) - } - } else { - s.log.Warn("SSH server has no audit log") - } -} - // PermitUserEnvironment is always false because it's up the the remote host // to decide if the user environment will be read or not. func (s *Server) PermitUserEnvironment() bool { return false } -// GetAuditLog returns the Audit Log for this cluster. -func (s *Server) GetAuditLog() events.IAuditLog { - return s.auditLog -} - // GetAccessPoint returns an auth.AccessPoint for this cluster. func (s *Server) GetAccessPoint() auth.AccessPoint { return s.authService @@ -507,6 +523,9 @@ func (s *Server) Close() error { } } + // Signal to the outside world that this server is closed + s.closeCancel() + return trace.NewAggregate(errs...) } @@ -679,15 +698,26 @@ func (s *Server) handleDirectTCPIPRequest(ctx context.Context, ch ssh.Channel, r } defer conn.Close() - // Emit a port forwarding audit event. 
- s.EmitAuditEvent(events.PortForward, events.EventFields{ - events.PortForwardAddr: scx.DstAddr, - events.PortForwardSuccess: true, - events.EventLogin: s.identityContext.Login, - events.EventUser: s.identityContext.TeleportUser, - events.LocalAddr: s.sconn.LocalAddr().String(), - events.RemoteAddr: s.sconn.RemoteAddr().String(), - }) + if err := s.EmitAuditEvent(s.closeContext, &events.PortForward{ + Metadata: events.Metadata{ + Type: events.PortForwardEvent, + Code: events.PortForwardCode, + }, + UserMetadata: events.UserMetadata{ + Login: s.identityContext.Login, + User: s.identityContext.TeleportUser, + }, + ConnectionMetadata: events.ConnectionMetadata{ + LocalAddr: s.sconn.LocalAddr().String(), + RemoteAddr: s.sconn.RemoteAddr().String(), + }, + Addr: scx.DstAddr, + Status: events.Status{ + Success: true, + }, + }); err != nil { + scx.WithError(err).Warn("Failed to emit port forward event.") + } var wg sync.WaitGroup wch := make(chan struct{}) @@ -1006,19 +1036,28 @@ func (s *Server) serveX11Channels(ctx context.Context) error { // handleX11Forward handles an X11 forwarding request from the client. func (s *Server) handleX11Forward(ctx context.Context, ch ssh.Channel, req *ssh.Request, scx *srv.ServerContext) error { - // setup common audit event fields - fields := events.EventFields{ - events.EventLogin: s.identityContext.Login, - events.EventUser: s.identityContext.TeleportUser, - events.LocalAddr: s.sconn.LocalAddr().String(), - events.RemoteAddr: s.sconn.RemoteAddr().String(), + event := events.X11Forward{ + Metadata: events.Metadata{ + Type: events.X11ForwardEvent, + }, + UserMetadata: events.UserMetadata{ + Login: s.identityContext.Login, + User: s.identityContext.TeleportUser, + }, + ConnectionMetadata: events.ConnectionMetadata{ + LocalAddr: s.sconn.LocalAddr().String(), + RemoteAddr: s.sconn.RemoteAddr().String(), + }, } // check if RBAC permits X11 forwarding if !scx.Identity.RoleSet.PermitX11Forwarding() { - fields[events.X11ForwardSuccess] = false - fields[events.X11ForwardErr] = "x11 forwarding not permitted" - s.EmitAuditEvent(events.X11ForwardFailure, fields) + event.Metadata.Code = events.X11ForwardFailureCode + event.Status.Success = false + event.Status.Error = "x11 forwarding not permitted" + if err := s.EmitAuditEvent(s.closeContext, &event); err != nil { + s.log.WithError(err).Warn("Failed to emit X11 forward event.") + } s.replyError(ch, req, trace.AccessDenied("x11 forwarding not permitted")) // failed X11 requests are ok from a protocol perspective, so // we don't actually return an error here. @@ -1029,11 +1068,14 @@ func (s *Server) handleX11Forward(ctx context.Context, ch ssh.Channel, req *ssh. ok, err := forwardRequest(scx.RemoteSession, req) if err != nil || !ok { // request failed or was denied - fields[events.X11ForwardSuccess] = false + event.Metadata.Code = events.X11ForwardFailureCode + event.Status.Success = false if err != nil { - fields[events.X11ForwardErr] = err.Error() + event.Status.Error = err.Error() + } + if err := s.EmitAuditEvent(s.closeContext, &event); err != nil { + s.log.WithError(err).Warn("Failed to emit X11 forward event.") } - s.EmitAuditEvent(events.X11ForwardFailure, fields) return trace.Wrap(err) } @@ -1043,8 +1085,11 @@ func (s *Server) handleX11Forward(ctx context.Context, ch ssh.Channel, req *ssh. 
} }() - fields[events.X11ForwardSuccess] = true - s.EmitAuditEvent(events.X11Forward, fields) + event.Status.Success = true + event.Metadata.Code = events.X11ForwardCode + if err := s.EmitAuditEvent(s.closeContext, &event); err != nil { + s.log.WithError(err).Warn("Failed to emit X11 forward event.") + } return nil } diff --git a/lib/srv/forward/subsystem.go b/lib/srv/forward/subsystem.go index 1a295d5b44320..6b505a5334fd7 100644 --- a/lib/srv/forward/subsystem.go +++ b/lib/srv/forward/subsystem.go @@ -131,16 +131,29 @@ func (r *remoteSubsystem) Wait() error { func (r *remoteSubsystem) emitAuditEvent(err error) { srv := r.serverContext.GetServer() - event := events.Subsystem + subsystemEvent := &events.Subsystem{ + Metadata: events.Metadata{ + Type: events.SubsystemEvent, + }, + UserMetadata: events.UserMetadata{ + User: r.serverContext.Identity.TeleportUser, + Login: r.serverContext.Identity.Login, + }, + ConnectionMetadata: events.ConnectionMetadata{ + LocalAddr: r.serverContext.RemoteClient.LocalAddr().String(), + RemoteAddr: r.serverContext.RemoteClient.RemoteAddr().String(), + }, + Name: r.subsytemName, + } + if err != nil { - event = events.SubsystemFailure + subsystemEvent.Code = events.SubsystemFailureCode + subsystemEvent.Error = err.Error() + } else { + subsystemEvent.Code = events.SubsystemCode + } + + if err := srv.EmitAuditEvent(srv.Context(), subsystemEvent); err != nil { + r.log.WithError(err).Warn("Failed to emit subsystem audit event.") } - srv.EmitAuditEvent(event, events.EventFields{ - events.SubsystemName: r.subsytemName, - events.SubsystemError: err, - events.EventUser: r.serverContext.Identity.TeleportUser, - events.EventLogin: r.serverContext.Identity.Login, - events.LocalAddr: r.serverContext.RemoteClient.LocalAddr().String(), - events.RemoteAddr: r.serverContext.RemoteClient.RemoteAddr().String(), - }) } diff --git a/lib/srv/monitor.go b/lib/srv/monitor.go index 121b028e20f06..dd89f9022aa21 100644 --- a/lib/srv/monitor.go +++ b/lib/srv/monitor.go @@ -72,8 +72,8 @@ type MonitorConfig struct { TeleportUser string // ServerID is a session server ID ServerID string - // Audit is audit log - Audit events.IAuditLog + // Emitter is events emitter + Emitter events.Emitter // Entry is a logging entry Entry *log.Entry } @@ -95,8 +95,8 @@ func (m *MonitorConfig) CheckAndSetDefaults() error { if m.Tracker == nil { return trace.BadParameter("missing parameter Tracker") } - if m.Audit == nil { - return trace.BadParameter("missing parameter Audit") + if m.Emitter == nil { + return trace.BadParameter("missing parameter Emitter") } if m.Clock == nil { m.Clock = clockwork.NewRealClock() @@ -142,41 +142,60 @@ func (w *Monitor) Start() { select { // certificate has expired, disconnect case <-certTime: - event := events.EventFields{ - events.EventType: events.ClientDisconnectEvent, - events.EventLogin: w.Login, - events.EventUser: w.TeleportUser, - events.LocalAddr: w.Conn.LocalAddr().String(), - events.RemoteAddr: w.Conn.RemoteAddr().String(), - events.SessionServerID: w.ServerID, - events.Reason: fmt.Sprintf("client certificate expired at %v", w.Clock.Now().UTC()), + event := &events.ClientDisconnect{ + Metadata: events.Metadata{ + Type: events.ClientDisconnectEvent, + Code: events.ClientDisconnectCode, + }, + UserMetadata: events.UserMetadata{ + Login: w.Login, + User: w.TeleportUser, + }, + ConnectionMetadata: events.ConnectionMetadata{ + LocalAddr: w.Conn.LocalAddr().String(), + RemoteAddr: w.Conn.RemoteAddr().String(), + }, + ServerMetadata: events.ServerMetadata{ + ServerID: 
w.ServerID, + }, + Reason: fmt.Sprintf("client certificate expired at %v", w.Clock.Now().UTC()), } - if err := w.Audit.EmitAuditEvent(events.ClientDisconnect, event); err != nil { - w.Entry.Warningf("failed emitting audit event: %v", err) + if err := w.Emitter.EmitAuditEvent(w.Context, event); err != nil { + w.Entry.WithError(err).Warn("Failed to emit audit event.") } - w.Entry.Debugf("Disconnecting client: %v", event[events.Reason]) + w.Entry.Debugf("Disconnecting client: %v", event.Reason) w.Conn.Close() return case <-idleTime: now := w.Clock.Now().UTC() clientLastActive := w.Tracker.GetClientLastActive() if now.Sub(clientLastActive) >= w.ClientIdleTimeout { - event := events.EventFields{ - events.EventLogin: w.Login, - events.EventUser: w.TeleportUser, - events.LocalAddr: w.Conn.LocalAddr().String(), - events.RemoteAddr: w.Conn.RemoteAddr().String(), - events.SessionServerID: w.ServerID, + event := &events.ClientDisconnect{ + Metadata: events.Metadata{ + Type: events.ClientDisconnectEvent, + Code: events.ClientDisconnectCode, + }, + UserMetadata: events.UserMetadata{ + Login: w.Login, + User: w.TeleportUser, + }, + ConnectionMetadata: events.ConnectionMetadata{ + LocalAddr: w.Conn.LocalAddr().String(), + RemoteAddr: w.Conn.RemoteAddr().String(), + }, + ServerMetadata: events.ServerMetadata{ + ServerID: w.ServerID, + }, } if clientLastActive.IsZero() { - event[events.Reason] = "client reported no activity" + event.Reason = "client reported no activity" } else { - event[events.Reason] = fmt.Sprintf("client is idle for %v, exceeded idle timeout of %v", + event.Reason = fmt.Sprintf("client is idle for %v, exceeded idle timeout of %v", now.Sub(clientLastActive), w.ClientIdleTimeout) } - w.Entry.Debugf("Disconnecting client: %v", event[events.Reason]) - if err := w.Audit.EmitAuditEvent(events.ClientDisconnect, event); err != nil { - w.Entry.Warningf("failed emitting audit event: %v", err) + w.Entry.Debugf("Disconnecting client: %v", event.Reason) + if err := w.Emitter.EmitAuditEvent(w.Context, event); err != nil { + w.Entry.WithError(err).Warn("Failed to emit audit event.") } w.Conn.Close() return diff --git a/lib/srv/regular/sshserver.go b/lib/srv/regular/sshserver.go index a03d49a7b282d..74edf08c0596d 100644 --- a/lib/srv/regular/sshserver.go +++ b/lib/srv/regular/sshserver.go @@ -1,5 +1,5 @@ /* -Copyright 2015 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -101,9 +101,8 @@ type Server struct { // ctx is broadcasting context closure ctx context.Context - // alog points to the AuditLog this server uses to report - // auditable events - alog events.IAuditLog + // StreamEmitter points to the auth service and emits audit events + events.StreamEmitter // clock is a system clock clock clockwork.Clock @@ -169,13 +168,6 @@ func (s *Server) GetNamespace() string { return s.namespace } -func (s *Server) GetAuditLog() events.IAuditLog { - if s.isAuditedAtProxy() { - return events.NewDiscardAuditLog() - } - return s.alog -} - func (s *Server) GetAccessPoint() auth.AccessPoint { return s.authService } @@ -212,7 +204,7 @@ func (s *Server) isAuditedAtProxy() bool { return false } - isRecordAtProxy := clusterConfig.GetSessionRecording() == services.RecordAtProxy + isRecordAtProxy := services.IsRecordAtProxy(clusterConfig.GetSessionRecording()) isTeleportNode := s.Component() == teleport.ComponentNode if isRecordAtProxy && isTeleportNode { @@ -362,7 +354,7 @@ func SetLabels(labels map[string]string, if label.GetPeriod() < time.Second { label.SetPeriod(time.Second) cmdLabels[name] = label - log.Warningf("label period can't be less that 1 second. Period for label '%v' was set to 1 second", name) + log.Warningf("Label period can't be less than 1 second. Period for label '%v' was set to 1 second.", name) } } s.cmdLabels = cmdLabels @@ -378,10 +370,10 @@ func SetLimiter(limiter *limiter.Limiter) ServerOption { } } -// SetAuditLog assigns an audit log interfaces to this server -func SetAuditLog(alog events.IAuditLog) ServerOption { +// SetEmitter assigns an audit event emitter for this server +func SetEmitter(emitter events.StreamEmitter) ServerOption { return func(s *Server) error { - s.alog = alog + s.StreamEmitter = emitter return nil } } @@ -512,8 +504,8 @@ func New(addr utils.NetAddr, } // TODO(klizhentas): replace function arguments with struct - if s.alog == nil { - return nil, trace.BadParameter("setup valid AuditLog parameter using SetAuditLog") + if s.StreamEmitter == nil { + return nil, trace.BadParameter("setup valid Emitter parameter using SetEmitter") } if s.namespace == "" { @@ -545,9 +537,9 @@ func New(addr utils.NetAddr, }), Server: s, Component: component, - AuditLog: s.alog, AccessPoint: s.authService, FIPS: s.fips, + Emitter: s.StreamEmitter, } // common term handlers @@ -603,6 +595,11 @@ func (s *Server) getNamespace() string { return services.ProcessNamespace(s.namespace) } +// Context returns server shutdown context +func (s *Server) Context() context.Context { + return s.ctx +} + func (s *Server) Component() string { if s.proxyMode { return teleport.ComponentProxy @@ -820,22 +817,6 @@ func (s *Server) serveAgent(ctx *srv.ServerContext) error { return nil } -// EmitAuditEvent logs a given event to the audit log attached to the -// server who owns these sessions -func (s *Server) EmitAuditEvent(event events.Event, fields events.EventFields) { - log.Debugf("server.EmitAuditEvent(%v)", event.Name) - alog := s.alog - if alog != nil { - // record the event time with ms precision - fields[events.EventTime] = s.clock.Now().In(time.UTC).Round(time.Millisecond) - if err := alog.EmitAuditEvent(event, fields); err != nil { - log.Error(trace.DebugReport(err)) - } - } else { - log.Warn("SSH server has no audit log") - } -} - // HandleRequest processes global out-of-band requests. Global out-of-band // requests are processed in order (this way the originator knows which // request we are responding to). 
If Teleport does not support the request @@ -901,13 +882,28 @@ func (s *Server) HandleNewConn(ctx context.Context, ccx *sshutils.ConnectionCont if err != nil { if strings.Contains(err.Error(), teleport.MaxLeases) { // user has exceeded their max concurrent ssh connections. - s.EmitAuditEvent(events.SessionRejected, events.EventFields{ - events.Reason: events.SessionRejectedReasonMaxConnections, - events.Maximum: maxConnections, - events.EventProtocol: events.EventProtocolSSH, - events.EventUser: identityContext.TeleportUser, - events.SessionServerID: s.uuid, - }) + if err := s.EmitAuditEvent(s.ctx, &events.SessionReject{ + Metadata: events.Metadata{ + Type: events.SessionRejectedEvent, + Code: events.SessionRejectedCode, + }, + UserMetadata: events.UserMetadata{ + Login: identityContext.Login, + User: identityContext.TeleportUser, + }, + ConnectionMetadata: events.ConnectionMetadata{ + Protocol: events.EventProtocolSSH, + LocalAddr: ccx.ServerConn.LocalAddr().String(), + RemoteAddr: ccx.ServerConn.RemoteAddr().String(), + }, + ServerMetadata: events.ServerMetadata{ + ServerID: s.uuid, + }, + Reason: events.SessionRejectedReasonMaxConnections, + Maximum: maxConnections, + }); err != nil { + log.WithError(err).Warn("Failed to emit session reject event.") + } err = trace.AccessDenied("too many concurrent ssh connections for user %q (max=%d)", identityContext.TeleportUser, maxConnections, @@ -984,13 +980,28 @@ func (s *Server) HandleNewChan(ctx context.Context, ccx *sshutils.ConnectionCont d, ok := ccx.IncrSessions(max) if !ok { // user has exceeded their max concurrent ssh sessions. - s.EmitAuditEvent(events.SessionRejected, events.EventFields{ - events.Reason: events.SessionRejectedReasonMaxSessions, - events.Maximum: max, - events.EventProtocol: events.EventProtocolSSH, - events.EventUser: identityContext.TeleportUser, - events.SessionServerID: s.uuid, - }) + if err := s.EmitAuditEvent(s.ctx, &events.SessionReject{ + Metadata: events.Metadata{ + Type: events.SessionRejectedEvent, + Code: events.SessionRejectedCode, + }, + UserMetadata: events.UserMetadata{ + Login: identityContext.Login, + User: identityContext.TeleportUser, + }, + ConnectionMetadata: events.ConnectionMetadata{ + Protocol: events.EventProtocolSSH, + LocalAddr: ccx.ServerConn.LocalAddr().String(), + RemoteAddr: ccx.ServerConn.RemoteAddr().String(), + }, + ServerMetadata: events.ServerMetadata{ + ServerID: s.uuid, + }, + Reason: events.SessionRejectedReasonMaxSessions, + Maximum: max, + }); err != nil { + log.WithError(err).Warn("Failed to emit sesion reject event.") + } rejectChannel(nch, ssh.Prohibited, fmt.Sprintf("too many session channels for user %q (max=%d)", identityContext.TeleportUser, max)) return } @@ -1135,14 +1146,26 @@ Loop: } // Emit a port forwarding event. 
- s.EmitAuditEvent(events.PortForward, events.EventFields{ - events.PortForwardAddr: scx.DstAddr, - events.PortForwardSuccess: true, - events.EventLogin: scx.Identity.Login, - events.EventUser: scx.Identity.TeleportUser, - events.LocalAddr: scx.ServerConn.LocalAddr().String(), - events.RemoteAddr: scx.ServerConn.RemoteAddr().String(), - }) + if err := s.EmitAuditEvent(s.ctx, &events.PortForward{ + Metadata: events.Metadata{ + Type: events.PortForwardEvent, + Code: events.PortForwardCode, + }, + UserMetadata: events.UserMetadata{ + Login: scx.Identity.Login, + User: scx.Identity.TeleportUser, + }, + ConnectionMetadata: events.ConnectionMetadata{ + LocalAddr: scx.ServerConn.LocalAddr().String(), + RemoteAddr: scx.ServerConn.RemoteAddr().String(), + }, + Addr: scx.DstAddr, + Status: events.Status{ + Success: true, + }, + }); err != nil { + log.WithError(err).Warn("Failed to emit port forward event.") + } } // handleSessionRequests handles out of band session requests once the session @@ -1335,7 +1358,7 @@ func (s *Server) handleAgentForwardNode(req *ssh.Request, ctx *srv.ServerContext func (s *Server) handleAgentForwardProxy(req *ssh.Request, ctx *srv.ServerContext) error { // Forwarding an agent to the proxy is only supported when the proxy is in // recording mode. - if ctx.ClusterConfig.GetSessionRecording() != services.RecordAtProxy { + if services.IsRecordAtProxy(ctx.ClusterConfig.GetSessionRecording()) == false { return trace.BadParameter("agent forwarding to proxy only supported in recording mode") } @@ -1422,7 +1445,7 @@ func (s *Server) handleRecordingProxy(req *ssh.Request) { // reply true that we were able to process the message and reply with a // bool if we are in recording mode or not - recordingProxy = clusterConfig.GetSessionRecording() == services.RecordAtProxy + recordingProxy = services.IsRecordAtProxy(clusterConfig.GetSessionRecording()) err = req.Reply(true, []byte(strconv.FormatBool(recordingProxy))) if err != nil { log.Warnf("Unable to respond to global request (%v, %v): %v: %v", req.Type, req.WantReply, recordingProxy, err) @@ -1489,7 +1512,7 @@ func (s *Server) handleProxyJump(ctx context.Context, ccx *sshutils.ConnectionCo // "out of band", before SSH client actually asks for it // which is a hack, but the only way we can think of making it work, // ideas are appreciated. - if clusterConfig.GetSessionRecording() == services.RecordAtProxy { + if services.IsRecordAtProxy(clusterConfig.GetSessionRecording()) { err = s.handleAgentForwardProxy(&ssh.Request{}, scx) if err != nil { log.Warningf("Failed to request agent in recording mode: %v", err) diff --git a/lib/srv/regular/sshserver_test.go b/lib/srv/regular/sshserver_test.go index 126f8b8246e72..226bde70a246e 100644 --- a/lib/srv/regular/sshserver_test.go +++ b/lib/srv/regular/sshserver_test.go @@ -51,6 +51,7 @@ import ( "github.com/gravitational/teleport/lib/utils" "github.com/gravitational/trace" + "github.com/pborman/uuid" . 
"gopkg.in/check.v1" ) @@ -69,7 +70,9 @@ type SrvSuite struct { user string server *auth.TestTLSServer proxyClient *auth.Client + proxyID string nodeClient *auth.Client + nodeID string adminClient *auth.Client testServer *auth.TestAuthServer } @@ -118,7 +121,13 @@ func (s *SrvSuite) SetUpTest(c *C) { s.testServer = authServer // create proxy client used in some tests - s.proxyClient, err = s.server.NewClient(auth.TestBuiltin(teleport.RoleProxy)) + s.proxyID = uuid.New() + s.proxyClient, err = s.server.NewClient(auth.TestIdentity{ + I: auth.BuiltinRole{ + Role: teleport.RoleProxy, + Username: s.proxyID, + }, + }) c.Assert(err, IsNil) // admin client is for admin actions, e.g. creating new users @@ -141,7 +150,13 @@ func (s *SrvSuite) SetUpTest(c *C) { s.signer, err = sshutils.NewSigner(certs.Key, certs.Cert) c.Assert(err, IsNil) - s.nodeClient, err = s.server.NewClient(auth.TestBuiltin(teleport.RoleNode)) + s.nodeID = uuid.New() + s.nodeClient, err = s.server.NewClient(auth.TestIdentity{ + I: auth.BuiltinRole{ + Role: teleport.RoleNode, + Username: s.nodeID, + }, + }) c.Assert(err, IsNil) nodeDir := c.MkDir() @@ -153,8 +168,9 @@ func (s *SrvSuite) SetUpTest(c *C) { nodeDir, "", utils.NetAddr{}, + SetUUID(s.nodeID), SetNamespace(defaults.Namespace), - SetAuditLog(s.nodeClient), + SetEmitter(s.nodeClient), SetShell("/bin/sh"), SetSessionServer(s.nodeClient), SetPAMConfig(&pam.Config{Enabled: false}), @@ -695,6 +711,7 @@ func (s *SrvSuite) TestProxyReverseTunnel(c *C) { DirectClusters: []reversetunnel.DirectCluster{{Name: s.server.ClusterName(), Client: s.proxyClient}}, DataDir: c.MkDir(), Component: teleport.ComponentProxy, + Emitter: s.proxyClient, }) c.Assert(err, IsNil) c.Assert(reverseTunnelServer.Start(), IsNil) @@ -707,10 +724,10 @@ func (s *SrvSuite) TestProxyReverseTunnel(c *C) { c.MkDir(), "", utils.NetAddr{}, - SetUUID(hostID), + SetUUID(s.proxyID), SetProxyMode(reverseTunnelServer), SetSessionServer(s.proxyClient), - SetAuditLog(s.nodeClient), + SetEmitter(s.nodeClient), SetNamespace(defaults.Namespace), SetPAMConfig(&pam.Config{Enabled: false}), SetBPF(&bpf.NOP{}), @@ -787,10 +804,10 @@ func (s *SrvSuite) TestProxyReverseTunnel(c *C) { }, ), SetSessionServer(s.nodeClient), - SetAuditLog(s.nodeClient), SetNamespace(defaults.Namespace), SetPAMConfig(&pam.Config{Enabled: false}), SetBPF(&bpf.NOP{}), + SetEmitter(s.nodeClient), ) c.Assert(err, IsNil) c.Assert(err, IsNil) @@ -857,6 +874,7 @@ func (s *SrvSuite) TestProxyRoundRobin(c *C) { NewCachingAccessPoint: auth.NoCache, DirectClusters: []reversetunnel.DirectCluster{{Name: s.server.ClusterName(), Client: s.proxyClient}}, DataDir: c.MkDir(), + Emitter: s.proxyClient, }) c.Assert(err, IsNil) @@ -872,7 +890,7 @@ func (s *SrvSuite) TestProxyRoundRobin(c *C) { utils.NetAddr{}, SetProxyMode(reverseTunnelServer), SetSessionServer(s.proxyClient), - SetAuditLog(s.nodeClient), + SetEmitter(s.nodeClient), SetNamespace(defaults.Namespace), SetPAMConfig(&pam.Config{Enabled: false}), SetBPF(&bpf.NOP{}), @@ -958,6 +976,7 @@ func (s *SrvSuite) TestProxyDirectAccess(c *C) { NewCachingAccessPoint: auth.NoCache, DirectClusters: []reversetunnel.DirectCluster{{Name: s.server.ClusterName(), Client: s.proxyClient}}, DataDir: c.MkDir(), + Emitter: s.proxyClient, }) c.Assert(err, IsNil) @@ -971,7 +990,7 @@ func (s *SrvSuite) TestProxyDirectAccess(c *C) { utils.NetAddr{}, SetProxyMode(reverseTunnelServer), SetSessionServer(s.proxyClient), - SetAuditLog(s.nodeClient), + SetEmitter(s.nodeClient), SetNamespace(defaults.Namespace), 
SetPAMConfig(&pam.Config{Enabled: false}), SetBPF(&bpf.NOP{}), @@ -1081,7 +1100,7 @@ func (s *SrvSuite) TestLimiter(c *C) { SetLimiter(limiter), SetShell("/bin/sh"), SetSessionServer(s.nodeClient), - SetAuditLog(s.nodeClient), + SetEmitter(s.nodeClient), SetNamespace(defaults.Namespace), SetPAMConfig(&pam.Config{Enabled: false}), SetBPF(&bpf.NOP{}), diff --git a/lib/srv/sess.go b/lib/srv/sess.go index 1e4263834009e..8fa349efa618e 100644 --- a/lib/srv/sess.go +++ b/lib/srv/sess.go @@ -1,5 +1,5 @@ /* -Copyright 2015 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -30,9 +30,11 @@ import ( "github.com/gravitational/teleport/lib/bpf" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/events" + "github.com/gravitational/teleport/lib/events/filesessions" "github.com/gravitational/teleport/lib/services" rsession "github.com/gravitational/teleport/lib/session" "github.com/gravitational/teleport/lib/sshutils" + "github.com/gravitational/teleport/lib/utils" "github.com/gravitational/trace" "github.com/prometheus/client_golang/prometheus" @@ -122,22 +124,35 @@ func (s *SessionRegistry) Close() { // emitSessionJoinEvent emits a session join event to both the Audit Log as // well as sending a "x-teleport-event" global request on the SSH connection. func (s *SessionRegistry) emitSessionJoinEvent(ctx *ServerContext) { - sessionJoinEvent := events.EventFields{ - events.EventType: events.SessionJoinEvent, - events.SessionEventID: string(ctx.session.id), - events.EventNamespace: s.srv.GetNamespace(), - events.EventLogin: ctx.Identity.Login, - events.EventUser: ctx.Identity.TeleportUser, - events.RemoteAddr: ctx.ServerConn.RemoteAddr().String(), - events.SessionServerID: ctx.srv.HostUUID(), + sessionJoinEvent := &events.SessionJoin{ + Metadata: events.Metadata{ + Type: events.SessionJoinEvent, + Code: events.SessionJoinCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: ctx.srv.HostUUID(), + ServerNamespace: s.srv.GetNamespace(), + }, + SessionMetadata: events.SessionMetadata{ + SessionID: string(ctx.session.id), + }, + UserMetadata: events.UserMetadata{ + User: ctx.Identity.TeleportUser, + Login: ctx.Identity.Login, + }, + ConnectionMetadata: events.ConnectionMetadata{ + RemoteAddr: ctx.ServerConn.RemoteAddr().String(), + }, } // Local address only makes sense for non-tunnel nodes. if !ctx.srv.UseTunnel() { - sessionJoinEvent[events.LocalAddr] = ctx.ServerConn.LocalAddr().String() + sessionJoinEvent.ConnectionMetadata.LocalAddr = ctx.ServerConn.LocalAddr().String() } // Emit session join event to Audit Log. - ctx.session.emitAuditEvent(events.SessionJoin, sessionJoinEvent) + if err := ctx.session.recorder.EmitAuditEvent(ctx.srv.Context(), sessionJoinEvent); err != nil { + s.log.WithError(err).Warn("Failed to emit session join event.") + } // Notify all members of the party that a new member has joined over the // "x-teleport-event" channel. @@ -227,21 +242,32 @@ func (s *SessionRegistry) OpenExecSession(channel ssh.Channel, req *ssh.Request, // emitSessionLeaveEvent emits a session leave event to both the Audit Log as // well as sending a "x-teleport-event" global request on the SSH connection. 
func (s *SessionRegistry) emitSessionLeaveEvent(party *party) { - sessionLeaveEvent := events.EventFields{ - events.EventType: events.SessionLeaveEvent, - events.SessionEventID: party.id.String(), - events.EventUser: party.user, - events.SessionServerID: party.ctx.srv.HostUUID(), - events.EventNamespace: s.srv.GetNamespace(), + sessionLeaveEvent := &events.SessionLeave{ + Metadata: events.Metadata{ + Type: events.SessionLeaveEvent, + Code: events.SessionLeaveCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: party.ctx.srv.HostUUID(), + ServerNamespace: s.srv.GetNamespace(), + }, + SessionMetadata: events.SessionMetadata{ + SessionID: party.id.String(), + }, + UserMetadata: events.UserMetadata{ + User: party.user, + }, } // Emit session leave event to Audit Log. - party.s.emitAuditEvent(events.SessionLeave, sessionLeaveEvent) + if err := party.s.recorder.EmitAuditEvent(s.srv.Context(), sessionLeaveEvent); err != nil { + s.log.WithError(err).Warn("Failed to emit session leave event.") + } // Notify all members of the party that a new member has left over the // "x-teleport-event" channel. for _, p := range s.getParties(party.s) { - eventPayload, err := json.Marshal(sessionLeaveEvent) + eventPayload, err := utils.FastMarshal(sessionLeaveEvent) if err != nil { s.log.Warnf("Unable to marshal %v for %v: %v.", events.SessionJoinEvent, p.sconn.RemoteAddr(), err) continue @@ -292,24 +318,37 @@ func (s *SessionRegistry) leaveSession(party *party) error { start, end := sess.startTime, time.Now().UTC() // Emit a session.end event for this (interactive) session. - eventFields := events.EventFields{ - events.SessionEventID: string(sess.id), - events.SessionServerID: party.ctx.srv.HostUUID(), - events.EventUser: party.user, - events.EventNamespace: s.srv.GetNamespace(), - events.SessionInteractive: true, - events.SessionEnhancedRecording: sess.hasEnhancedRecording, - events.SessionParticipants: sess.exportParticipants(), - events.SessionServerHostname: s.srv.GetInfo().GetHostname(), - events.SessionServerAddr: s.srv.GetInfo().GetAddr(), - events.SessionStartTime: start, - events.SessionEndTime: end, + sessionEndEvent := &events.SessionEnd{ + Metadata: events.Metadata{ + Type: events.SessionEndEvent, + Code: events.SessionEndCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: party.ctx.srv.HostUUID(), + ServerNamespace: s.srv.GetNamespace(), + ServerHostname: s.srv.GetInfo().GetHostname(), + ServerAddr: s.srv.GetInfo().GetAddr(), + }, + SessionMetadata: events.SessionMetadata{ + SessionID: string(sess.id), + }, + UserMetadata: events.UserMetadata{ + User: party.user, + }, + EnhancedRecording: sess.hasEnhancedRecording, + Participants: sess.exportParticipants(), + Interactive: true, + StartTime: start, + EndTime: end, + } + if err := sess.recorder.EmitAuditEvent(s.srv.Context(), sessionEndEvent); err != nil { + s.log.WithError(err).Warn("Failed to emit session end event.") } - sess.emitAuditEvent(events.SessionEnd, eventFields) - // close recorder to free up associated resources - // and flush data - sess.recorder.Close() + // close recorder to free up associated resources and flush data + if err := sess.recorder.Close(s.srv.Context()); err != nil { + s.log.WithError(err).Warn("Failed to close recorder.") + } if err := sess.Close(); err != nil { s.log.Errorf("Unable to close session %v: %v", sess.id, err) @@ -359,19 +398,30 @@ func (s *SessionRegistry) NotifyWinChange(params rsession.TerminalParams, ctx *S sid := ctx.session.id // Build the resize event. 
- resizeEvent := events.EventFields{ - events.EventType: events.ResizeEvent, - events.EventNamespace: s.srv.GetNamespace(), - events.SessionEventID: sid, - events.SessionServerID: ctx.srv.HostUUID(), - events.EventLogin: ctx.Identity.Login, - events.EventUser: ctx.Identity.TeleportUser, - events.TerminalSize: params.Serialize(), + resizeEvent := &events.Resize{ + Metadata: events.Metadata{ + Type: events.ResizeEvent, + Code: events.TerminalResizeCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: ctx.srv.HostUUID(), + ServerNamespace: s.srv.GetNamespace(), + }, + SessionMetadata: events.SessionMetadata{ + SessionID: string(sid), + }, + UserMetadata: events.UserMetadata{ + User: ctx.Identity.TeleportUser, + Login: ctx.Identity.Login, + }, + TerminalSize: params.Serialize(), } // Report the updated window size to the event log (this is so the sessions // can be replayed correctly). - ctx.session.emitAuditEvent(events.TerminalResize, resizeEvent) + if err := ctx.session.recorder.EmitAuditEvent(s.srv.Context(), resizeEvent); err != nil { + s.log.WithError(err).Warn("Failed to emit resize audit event.") + } // Update the size of the server side PTY. err := ctx.session.term.SetWinSize(params) @@ -382,7 +432,7 @@ func (s *SessionRegistry) NotifyWinChange(params rsession.TerminalParams, ctx *S // If sessions are being recorded at the proxy, sessions can not be shared. // In that situation, PTY size information does not need to be propagated // back to all clients and we can return right away. - if ctx.ClusterConfig.GetSessionRecording() == services.RecordAtProxy { + if services.IsRecordAtProxy(ctx.ClusterConfig.GetSessionRecording()) { return nil } @@ -471,7 +521,7 @@ type session struct { closeOnce sync.Once - recorder events.SessionRecorder + recorder events.StreamWriter // hasEnhancedRecording returns true if this session has enhanced session // recording events associated. @@ -561,6 +611,12 @@ func (s *session) PID() int { return s.term.PID() } +// Recorder returns a events.SessionRecorder which can be used to emit events +// to a session as well as the audit log. +func (s *session) Recorder() events.StreamWriter { + return s.recorder +} + // Close ends the active session forcing all clients to disconnect and freeing all resources func (s *session) Close() error { s.closeOnce.Do(func() { @@ -569,7 +625,7 @@ func (s *session) Close() error { // (session writer) will try to close this session, causing a deadlock // because of closeOnce go func() { - s.log.Infof("Closing session %v", s.id) + s.log.Infof("Closing session %v.", s.id) if s.term != nil { s.term.Close() } @@ -579,7 +635,7 @@ func (s *session) Close() error { s.writer.Lock() defer s.writer.Unlock() for writerName, writer := range s.writer.writers { - s.log.Infof("Closing session writer: %v", writerName) + s.log.Debugf("Closing session writer: %v.", writerName) closer, ok := io.Writer(writer).(io.WriteCloser) if ok { closer.Close() @@ -607,26 +663,32 @@ func (s *session) startInteractive(ch ssh.Channel, ctx *ServerContext) error { // create a new "party" (connected client) p := newParty(s, ch, ctx) - // Get the audit log from the server and create a session recorder. this will - // be a discard audit log if the proxy is in recording mode and a teleport - // node so we don't create double recordings. - auditLog := s.registry.srv.GetAuditLog() - if auditLog == nil || isDiscardAuditLog(auditLog) { - s.recorder = &events.DiscardRecorder{} + // Nodes discard events in cases when proxies are already recording them. 
+ if s.registry.srv.Component() == teleport.ComponentNode && + services.IsRecordAtProxy(ctx.ClusterConfig.GetSessionRecording()) { + s.recorder = &events.DiscardStream{} } else { - s.recorder, err = events.NewForwardRecorder(events.ForwardRecorderConfig{ - DataDir: filepath.Join(ctx.srv.GetDataDir(), teleport.LogsDir), - SessionID: s.id, - Namespace: ctx.srv.GetNamespace(), - RecordSessions: ctx.ClusterConfig.GetSessionRecording() != services.RecordOff, - Component: teleport.Component(teleport.ComponentSession, ctx.srv.Component()), - ForwardTo: auditLog, + streamer, err := s.newStreamer(ctx) + if err != nil { + return trace.Wrap(err) + } + s.recorder, err = events.NewAuditWriter(events.AuditWriterConfig{ + // Audit stream is using server context, not session context, + // to make sure that session is uploaded even after it is closed + Context: ctx.srv.Context(), + Streamer: streamer, + Clock: ctx.srv.GetClock(), + SessionID: s.id, + Namespace: ctx.srv.GetNamespace(), + ServerID: ctx.srv.HostUUID(), + RecordOutput: ctx.ClusterConfig.GetSessionRecording() != services.RecordOff, + Component: teleport.Component(teleport.ComponentSession, ctx.srv.Component()), }) if err != nil { return trace.Wrap(err) } } - s.writer.addWriter("session-recorder", s.recorder, true) + s.writer.addWriter("session-recorder", utils.WriteCloserWithContext(ctx.srv.Context(), s.recorder), true) // allocate a terminal or take the one previously allocated via a // seaprate "allocate TTY" SSH request @@ -651,8 +713,9 @@ func (s *session) startInteractive(ch ssh.Channel, ctx *ServerContext) error { // Open a BPF recording session. If BPF was not configured, not available, // or running in a recording proxy, OpenSession is a NOP. sessionContext := &bpf.SessionContext{ + Context: ctx.srv.Context(), PID: s.term.PID(), - AuditLog: s.recorder.GetAuditLog(), + Emitter: s.recorder, Namespace: ctx.srv.GetNamespace(), SessionID: s.id.String(), ServerID: ctx.srv.HostUUID(), @@ -677,22 +740,37 @@ func (s *session) startInteractive(ch ssh.Channel, ctx *ServerContext) error { params := s.term.GetTerminalParams() // Emit "new session created" event for the interactive session. - eventFields := events.EventFields{ - events.EventNamespace: ctx.srv.GetNamespace(), - events.SessionEventID: string(s.id), - events.SessionServerID: ctx.srv.HostUUID(), - events.EventLogin: ctx.Identity.Login, - events.EventUser: ctx.Identity.TeleportUser, - events.RemoteAddr: ctx.ServerConn.RemoteAddr().String(), - events.TerminalSize: params.Serialize(), - events.SessionServerHostname: ctx.srv.GetInfo().GetHostname(), - events.SessionServerLabels: ctx.srv.GetInfo().GetAllLabels(), + sessionStartEvent := &events.SessionStart{ + Metadata: events.Metadata{ + Type: events.SessionStartEvent, + Code: events.SessionStartCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: ctx.srv.HostUUID(), + ServerLabels: ctx.srv.GetInfo().GetAllLabels(), + ServerHostname: ctx.srv.GetInfo().GetHostname(), + ServerNamespace: ctx.srv.GetNamespace(), + }, + SessionMetadata: events.SessionMetadata{ + SessionID: string(s.id), + }, + UserMetadata: events.UserMetadata{ + User: ctx.Identity.TeleportUser, + Login: ctx.Identity.Login, + }, + ConnectionMetadata: events.ConnectionMetadata{ + RemoteAddr: ctx.ServerConn.RemoteAddr().String(), + }, + TerminalSize: params.Serialize(), } + // Local address only makes sense for non-tunnel nodes. 
if !ctx.srv.UseTunnel() { - eventFields[events.LocalAddr] = ctx.ServerConn.LocalAddr().String() + sessionStartEvent.ConnectionMetadata.LocalAddr = ctx.ServerConn.LocalAddr().String() + } + if err := s.recorder.EmitAuditEvent(ctx.srv.Context(), sessionStartEvent); err != nil { + s.log.WithError(err).Warn("Failed to emit session start event.") } - s.emitAuditEvent(events.SessionStart, eventFields) // Start a heartbeat that marks this session as active with current members // of party in the backend. @@ -771,20 +849,26 @@ func (s *session) startInteractive(ch ssh.Channel, ctx *ServerContext) error { func (s *session) startExec(channel ssh.Channel, ctx *ServerContext) error { var err error - // Get the audit log from the server and create a session recorder. this will - // be a discard audit log if the proxy is in recording mode and a teleport - // node so we don't create double recordings. - auditLog := s.registry.srv.GetAuditLog() - if auditLog == nil || isDiscardAuditLog(auditLog) { - s.recorder = &events.DiscardRecorder{} + // Nodes discard events in cases when proxies are already recording them. + if s.registry.srv.Component() == teleport.ComponentNode && + services.IsRecordAtProxy(ctx.ClusterConfig.GetSessionRecording()) { + s.recorder = &events.DiscardStream{} } else { - s.recorder, err = events.NewForwardRecorder(events.ForwardRecorderConfig{ - DataDir: filepath.Join(ctx.srv.GetDataDir(), teleport.LogsDir), - SessionID: s.id, - Namespace: ctx.srv.GetNamespace(), - RecordSessions: ctx.ClusterConfig.GetSessionRecording() != services.RecordOff, - Component: teleport.Component(teleport.ComponentSession, ctx.srv.Component()), - ForwardTo: auditLog, + streamer, err := s.newStreamer(ctx) + if err != nil { + return trace.Wrap(err) + } + s.recorder, err = events.NewAuditWriter(events.AuditWriterConfig{ + // Audit stream is using server context, not session context, + // to make sure that session is uploaded even after it is closed + Context: ctx.srv.Context(), + Streamer: streamer, + SessionID: s.id, + Clock: ctx.srv.GetClock(), + Namespace: ctx.srv.GetNamespace(), + ServerID: ctx.srv.HostUUID(), + RecordOutput: ctx.ClusterConfig.GetSessionRecording() != services.RecordOff, + Component: teleport.Component(teleport.ComponentSession, ctx.srv.Component()), }) if err != nil { return trace.Wrap(err) @@ -792,21 +876,35 @@ func (s *session) startExec(channel ssh.Channel, ctx *ServerContext) error { } // Emit a session.start event for the exec session. 
- eventFields := events.EventFields{ - events.EventNamespace: ctx.srv.GetNamespace(), - events.SessionEventID: string(s.id), - events.SessionServerID: ctx.srv.HostUUID(), - events.EventLogin: ctx.Identity.Login, - events.EventUser: ctx.Identity.TeleportUser, - events.RemoteAddr: ctx.ServerConn.RemoteAddr().String(), - events.SessionServerHostname: ctx.srv.GetInfo().GetHostname(), - events.SessionServerLabels: ctx.srv.GetInfo().GetAllLabels(), + sessionStartEvent := &events.SessionStart{ + Metadata: events.Metadata{ + Type: events.SessionStartEvent, + Code: events.SessionStartCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: ctx.srv.HostUUID(), + ServerLabels: ctx.srv.GetInfo().GetAllLabels(), + ServerHostname: ctx.srv.GetInfo().GetHostname(), + ServerNamespace: ctx.srv.GetNamespace(), + }, + SessionMetadata: events.SessionMetadata{ + SessionID: string(s.id), + }, + UserMetadata: events.UserMetadata{ + User: ctx.Identity.TeleportUser, + Login: ctx.Identity.Login, + }, + ConnectionMetadata: events.ConnectionMetadata{ + RemoteAddr: ctx.ServerConn.RemoteAddr().String(), + }, } // Local address only makes sense for non-tunnel nodes. if !ctx.srv.UseTunnel() { - eventFields[events.LocalAddr] = ctx.ServerConn.LocalAddr().String() + sessionStartEvent.ConnectionMetadata.LocalAddr = ctx.ServerConn.LocalAddr().String() + } + if err := s.recorder.EmitAuditEvent(ctx.srv.Context(), sessionStartEvent); err != nil { + ctx.WithError(err).Warn("Failed to emit session start event.") } - s.emitAuditEvent(events.SessionStart, eventFields) // Start execution. If the program failed to start, send that result back. // Note this is a partial start. Teleport will have re-exec'ed itself and @@ -823,8 +921,9 @@ func (s *session) startExec(channel ssh.Channel, ctx *ServerContext) error { // Open a BPF recording session. If BPF was not configured, not available, // or running in a recording proxy, OpenSession is a NOP. sessionContext := &bpf.SessionContext{ + Context: ctx.srv.Context(), PID: ctx.ExecRequest.PID(), - AuditLog: s.recorder.GetAuditLog(), + Emitter: s.recorder, Namespace: ctx.srv.GetNamespace(), SessionID: string(s.id), ServerID: ctx.srv.HostUUID(), @@ -870,22 +969,40 @@ func (s *session) startExec(channel ssh.Channel, ctx *ServerContext) error { start, end := s.startTime, time.Now().UTC() // Emit a session.end event for this (exec) session. 
- eventFields := events.EventFields{ - events.SessionEventID: string(s.id), - events.SessionServerID: ctx.srv.HostUUID(), - events.EventNamespace: ctx.srv.GetNamespace(), - events.SessionInteractive: false, - events.SessionEnhancedRecording: s.hasEnhancedRecording, - events.SessionServerHostname: ctx.srv.GetInfo().GetHostname(), - events.SessionServerAddr: ctx.srv.GetInfo().GetAddr(), - events.SessionStartTime: start, - events.SessionEndTime: end, - events.EventUser: ctx.Identity.TeleportUser, + sessionEndEvent := &events.SessionEnd{ + Metadata: events.Metadata{ + Type: events.SessionEndEvent, + Code: events.SessionEndCode, + }, + ServerMetadata: events.ServerMetadata{ + ServerID: ctx.srv.HostUUID(), + ServerNamespace: ctx.srv.GetNamespace(), + ServerHostname: ctx.srv.GetInfo().GetHostname(), + ServerAddr: ctx.srv.GetInfo().GetAddr(), + }, + SessionMetadata: events.SessionMetadata{ + SessionID: string(s.id), + }, + UserMetadata: events.UserMetadata{ + User: ctx.Identity.TeleportUser, + Login: ctx.Identity.Login, + }, + EnhancedRecording: s.hasEnhancedRecording, + Interactive: false, + Participants: []string{ + ctx.Identity.TeleportUser, + }, + StartTime: start, + EndTime: end, + } + if err := s.recorder.EmitAuditEvent(ctx.srv.Context(), sessionEndEvent); err != nil { + ctx.WithError(err).Warn("Failed to emit session end event.") } - s.emitAuditEvent(events.SessionEnd, eventFields) // Close recorder to free up associated resources and flush data. - s.recorder.Close() + if err := s.recorder.Close(ctx.srv.Context()); err != nil { + ctx.WithError(err).Warn("Failed to close recorder.") + } // Close the session. err = s.Close() @@ -907,6 +1024,34 @@ func (s *session) startExec(channel ssh.Channel, ctx *ServerContext) error { return nil } +// newStreamer returns sync or async streamer based on the configuration +// of the server and the session, sync streamer sends the events +// directly to the auth server and blocks if the events can not be received, +// async streamer buffers the events to disk and uploads the events later +func (s *session) newStreamer(ctx *ServerContext) (events.Streamer, error) { + mode := ctx.ClusterConfig.GetSessionRecording() + if services.IsRecordSync(mode) { + s.log.Debugf("Using sync streamer for session %v.", s.id) + return ctx.srv, nil + } + s.log.Debugf("Using async streamer for session %v.", s.id) + fileStreamer, err := filesessions.NewStreamer(sessionsStreamingUploadDir(ctx)) + if err != nil { + return nil, trace.Wrap(err) + } + // TeeStreamer sends non-print and non disk events + // to the audit log in async mode, while buffering all + // events on disk for further upload at the end of the session + return events.NewTeeStreamer(fileStreamer, ctx.srv), nil +} + +func sessionsStreamingUploadDir(ctx *ServerContext) string { + return filepath.Join( + ctx.srv.GetDataDir(), teleport.LogsDir, teleport.ComponentUpload, + events.StreamingLogsDir, ctx.srv.GetNamespace(), + ) +} + func (s *session) broadcastResult(r ExecResult) { for _, p := range s.parties { p.ctx.SendExecResult(r) @@ -995,7 +1140,7 @@ func (s *session) exportParticipants() []string { func (s *session) heartbeat(ctx *ServerContext) { // If sessions are being recorded at the proxy, an identical version of this // goroutine is running in the proxy, which means it does not need to run here. 
- if ctx.ClusterConfig.GetSessionRecording() == services.RecordAtProxy && + if services.IsRecordAtProxy(ctx.ClusterConfig.GetSessionRecording()) && s.registry.srv.Component() == teleport.ComponentNode { return } @@ -1096,12 +1241,6 @@ func (s *session) join(ch ssh.Channel, req *ssh.Request, ctx *ServerContext) (*p return p, nil } -func (s *session) emitAuditEvent(e events.Event, f events.EventFields) { - if err := s.recorder.GetAuditLog().EmitAuditEvent(e, f); err != nil { - s.log.Warningf("Failed to emit audit event: %v", err) - } -} - func newMultiWriter() *multiWriter { return &multiWriter{writers: make(map[string]writerWrapper)} } @@ -1249,8 +1388,3 @@ func (p *party) Close() (err error) { }) return err } - -func isDiscardAuditLog(alog events.IAuditLog) bool { - _, ok := alog.(*events.DiscardAuditLog) - return ok -} diff --git a/lib/srv/term.go b/lib/srv/term.go index 47f764d5677ff..6c58851f2f8a2 100644 --- a/lib/srv/term.go +++ b/lib/srv/term.go @@ -108,7 +108,7 @@ func NewTerminal(ctx *ServerContext) (Terminal, error) { // If this is not a Teleport node, find out what mode the cluster is in and // return the correct terminal. - if ctx.ClusterConfig.GetSessionRecording() == services.RecordAtProxy { + if services.IsRecordAtProxy(ctx.ClusterConfig.GetSessionRecording()) { return newRemoteTerminal(ctx) } return newLocalTerminal(ctx) diff --git a/lib/utils/conn.go b/lib/utils/conn.go index f69f18410ade3..7a0f0b6d8b141 100644 --- a/lib/utils/conn.go +++ b/lib/utils/conn.go @@ -19,6 +19,7 @@ package utils import ( "bufio" "fmt" + "io" "io/ioutil" "net" "net/http" @@ -27,6 +28,37 @@ import ( "github.com/gravitational/trace" ) +// NewCloserConn returns new connection wrapper that +// when closed will also close passed closers +func NewCloserConn(conn net.Conn, closers ...io.Closer) *CloserConn { + return &CloserConn{ + Conn: conn, + closers: closers, + } +} + +// CloserConn wraps connection and attaches additional closers to it +type CloserConn struct { + net.Conn + closers []io.Closer +} + +// AddCloser adds any closer in ctx that will be called +// whenever server closes session channel +func (c *CloserConn) AddCloser(closer io.Closer) { + c.closers = append(c.closers, closer) +} + +// Close closes connection and all closers +func (c *CloserConn) Close() error { + var errors []error + for _, closer := range c.closers { + errors = append(errors, closer.Close()) + } + errors = append(errors, c.Conn.Close()) + return trace.NewAggregate(errors...) +} + // Roundtrip is a single connection simplistic HTTP client // that allows us to bypass a connection pool to test load balancing // used in tests, as it only supports GET request on / diff --git a/lib/utils/repeat.go b/lib/utils/repeat.go new file mode 100644 index 0000000000000..12c1ef7d65350 --- /dev/null +++ b/lib/utils/repeat.go @@ -0,0 +1,55 @@ +/* +Copyright 2020 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package utils + +import ( + "io" +) + +// NewRepeatReader returns a repeat reader +func NewRepeatReader(repeat byte, count int) *RepeatReader { + return &RepeatReader{ + repeat: repeat, + count: count, + } +} + +// RepeatReader repeats the same byte count times +// without allocating any data, the single instance +// of the repeat reader is not goroutine safe +type RepeatReader struct { + repeat byte + count int + read int +} + +// Read copies the same byte over and over to the data count times +func (r *RepeatReader) Read(data []byte) (int, error) { + if r.read >= r.count { + return 0, io.EOF + } + var copied int + for i := 0; i < len(data); i++ { + data[i] = r.repeat + copied++ + r.read++ + if r.read >= r.count { + break + } + } + return copied, nil +} diff --git a/lib/utils/slice.go b/lib/utils/slice.go new file mode 100644 index 0000000000000..d5ad7da3670ac --- /dev/null +++ b/lib/utils/slice.go @@ -0,0 +1,110 @@ +package utils + +import ( + "bytes" + "sync" +) + +// SlicePool manages a pool of slices +// in attempts to manage memory in go more efficiently +// and avoid frequent allocations +type SlicePool interface { + // Zero zeroes slice + Zero(b []byte) + // Get returns a new or already allocated slice + Get() []byte + // Put returns slice back to the pool + Put(b []byte) + // Size returns a slice size + Size() int64 +} + +// NewSliceSyncPool returns a new slice pool, using sync.Pool +// of pre-allocated or newly allocated slices of the predefined size and capacity +func NewSliceSyncPool(sliceSize int64) *SliceSyncPool { + s := &SliceSyncPool{ + sliceSize: sliceSize, + zeroSlice: make([]byte, sliceSize), + } + s.New = func() interface{} { + slice := make([]byte, s.sliceSize) + return &slice + } + return s +} + +// SliceSyncPool is a sync pool of slices (usually large) +// of the same size to optimize memory usage, see sync.Pool for more details +type SliceSyncPool struct { + sync.Pool + sliceSize int64 + zeroSlice []byte +} + +// Zero zeroes slice of any length +func (s *SliceSyncPool) Zero(b []byte) { + if len(b) <= len(s.zeroSlice) { + // zero all bytes in the slice to avoid + // data lingering in memory + copy(b, s.zeroSlice[:len(b)]) + } else { + // use working, but less optimal implementation + for i := 0; i < len(b); i++ { + b[i] = 0 + } + } +} + +// Get returns a new or already allocated slice +func (s *SliceSyncPool) Get() []byte { + pslice := s.Pool.Get().(*[]byte) + return *pslice +} + +// Put returns slice back to the pool +func (s *SliceSyncPool) Put(b []byte) { + s.Zero(b) + s.Pool.Put(&b) +} + +// Size returns a slice size +func (s *SliceSyncPool) Size() int64 { + return s.sliceSize +} + +// NewBufferSyncPool returns a new instance of sync pool of bytes.Buffers +// that creates new buffers with preallocated underlying buffer of size +func NewBufferSyncPool(size int64) *BufferSyncPool { + return &BufferSyncPool{ + size: size, + Pool: sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, size)) + }, + }, + } +} + +// BufferSyncPool is a sync pool of bytes.Buffer +type BufferSyncPool struct { + sync.Pool + size int64 +} + +// Put resets the buffer (does not free the memory) +// and returns it back to the pool. Users should be careful +// not to use the buffer (e.g. 
via Bytes) after it was returned +func (b *BufferSyncPool) Put(buf *bytes.Buffer) { + buf.Reset() + b.Pool.Put(buf) +} + +// Get returns a new or already allocated buffer +func (b *BufferSyncPool) Get() *bytes.Buffer { + return b.Pool.Get().(*bytes.Buffer) +} + +// Size returns default allocated buffer size +func (b *BufferSyncPool) Size() int64 { + return b.size +} diff --git a/lib/utils/slice_test.go b/lib/utils/slice_test.go new file mode 100644 index 0000000000000..a5fb29141bc56 --- /dev/null +++ b/lib/utils/slice_test.go @@ -0,0 +1,23 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestSlice tests sync pool holding slices - SliceSyncPool +func TestSlice(t *testing.T) { + pool := NewSliceSyncPool(1024) + // having a loop is not a guarantee that the same slice + // will be reused, but a good enough bet + for i := 0; i < 10; i++ { + slice := pool.Get() + assert.Len(t, slice, 1024, "Returned slice should have zero len and values") + for i := range slice { + assert.Equal(t, slice[i], byte(0), "Each slice element is zero byte") + } + copy(slice, []byte("just something to fill with")) + pool.Put(slice) + } +} diff --git a/lib/utils/utils.go b/lib/utils/utils.go index 6fa467c7bd4e0..123569310efc2 100644 --- a/lib/utils/utils.go +++ b/lib/utils/utils.go @@ -17,6 +17,7 @@ limitations under the License. package utils import ( + "context" "encoding/json" "fmt" "io" @@ -39,6 +40,47 @@ import ( log "github.com/sirupsen/logrus" ) +// WriteContextCloser provides close method with context +type WriteContextCloser interface { + Close(ctx context.Context) error + io.Writer +} + +// WriteCloserWithContext converts ContextCloser to io.Closer, +// whenever new Close method will be called, the ctx will be passed to it +func WriteCloserWithContext(ctx context.Context, closer WriteContextCloser) io.WriteCloser { + return &closerWithContext{ + WriteContextCloser: closer, + ctx: ctx, + } +} + +type closerWithContext struct { + WriteContextCloser + ctx context.Context +} + +// Close closes all resources and returns the result +func (c *closerWithContext) Close() error { + return c.WriteContextCloser.Close(c.ctx) +} + +// NilCloser returns closer if it's not nil +// otherwise returns a nop closer +func NilCloser(r io.Closer) io.Closer { + if r == nil { + return &nilCloser{} + } + return r +} + +type nilCloser struct { +} + +func (*nilCloser) Close() error { + return nil +} + // NopWriteCloser returns a WriteCloser with a no-op Close method wrapping // the provided Writer w func NopWriteCloser(r io.Writer) io.WriteCloser { diff --git a/lib/utils/utils_test.go b/lib/utils/utils_test.go index 57d055864e0a4..614f3a7219ab7 100644 --- a/lib/utils/utils_test.go +++ b/lib/utils/utils_test.go @@ -507,3 +507,34 @@ func (s *UtilsSuite) TestStringsSet(c *check.C) { c.Assert(out, check.HasLen, 0) c.Assert(out, check.NotNil) } + +// TestRepeatReader tests repeat reader +func (s *UtilsSuite) TestRepeatReader(c *check.C) { + type tc struct { + repeat byte + count int + expected string + } + tcs := []tc{ + { + repeat: byte('a'), + count: 1, + expected: "a", + }, + { + repeat: byte('a'), + count: 0, + expected: "", + }, + { + repeat: byte('a'), + count: 3, + expected: "aaa", + }, + } + for _, tc := range tcs { + data, err := ioutil.ReadAll(NewRepeatReader(tc.repeat, tc.count)) + c.Assert(err, check.IsNil) + c.Assert(string(data), check.Equals, tc.expected) + } +} diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go index 67190d09b6635..ac2ad1626fe87 100644 --- a/lib/web/apiserver.go 
+++ b/lib/web/apiserver.go @@ -653,7 +653,7 @@ func (h *Handler) getWebConfig(w http.ResponseWriter, r *http.Request, p httprou if err != nil { log.Errorf("Cannot retrieve ClusterConfig: %v.", err) } else { - canJoinSessions = clsCfg.GetSessionRecording() != services.RecordAtProxy + canJoinSessions = services.IsRecordAtProxy(clsCfg.GetSessionRecording()) == false } authSettings := ui.WebConfigAuthSettings{ diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index 326069f036946..03afca507c7ac 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -1,5 +1,5 @@ /* -Copyright 2015 Gravitational, Inc. +Copyright 2015-2020 Gravitational, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -71,7 +71,6 @@ import ( "github.com/golang/protobuf/proto" "github.com/jonboulle/clockwork" lemma_secret "github.com/mailgun/lemma/secret" - "github.com/pborman/uuid" "github.com/pquerna/otp/totp" "github.com/sirupsen/logrus" "github.com/tstranex/u2f" @@ -164,7 +163,13 @@ func (s *WebSuite) SetUpTest(c *C) { signer, err := sshutils.NewSigner(certs.Key, certs.Cert) c.Assert(err, IsNil) - nodeClient, err := s.server.NewClient(auth.TestBuiltin(teleport.RoleNode)) + nodeID := "node" + nodeClient, err := s.server.NewClient(auth.TestIdentity{ + I: auth.BuiltinRole{ + Role: teleport.RoleNode, + Username: nodeID, + }, + }) c.Assert(err, IsNil) // create SSH service: @@ -177,10 +182,11 @@ func (s *WebSuite) SetUpTest(c *C) { nodeDataDir, "", utils.NetAddr{}, + regular.SetUUID(nodeID), regular.SetNamespace(defaults.Namespace), regular.SetShell("/bin/sh"), regular.SetSessionServer(nodeClient), - regular.SetAuditLog(nodeClient), + regular.SetEmitter(nodeClient), regular.SetPAMConfig(&pam.Config{Enabled: false}), regular.SetBPF(&bpf.NOP{}), ) @@ -192,7 +198,13 @@ func (s *WebSuite) SetUpTest(c *C) { c.Assert(auth.CreateUploaderDir(nodeDataDir), IsNil) // create reverse tunnel service: - s.proxyClient, err = s.server.NewClient(auth.TestBuiltin(teleport.RoleProxy)) + proxyID := "proxy" + s.proxyClient, err = s.server.NewClient(auth.TestIdentity{ + I: auth.BuiltinRole{ + Role: teleport.RoleProxy, + Username: proxyID, + }, + }) c.Assert(err, IsNil) revTunListener, err := net.Listen("tcp", fmt.Sprintf("%v:0", s.server.ClusterName())) @@ -206,6 +218,7 @@ func (s *WebSuite) SetUpTest(c *C) { HostSigners: []ssh.Signer{signer}, LocalAuthClient: s.proxyClient, LocalAccessPoint: s.proxyClient, + Emitter: s.proxyClient, NewCachingAccessPoint: auth.NoCache, DirectClusters: []reversetunnel.DirectCluster{{Name: s.server.ClusterName(), Client: s.proxyClient}}, DataDir: c.MkDir(), @@ -222,9 +235,10 @@ func (s *WebSuite) SetUpTest(c *C) { c.MkDir(), "", utils.NetAddr{}, + regular.SetUUID(proxyID), regular.SetProxyMode(revTunServer), regular.SetSessionServer(s.proxyClient), - regular.SetAuditLog(s.proxyClient), + regular.SetEmitter(s.proxyClient), regular.SetNamespace(defaults.Namespace), regular.SetBPF(&bpf.NOP{}), ) @@ -1679,75 +1693,61 @@ func (s *WebSuite) TestConstructSSHResponseLegacy(c *C) { // TestSearchClusterEvents makes sure web API allows querying events by type. 
func (s *WebSuite) TestSearchClusterEvents(c *C) { - e1 := events.EventFields{ - events.EventID: uuid.New(), - events.EventType: "event.1", - events.EventCode: "event.1", - events.EventTime: s.clock.Now().Format(time.RFC3339), - } - e2 := events.EventFields{ - events.EventID: uuid.New(), - events.EventType: "event.2", - events.EventCode: "event.2", - events.EventTime: s.clock.Now().Format(time.RFC3339), - } - e3 := events.EventFields{ - events.EventID: uuid.New(), - events.EventType: "event.3", - events.EventCode: "event.3", - events.EventTime: s.clock.Now().Format(time.RFC3339), - } - e4 := events.EventFields{ - events.EventID: uuid.New(), - events.EventType: "event.3", - events.EventCode: "event.3", - events.EventTime: s.clock.Now().Format(time.RFC3339), - } - e5 := events.EventFields{ - events.EventID: uuid.New(), - events.EventType: "event.1", - events.EventCode: "event.1", - events.EventTime: s.clock.Now().Format(time.RFC3339), - } + sessionEvents := events.GenerateTestSession(events.SessionParams{ + PrintEvents: 3, + Clock: s.clock, + ServerID: s.proxy.ID(), + }) - for _, e := range []events.EventFields{e1, e2, e3, e4, e5} { - c.Assert(s.proxyClient.EmitAuditEvent(events.Event{Name: e.GetType()}, e), IsNil) + for _, e := range sessionEvents { + c.Assert(s.proxyClient.EmitAuditEvent(context.TODO(), e), IsNil) } + sessionStart := sessionEvents[0] + sessionPrint := sessionEvents[1] + sessionEnd := sessionEvents[4] + testCases := []struct { // Comment is the test case description. Comment string // Query is the search query sent to the API. Query url.Values // Result is the expected returned list of events. - Result []events.EventFields + Result []events.AuditEvent }{ { Comment: "Empty query", Query: url.Values{}, - Result: []events.EventFields{e1, e2, e3, e4, e5}, + Result: sessionEvents, }, { - Comment: "Query by single event type", - Query: url.Values{"include": []string{"event.1"}}, - Result: []events.EventFields{e1, e5}, + Comment: "Query by session start event", + Query: url.Values{"include": []string{sessionStart.GetType()}}, + Result: sessionEvents[:1], }, { - Comment: "Query by two event types", - Query: url.Values{"include": []string{"event.2;event.3"}}, - Result: []events.EventFields{e2, e3, e4}, + Comment: "Query session start and session end events", + Query: url.Values{"include": []string{sessionEnd.GetType() + ";" + sessionStart.GetType()}}, + Result: []events.AuditEvent{sessionStart, sessionEnd}, }, { - Comment: "Query with limit", - Query: url.Values{"include": []string{"event.2;event.3"}, "limit": []string{"1"}}, - Result: []events.EventFields{e2}, + Comment: "Query events with filter by type and limit", + Query: url.Values{ + "include": []string{sessionPrint.GetType() + ";" + sessionEnd.GetType()}, + "limit": []string{"1"}, + }, + Result: []events.AuditEvent{sessionPrint}, }, } pack := s.authPack(c, "foo") for _, tc := range testCases { - result := s.searchEvents(c, pack.clt, tc.Query, []string{"event.1", "event.2", "event.3"}) - c.Assert(result, DeepEquals, tc.Result, Commentf(tc.Comment)) + result := s.searchEvents(c, pack.clt, tc.Query, []string{sessionStart.GetType(), sessionPrint.GetType(), sessionEnd.GetType()}) + c.Assert(result, HasLen, len(tc.Result), Commentf(tc.Comment)) + for i, resultEvent := range result { + c.Assert(resultEvent.GetType(), Equals, tc.Result[i].GetType(), Commentf(tc.Comment)) + c.Assert(resultEvent.GetID(), Equals, tc.Result[i].GetID(), Commentf(tc.Comment)) + } } } diff --git a/rfd/0002-streaming.md b/rfd/0002-streaming.md new file mode 
100644
index 0000000000000..d84fab6f598cd
--- /dev/null
+++ b/rfd/0002-streaming.md
@@ -0,0 +1,289 @@
+---
+authors: Alexander Klizhentas (sasha@gravitational.com)
+state: discussion
+---
+
+# RFD 2 - Session Streaming
+
+## What
+
+Design and API of streaming and storing structured session events.
+
+## Why
+
+The existing API and design for sending and storing session events have several
+issues.
+
+In the pre-4.3 implementation, events were buffered on disk on proxies or nodes.
+This required encryption at rest and allowed attackers to tamper
+with event data. A session recording was uploaded as a single tarball, and the
+auth server had to unpack the tarball in memory to validate its contents,
+causing OOM and other performance issues. Events were not structured, and
+clients often omitted fields or sent wrong values that were not validated by the server.
+
+## Details
+
+### Structured Events
+
+Events have been refactored from unstructured to structured definitions generated
+from a protobuf spec.
+
+Each event embeds common required metadata:
+
+```protobuf
+// Metadata is a common event metadata
+message Metadata {
+    // Index is a monotonically incremented index in the event sequence
+    int64 Index = 1;
+
+    // Type is the event type
+    string Type = 2;
+
+    // ID is a unique event identifier
+    string ID = 3;
+
+    // Code is a unique event code
+    string Code = 4;
+
+    // Time is event time
+    google.protobuf.Timestamp Time = 5;
+}
+```
+
+This metadata is accompanied by common event methods:
+
+```go
+// GetType returns event type
+func (m *Metadata) GetType() string {
+    return m.Type
+}
+
+// SetType sets unique type
+func (m *Metadata) SetType(etype string) {
+    m.Type = etype
+}
+```
+
+These allow every event to implement a common interface:
+
+```go
+// AuditEvent represents audit event
+type AuditEvent interface {
+    // ProtoMarshaler implements efficient
+    // protobuf marshaling methods
+    ProtoMarshaler
+
+    // GetID returns unique event ID
+    GetID() string
+    // SetID sets unique event ID
+    SetID(id string)
+```
+
+**Session events**
+
+Session events embed session metadata:
+
+```protobuf
+// SessionMetadata is a common session event metadata
+message SessionMetadata {
+    // SessionID is a unique UUID of the session.
+    string SessionID = 1;
+}
+```
+
+They also implement extended interfaces:
+
+```go
+// ServerMetadataGetter represents an interface
+// that provides information about the event's server ID
+type ServerMetadataGetter interface {
+    // GetServerID returns event server ID
+    GetServerID() string
+
+    // GetServerNamespace returns event server namespace
+    GetServerNamespace() string
+}
+```
+
+This approach allows the common event interface to be converted to other
+event classes without casting to a specific type:
+
+```go
+    getter, ok := in.(events.SessionMetadataGetter)
+    if ok && getter.GetSessionID() != "" {
+        sessionID = getter.GetSessionID()
+    } else {
+```
+
+**Other event types**
+
+Other event types, such as events dealing with connections, embed other
+metadata, for example connection metadata:
+
+```protobuf
+// ConnectionMetadata contains connection info
+message ConnectionMetadata {
+    // LocalAddr is a target address on the host
+    string LocalAddr = 1;
+
+    // RemoteAddr is a client (user's) address
+    string RemoteAddr = 2;
+
+    // Protocol specifies protocol that was captured
+    string Protocol = 3;
+}
+```
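+
+As an illustration, here is a minimal sketch of how a consumer could rely on these
+optional getter interfaces to summarize an arbitrary event without switching on
+concrete event types; the `describeEvent` helper is hypothetical and not part of the API:
+
+```go
+// describeEvent builds a short description of any audit event, enriching it
+// with session and server metadata when the event implements the optional
+// getter interfaces.
+func describeEvent(in events.AuditEvent) string {
+    desc := in.GetType() + " [" + in.GetID() + "]"
+    if s, ok := in.(events.SessionMetadataGetter); ok && s.GetSessionID() != "" {
+        desc += " session=" + s.GetSessionID()
+    }
+    if srv, ok := in.(events.ServerMetadataGetter); ok && srv.GetServerID() != "" {
+        desc += " server=" + srv.GetServerID()
+    }
+    return desc
+}
+```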
+
+### Streams
+
+Streams are continuous sequences of events associated with a session.
+
+```go
+// Stream is used to create a continuous ordered sequence of events
+// associated with a session.
+type Stream interface {
+    // Status returns a channel receiving updates about the stream status:
+    // the last event index that was uploaded and the upload ID
+    Status() <-chan StreamStatus
+....
+}
+```
+
+Streamer is an interface for clients to send session events to the auth
+server as a continuous sequence of events:
+
+```go
+// Streamer creates and resumes event streams for session IDs
+type Streamer interface {
+    // CreateAuditStream creates event stream
+    CreateAuditStream(context.Context, session.ID) (Stream, error)
+    // ResumeAuditStream resumes the stream for session upload that
+    // has not been completed yet.
+    ResumeAuditStream(ctx context.Context, sid session.ID, uploadID string) (Stream, error)
+}
+```
+
+Clients can resume streams that were interrupted using the upload ID.
+
+Clients can use the stream status to create back-pressure
+(stop sending until the stream reports events as uploaded)
+or to resume the upload without resending all events.
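+
+A rough sketch of how a client could drive this API with back-pressure follows. It assumes
+the stream exposes `EmitAuditEvent` and `Complete` methods, which are elided from the
+`Stream` interface above, so the exact names are illustrative:
+
+```go
+// streamSession emits a session's events over a stream, waits on the status
+// channel every 100 events to apply back-pressure, and completes the stream
+// at the end.
+func streamSession(ctx context.Context, streamer events.Streamer, sid session.ID, sessionEvents []events.AuditEvent) error {
+    stream, err := streamer.CreateAuditStream(ctx, sid)
+    if err != nil {
+        return err
+    }
+    for i, event := range sessionEvents {
+        if err := stream.EmitAuditEvent(ctx, event); err != nil {
+            return err
+        }
+        if i%100 == 99 {
+            // Wait for the auth server to report upload progress before
+            // sending more. The reported upload ID could be persisted and
+            // later passed to ResumeAuditStream after an interruption.
+            select {
+            case <-stream.Status():
+            case <-ctx.Done():
+                return ctx.Err()
+            }
+        }
+    }
+    return stream.Complete(ctx)
+}
+```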
+
+### Uploaders
+
+The `MultipartUploader` interface handles multipart uploads and downloads for session streams.
+
+```go
+type MultipartUploader interface {
+    // CreateUpload creates a multipart upload
+    CreateUpload(ctx context.Context, sessionID session.ID) (*StreamUpload, error)
+    // CompleteUpload completes the upload
+    CompleteUpload(ctx context.Context, upload StreamUpload, parts []StreamPart) error
+    // UploadPart uploads part and returns the part
+    UploadPart(ctx context.Context, upload StreamUpload, partNumber int64, partBody io.ReadSeeker) (*StreamPart, error)
+    // ListParts returns all uploaded parts for the completed upload in sorted order
+    ListParts(ctx context.Context, upload StreamUpload) ([]StreamPart, error)
+    // ListUploads lists uploads that have been initiated but not completed, with
+    // earlier uploads returned first
+    ListUploads(ctx context.Context) ([]StreamUpload, error)
+}
+```
+
+Uploaders provide an abstraction over the multipart upload API, specifically S3 for AWS and GCS for Google Cloud.
+The stream on-disk format is optimized to support parallel uploads of events to S3 and resuming of uploads.
+
+### Session events storage format
+
+The storage format for session recordings is designed for fast marshaling and unmarshaling
+using protobuf, compression using gzip, and support for parallel uploads to S3 or GCS storage.
+
+Unlike the previous recording format, which used JSON and stored multiple files in a tarball,
+the V1 format represents a session as a continuous, globally ordered sequence of events
+serialized to protobuf.
+
+Each session is stored in one or more slices. Each slice is composed of three parts:
+
+1. The slice starts with a 24-byte version header:
+
+   * 8 bytes for the format version (used for future expansion)
+   * 8 bytes for the meaningful size of the part
+   * 8 bytes for the padding at the end of the slice (if present)
+
+2. The slice body consists of gzipped protobuf messages in binary format.
+
+3. Optional padding, if specified in the header, is used to
+ensure that slices meet the minimum slice size.
+
+The slice size is determined by S3 multipart upload requirements:
+
+https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
+
+This design allows the streamer to upload slices to S3-compatible APIs
+in parallel without buffering to disk.
+
+### GRPC
+
+Nodes and proxies use the GRPC interface implementation to submit
+individual global events and to create and resume streams.
+
+**GRPC/HTTPS protocol switching**
+
+The [ServeHTTP](https://godoc.org/google.golang.org/grpc#Server.ServeHTTP)
+compatibility handler used to serve GRPC over an HTTPS connection had to be replaced with
+the native GRPC transport because of the problems described [here](https://github.com/gravitational/oom).
+
+Because of that, protocol switching has to be done at the TLS level using NextProto.
+
+### Sync and async streams
+
+The V0 stream implementation is async: sessions are streamed to disk on the
+proxy and node and then uploaded as a single tarball.
+
+This created performance and stability problems for large uploads (Teleport could consume
+all disk space with multipart uploads) and security issues, because storage on disk
+required disk encryption to support FedRAMP mode.
+
+In V1, sync and async streams use the same GRPC API. The only difference
+is that in async mode the proxy and nodes first store events on disk
+and later replay them over GRPC, while in sync mode clients send GRPC
+events as the session generates them.
+
+Each session chooses a sync or async emitter based on the cluster configuration
+at the time the session is started.
+
+**Sync streams**
+
+The new recording modes `proxy-sync` and `node-sync` cause the proxy and node to send events
+directly to the auth server, which uploads the recordings to external storage
+without buffering the records on disk.
+
+This created a potential problem of resuming the session stream.
+The new audit writer takes advantage of stream status reporting and the
+new option to resume a stream in order to replay the events that have not been uploaded to the storage.
+
+The auth server never stores any local data for the stream and instead initiates a
+[multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html)
+that can be resumed by any other auth server. The loss of a single auth server
+will not lead to termination of sync sessions if another auth server is available
+to resume the stream.
+
+**Async streams**
+
+The default mode remains async: the file uploader stores the events on disk in the new protobuf format.
+
+The disk uploader now attempts to resume the upload to the auth server based on the
+last reported status if possible. This solves the problem of very large uploads being
+interrupted because of server overload or intermittent network problems, and the
+auth server can check every event as it is received, unlike V0, which required
+the tarball to be unpacked first.
+
+### Completing interrupted sessions
+
+In Teleport 4.3 and earlier, some streams and sessions were never uploaded
+to the auth server. A session would stay on the proxy or node without being
+uploaded, for example when the node or proxy crashed before marking
+the session on disk as complete.
+
+Switching to the S3-based multipart upload API allows the auth server to watch for uploads
+that have not been completed within a grace period (> 12 hours) and complete them.
diff --git a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go
new file mode 100644
index 0000000000000..3893c02d46241
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go
@@ -0,0 +1,1421 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. 
+ mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type isWkt interface { + XXX_WellKnownType() string +} + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if m.Indent != "" { + b, err = json.MarshalIndent(js, indent, m.Indent) + } else { + b, err = json.Marshal(js) + } + if err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. 
+ if wkt, ok := v.(isWkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + if s < 0 { + ns = -ns + } + x := fmt.Sprintf("%d.%09d", s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + //this is not a protobuf field + if valueField.Tag.Get("protobuf") == "" && valueField.Tag.Get("protobuf_oneof") == "" { + continue + } + + // IsNil will panic on most value kinds. 
+ switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + // If the map value is a cast type, it may not implement proto.Message, therefore + // allow the struct tag to declare the underlying message type. Change the property + // of the child types, use CustomType as a passer. CastType currently property is + // not used in json encoding. + if value.Kind() == reflect.Map { + if tag := valueField.Tag.Get("protobuf"); tag != "" { + for _, v := range strings.Split(tag, ",") { + if !strings.HasPrefix(v, "castvaluetype=") { + continue + } + v = strings.TrimPrefix(v, "castvaluetype=") + prop.MapValProp.CustomType = v + break + } + } + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. + ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
+ v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(isWkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + if wkt, ok := v.Interface().(isWkt); ok { + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + if t, ok := v.Interface().(time.Time); ok { + ts, err := types.TimestampProto(t) + if err != nil { + return err + } + return m.marshalValue(out, prop, reflect.ValueOf(ts), indent) + } + + if d, ok := v.Interface().(time.Duration); ok { + dur := types.DurationProto(d) + return m.marshalValue(out, prop, reflect.ValueOf(dur), indent) + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. 
+ enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + enumStr = string(data) + enumStr, err = strconv.Unquote(enumStr) + if err != nil { + return err + } + } + + isKnownEnum := enumStr != valStr + + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. + if v.Kind() == reflect.Struct { + i := v + if v.CanAddr() { + i = v.Addr() + } else { + i = reflect.New(v.Type()) + i.Elem().Set(v) + } + iface := i.Interface() + if iface == nil { + out.write(`null`) + return out.err + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + out.write(string(data)) + return nil + } + + pm, ok := iface.(proto.Message) + if !ok { + if prop.CustomType == "" { + return fmt.Errorf("%v does not implement proto.Message", v.Type()) + } + t := proto.MessageType(prop.CustomType) + if t == nil || !i.Type().ConvertibleTo(t) { + return fmt.Errorf("%v declared custom type %s but it is not convertible to %v", v.Type(), prop.CustomType, t) + } + pm = i.Convert(t).Interface().(proto.Message) + } + return m.marshalObject(out, pm, indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + // TODO handle map key prop properly + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. 
+type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&types.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(isWkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(isWkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, uerr := json.Marshal(jsonFields) + if uerr != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", uerr) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + target.Field(0).Set(reflect.ValueOf(map[string]*types.Value{})) + for k, jv := range m { + pv := &types.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*types.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&types.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_NumberValue{NumberValue: v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_StringValue{StringValue: v})) + } else if v, err := 
strconv.ParseBool(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_BoolValue{BoolValue: v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &types.ListValue{} + target.Field(0).Set(reflect.ValueOf(&types.Value_ListValue{ListValue: lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &types.Struct{} + target.Field(0).Set(reflect.ValueOf(&types.Value_StructValue{StructValue: sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + if t, ok := target.Addr().Interface().(*time.Time); ok { + ts := &types.Timestamp{} + if err := u.unmarshalValue(reflect.ValueOf(ts).Elem(), inputValue, prop); err != nil { + return err + } + tt, err := types.TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil + } + + if d, ok := target.Addr().Interface().(*time.Duration); ok { + dur := &types.Duration{} + if err := u.unmarshalValue(reflect.ValueOf(dur).Elem(), inputValue, prop); err != nil { + return err + } + dd, err := types.DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + if prop != nil && len(prop.CustomType) > 0 && target.CanAddr() { + if m, ok := target.Addr().Interface().(interface { + UnmarshalJSON([]byte) error + }); ok { + return json.Unmarshal(inputValue, m) + } + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. 
+ var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. + if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays + if targetType.Kind() == reflect.Slice { + if targetType.Elem().Kind() == reflect.Uint8 { + outRef := reflect.New(targetType) + outVal := outRef.Interface() + //CustomType with underlying type []byte + if _, ok := outVal.(interface { + UnmarshalJSON([]byte) error + }); ok { + if err := json.Unmarshal(inputValue, outVal); err != nil { + return err + } + target.Set(outRef.Elem()) + return nil + } + // Special case for encoded bytes. Pre-go1.5 doesn't support unmarshalling + // strings into aliased []byte types. + // https://github.com/golang/go/commit/4302fd0409da5e4f1d71471a6770dacdc3301197 + // https://github.com/golang/go/commit/c60707b14d6be26bf4213114d13070bff00d0b0a + var out []byte + if err := json.Unmarshal(inputValue, &out); err != nil { + return err + } + target.SetBytes(out) + return nil + } + + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. 
+ var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + if !k.Type().AssignableTo(targetType.Key()) { + k = k.Convert(targetType.Key()) + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. 
+type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.String: + return s[i].String() < s[j].String() + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(isWkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. + if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}. + if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. 
+ for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if pm, ok := v.Interface().(proto.Message); ok { + return checkRequiredFields(pm) + } + return nil +} diff --git a/vendor/github.com/gravitational/oxy/connlimit/connlimit.go b/vendor/github.com/gravitational/oxy/connlimit/connlimit.go index 9ae6236f36a3f..3466d448c3f69 100644 --- a/vendor/github.com/gravitational/oxy/connlimit/connlimit.go +++ b/vendor/github.com/gravitational/oxy/connlimit/connlimit.go @@ -9,7 +9,7 @@ import ( "github.com/gravitational/oxy/utils" ) -// Limiter tracks concurrent connection per token +// ConnLimiter tracks concurrent connection per token // and is capable of rejecting connections if they are failed type ConnLimiter struct { mutex *sync.Mutex @@ -23,6 +23,7 @@ type ConnLimiter struct { log utils.Logger } +// New returns a new connection limiter func New(next http.Handler, extract utils.SourceExtractor, maxConnections int64, options ...ConnLimitOption) (*ConnLimiter, error) { if extract == nil { return nil, fmt.Errorf("Extract function can not be nil") @@ -49,10 +50,13 @@ func New(next http.Handler, extract utils.SourceExtractor, maxConnections int64, return cl, nil } +// Wrap wraps HTTP handler func (cl *ConnLimiter) Wrap(h http.Handler) { cl.next = h } +// ServeHTTP reserves a connection, sends requests to handler and releases +// the connection func (cl *ConnLimiter) ServeHTTP(w http.ResponseWriter, r *http.Request) { token, amount, err := cl.extract.Extract(r) if err != nil { @@ -60,18 +64,19 @@ func (cl *ConnLimiter) ServeHTTP(w http.ResponseWriter, r *http.Request) { cl.errHandler.ServeHTTP(w, r, err) return } - if err := cl.acquire(token, amount); err != nil { + if err := cl.Acquire(token, amount); err != nil { cl.log.Infof("limiting request source %s: %v", token, err) cl.errHandler.ServeHTTP(w, r, err) return } - defer cl.release(token, amount) + defer cl.Release(token, amount) cl.next.ServeHTTP(w, r) } -func (cl *ConnLimiter) acquire(token string, amount int64) error { +// Acquire tries to acquire connections for a token +func (cl *ConnLimiter) Acquire(token string, amount int64) error { cl.mutex.Lock() defer cl.mutex.Unlock() @@ -85,7 +90,8 @@ func (cl *ConnLimiter) acquire(token string, amount int64) error { return nil } -func (cl *ConnLimiter) release(token string, amount int64) { +// Release releases connections for token +func (cl *ConnLimiter) Release(token string, amount int64) { cl.mutex.Lock() defer cl.mutex.Unlock() diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go index 2663c37e3f893..6d44ae5c096c0 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -173,13 +173,13 @@ func (ccc *lbCacheClientConn) RemoveSubConn(sc 
balancer.SubConn) { timer := time.AfterFunc(ccc.timeout, func() { ccc.mu.Lock() + defer ccc.mu.Unlock() if entry.abortDeleting { return } ccc.cc.RemoveSubConn(sc) delete(ccc.subConnToAddr, sc) delete(ccc.subConnCache, addr) - ccc.mu.Unlock() }) entry.cancel = func() { if !timer.Stop() { diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go new file mode 100644 index 0000000000000..09564db197fef --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gzip implements and registers the gzip compressor +// during the initialization. +// This package is EXPERIMENTAL. +package gzip + +import ( + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "sync" + + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the gzip compressor. +const Name = "gzip" + +func init() { + c := &compressor{} + c.poolCompressor.New = func() interface{} { + return &writer{Writer: gzip.NewWriter(ioutil.Discard), pool: &c.poolCompressor} + } + encoding.RegisterCompressor(c) +} + +type writer struct { + *gzip.Writer + pool *sync.Pool +} + +// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported). +// NOTE: this function must only be called during initialization time (i.e. in an init() function), +// and is not thread-safe. +// +// The error returned will be nil if the specified level is valid. 
+func SetLevel(level int) error { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return fmt.Errorf("grpc: invalid gzip compression level: %d", level) + } + c := encoding.GetCompressor(Name).(*compressor) + c.poolCompressor.New = func() interface{} { + w, err := gzip.NewWriterLevel(ioutil.Discard, level) + if err != nil { + panic(err) + } + return &writer{Writer: w, pool: &c.poolCompressor} + } + return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + z := c.poolCompressor.Get().(*writer) + z.Writer.Reset(w) + return z, nil +} + +func (z *writer) Close() error { + defer z.pool.Put(z) + return z.Writer.Close() +} + +type reader struct { + *gzip.Reader + pool *sync.Pool +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + z, inPool := c.poolDecompressor.Get().(*reader) + if !inPool { + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil + } + if err := z.Reset(r); err != nil { + c.poolDecompressor.Put(z) + return nil, err + } + return z, nil +} + +func (z *reader) Read(p []byte) (n int, err error) { + n, err = z.Reader.Read(p) + if err == io.EOF { + z.pool.Put(z) + } + return n, err +} + +func (c *compressor) Name() string { + return Name +} + +type compressor struct { + poolCompressor sync.Pool + poolDecompressor sync.Pool +} diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index c1a8340c5baef..6c1894335a8c5 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -1,5 +1,7 @@ module google.golang.org/grpc +go 1.14 + require ( cloud.google.com/go v0.26.0 // indirect github.com/BurntSushi/toml v0.3.1 // indirect diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 83439b5627d9b..4e26f6a1d6b51 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -138,7 +138,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) // Send initial settings as connection preface to client. - var isettings []http2.Setting + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. maxStreams := config.MaxStreams diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 9d212867ce2e5..8f5f3349d9063 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -667,6 +667,7 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList writer: w, fr: http2.NewFramer(w, r), } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) // Opt-in to Frame reuse API on framer to reduce garbage. // Frames aren't safe to read from after a subsequent call to ReadFrame. 
f.fr.SetReuseFrames() diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 5411a73a22e31..588850563852d 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.23.0" +const Version = "1.23.1" diff --git a/vendor/modules.txt b/vendor/modules.txt index b18c553eeee70..96148eb9e07ca 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -160,6 +160,7 @@ github.com/gizak/termui/widgets # github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d ## explicit github.com/gogo/protobuf/gogoproto +github.com/gogo/protobuf/jsonpb github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys @@ -222,7 +223,7 @@ github.com/gravitational/kingpin ## explicit github.com/gravitational/license github.com/gravitational/license/constants -# github.com/gravitational/oxy v0.0.0-20180629203109-e4a7e35311e6 +# github.com/gravitational/oxy v0.0.0-20200916204440-3eb06d921a1d ## explicit github.com/gravitational/oxy/connlimit github.com/gravitational/oxy/forward @@ -543,7 +544,7 @@ google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/googleapis/type/latlng google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.23.0 +# google.golang.org/grpc v1.23.1 ## explicit google.golang.org/grpc google.golang.org/grpc/balancer @@ -566,6 +567,7 @@ google.golang.org/grpc/credentials/google google.golang.org/grpc/credentials/internal google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog google.golang.org/grpc/internal