diff --git a/api/client/events.go b/api/client/events.go
index 70cf6e3630ba5..b3e8c28d61cb9 100644
--- a/api/client/events.go
+++ b/api/client/events.go
@@ -18,6 +18,7 @@ import (
 	"github.com/gravitational/trace"
 
 	"github.com/gravitational/teleport/api/client/proto"
+	accessmonitoringrulesv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/accessmonitoringrules/v1"
 	kubewaitingcontainerpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/kubewaitingcontainer/v1"
 	notificationsv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/notifications/v1"
 	"github.com/gravitational/teleport/api/types"
@@ -65,6 +66,10 @@ func EventToGRPC(in types.Event) (*proto.Event, error) {
 			out.Resource = &proto.Event_GlobalNotification{
 				GlobalNotification: r,
 			}
+		case *accessmonitoringrulesv1.AccessMonitoringRule:
+			out.Resource = &proto.Event_AccessMonitoringRule{
+				AccessMonitoringRule: r,
+			}
 		}
 	case *types.ResourceHeader:
 		out.Resource = &proto.Event_ResourceHeader{
@@ -479,6 +484,9 @@ func EventFromGRPC(in *proto.Event) (*types.Event, error) {
 	} else if r := in.GetGlobalNotification(); r != nil {
 		out.Resource = types.Resource153ToLegacy(r)
 		return &out, nil
+	} else if r := in.GetAccessMonitoringRule(); r != nil {
+		out.Resource = types.Resource153ToLegacy(r)
+		return &out, nil
 	} else {
 		return nil, trace.BadParameter("received unsupported resource %T", in.Resource)
 	}
diff --git a/integrations/access/common/config.go b/integrations/access/common/config.go
index ee640a878bc8f..e664373eae6b7 100644
--- a/integrations/access/common/config.go
+++ b/integrations/access/common/config.go
@@ -24,6 +24,7 @@ import (
 	"github.com/gravitational/trace"
 
 	"github.com/gravitational/teleport/api/client"
+	accessmonitoringrulesv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/accessmonitoringrules/v1"
 	"github.com/gravitational/teleport/api/types"
 	"github.com/gravitational/teleport/api/types/accesslist"
 	"github.com/gravitational/teleport/integrations/access/common/teleport"
@@ -57,6 +58,11 @@ func (w *wrappedClient) ListAccessLists(ctx context.Context, pageSize int, pageT
 	return w.Client.AccessListClient().ListAccessLists(ctx, pageSize, pageToken)
 }
 
+// ListAccessMonitoringRules lists current access monitoring rules.
+func (w *wrappedClient) ListAccessMonitoringRules(ctx context.Context, limit int, startKey string) ([]*accessmonitoringrulesv1.AccessMonitoringRule, string, error) {
+	return w.Client.AccessMonitoringRulesClient().ListAccessMonitoringRules(ctx, limit, startKey)
+}
+
 // wrapAPIClient will wrap the API client such that it conforms to the Teleport plugin client interface.
 func wrapAPIClient(clt *client.Client) teleport.Client {
 	return &wrappedClient{
diff --git a/integrations/access/common/teleport/client.go b/integrations/access/common/teleport/client.go
index 30d670096057f..8e61120314f55 100644
--- a/integrations/access/common/teleport/client.go
+++ b/integrations/access/common/teleport/client.go
@@ -22,6 +22,7 @@ import (
 	"context"
 
 	"github.com/gravitational/teleport/api/client/proto"
+	accessmonitoringrulesv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/accessmonitoringrules/v1"
 	"github.com/gravitational/teleport/api/types"
 	"github.com/gravitational/teleport/api/types/accesslist"
 	"github.com/gravitational/teleport/integrations/lib/plugindata"
@@ -39,4 +40,5 @@ type Client interface {
 	SetAccessRequestState(ctx context.Context, params types.AccessRequestUpdate) error
 	ListResources(ctx context.Context, req proto.ListResourcesRequest) (*types.ListResourcesResponse, error)
 	ListAccessLists(context.Context, int, string) ([]*accesslist.AccessList, string, error)
+	ListAccessMonitoringRules(ctx context.Context, limit int, startKey string) ([]*accessmonitoringrulesv1.AccessMonitoringRule, string, error)
 }
diff --git a/lib/auth/accesspoint/accesspoint.go b/lib/auth/accesspoint/accesspoint.go
index 9dca52fec420a..94f2756b967b6 100644
--- a/lib/auth/accesspoint/accesspoint.go
+++ b/lib/auth/accesspoint/accesspoint.go
@@ -144,6 +144,7 @@ func NewAccessCache(cfg AccessCacheConfig) (*cache.Cache, error) {
 		Notifications:         cfg.Services,
 		Okta:                  cfg.Services.OktaClient(),
 		AccessLists:           cfg.Services.AccessListClient(),
+		AccessMonitoringRules: cfg.Services.AccessMonitoringRuleClient(),
 		SecReports:            cfg.Services.SecReportsClient(),
 		UserLoginStates:       cfg.Services.UserLoginStateClient(),
 		Integrations:          cfg.Services,
diff --git a/lib/auth/api.go b/lib/auth/api.go
index 7b5616484cb60..7a105b3f462a7 100644
--- a/lib/auth/api.go
+++ b/lib/auth/api.go
@@ -27,6 +27,7 @@ import (
 	"google.golang.org/grpc"
 
 	"github.com/gravitational/teleport/api/client/proto"
+	accessmonitoringrules "github.com/gravitational/teleport/api/gen/proto/go/teleport/accessmonitoringrules/v1"
 	integrationpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/integration/v1"
 	kubewaitingcontainerpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/kubewaitingcontainer/v1"
 	userspb "github.com/gravitational/teleport/api/gen/proto/go/teleport/users/v1"
@@ -1154,6 +1155,11 @@ type Cache interface {

 	// NotificationsGetter defines list methods for notifications.
 	services.NotificationGetter
+
+	// ListAccessMonitoringRules returns a paginated list of access monitoring rules.
+	ListAccessMonitoringRules(ctx context.Context, limit int, startKey string) ([]*accessmonitoringrules.AccessMonitoringRule, string, error)
+	// GetAccessMonitoringRule returns the specified access monitoring rule.
+	GetAccessMonitoringRule(ctx context.Context, name string) (*accessmonitoringrules.AccessMonitoringRule, error)
 }

 type NodeWrapper struct {
diff --git a/lib/auth/grpcserver.go b/lib/auth/grpcserver.go
index 2b5b0141f31d3..f5092cba13ad0 100644
--- a/lib/auth/grpcserver.go
+++ b/lib/auth/grpcserver.go
@@ -48,6 +48,7 @@ import (
 	authpb "github.com/gravitational/teleport/api/client/proto"
 	"github.com/gravitational/teleport/api/constants"
 	"github.com/gravitational/teleport/api/gen/proto/go/assist/v1"
+	accessmonitoringrules "github.com/gravitational/teleport/api/gen/proto/go/teleport/accessmonitoringrules/v1"
 	auditlogpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/auditlog/v1"
 	clusterconfigpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/clusterconfig/v1"
 	dbobjectpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/dbobject/v1"
@@ -71,6 +72,7 @@ import (
 	"github.com/gravitational/teleport/api/types/installers"
 	"github.com/gravitational/teleport/api/types/wrappers"
 	apiutils "github.com/gravitational/teleport/api/utils"
+	"github.com/gravitational/teleport/lib/accessmonitoringrules/accessmonitoringrulesv1"
 	"github.com/gravitational/teleport/lib/auth/assist/assistv1"
 	"github.com/gravitational/teleport/lib/auth/clusterconfig/clusterconfigv1"
 	"github.com/gravitational/teleport/lib/auth/dbobject/dbobjectv1"
@@ -5445,6 +5447,16 @@ func NewGRPCServer(cfg GRPCServerConfig) (*GRPCServer, error) {
 	}
 	kubewaitingcontainerpb.RegisterKubeWaitingContainersServiceServer(server, kubeWaitingContsServer)
 
+	accessMonitoringRuleServer, err := accessmonitoringrulesv1.NewService(&accessmonitoringrulesv1.ServiceConfig{
+		Authorizer: cfg.Authorizer,
+		Backend:    cfg.AuthServer.Services,
+		Cache:      cfg.AuthServer.Cache,
+	})
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+	accessmonitoringrules.RegisterAccessMonitoringRulesServiceServer(server, accessMonitoringRuleServer)
+
 	// Only register the service if this is an open source build. Enterprise builds
 	// register the actual service via an auth plugin, if we register here then all
 	// Enterprise builds would fail with a duplicate service registered error.
diff --git a/lib/cache/cache.go b/lib/cache/cache.go
index 5cd307b9e26d2..920d24df96695 100644
--- a/lib/cache/cache.go
+++ b/lib/cache/cache.go
@@ -37,6 +37,7 @@ import (
 	"github.com/gravitational/teleport"
 	"github.com/gravitational/teleport/api/client/proto"
 	apidefaults "github.com/gravitational/teleport/api/defaults"
+	accessmonitoringrulesv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/accessmonitoringrules/v1"
 	kubewaitingcontainerpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/kubewaitingcontainer/v1"
 	notificationsv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/notifications/v1"
 	userspb "github.com/gravitational/teleport/api/gen/proto/go/teleport/users/v1"
@@ -172,6 +173,7 @@ func ForAuth(cfg Config) Config {
 		{Kind: types.KindKubeWaitingContainer},
 		{Kind: types.KindNotification},
 		{Kind: types.KindGlobalNotification},
+		{Kind: types.KindAccessMonitoringRule},
 	}
 	cfg.QueueSize = defaults.AuthQueueSize
 	// We don't want to enable partial health for auth cache because auth uses an event stream
@@ -549,6 +551,7 @@ type Cache struct {
 	lowVolumeEventsFanout *utils.RoundRobin[*services.FanoutV2]
 	kubeWaitingContsCache *local.KubeWaitingContainerService
 	notificationsCache    services.Notifications
+	accessMonitoringRuleCache services.AccessMonitoringRules
 
 	// closed indicates that the cache has been closed
 	closed atomic.Bool
@@ -715,6 +718,8 @@ type Config struct {
 	KubeWaitingContainers services.KubeWaitingContainer
 	// Notifications is the notifications service
 	Notifications services.Notifications
+	// AccessMonitoringRules is the access monitoring rules service.
+	AccessMonitoringRules services.AccessMonitoringRules
 	// Backend is a backend for local cache
 	Backend backend.Backend
 	// MaxRetryPeriod is the maximum period between cache retries on failures
@@ -920,6 +925,12 @@ func New(config Config) (*Cache, error) {
 		return nil, trace.Wrap(err)
 	}
 
+	accessMonitoringRuleCache, err := local.NewAccessMonitoringRulesService(config.Backend)
+	if err != nil {
+		cancel()
+		return nil, trace.Wrap(err)
+	}
+
 	fanout := services.NewFanoutV2(services.FanoutV2Config{})
 	lowVolumeFanouts := make([]*services.FanoutV2, 0, config.FanoutShards)
 	for i := 0; i < config.FanoutShards; i++ {
@@ -956,6 +967,7 @@ func New(config Config) (*Cache, error) {
 		webSessionCache:              local.NewIdentityService(config.Backend).WebSessions(),
 		webTokenCache:                local.NewIdentityService(config.Backend).WebTokens(),
 		windowsDesktopsCache:         local.NewWindowsDesktopService(config.Backend),
+		accessMonitoringRuleCache:    accessMonitoringRuleCache,
 		samlIdPServiceProvidersCache: samlIdPServiceProvidersCache,
 		userGroupsCache:              userGroupsCache,
 		oktaCache:                    oktaCache,
@@ -3108,11 +3120,38 @@ func (c *Cache) ListGlobalNotifications(ctx context.Context, pageSize int, start
 		return nil, "", trace.Wrap(err)
 	}
 	defer rg.Release()
-
 	out, nextKey, err := rg.reader.ListGlobalNotifications(ctx, pageSize, startKey)
 	return out, nextKey, trace.Wrap(err)
 }
 
+// ListAccessMonitoringRules returns a paginated list of access monitoring rules.
+func (c *Cache) ListAccessMonitoringRules(ctx context.Context, pageSize int, nextToken string) ([]*accessmonitoringrulesv1.AccessMonitoringRule, string, error) {
+	ctx, span := c.Tracer.Start(ctx, "cache/ListAccessMonitoringRules")
+	defer span.End()
+
+	rg, err := readCollectionCache(c, c.collections.accessMonitoringRules)
+
+	if err != nil {
+		return nil, "", trace.Wrap(err)
+	}
+	defer rg.Release()
+	out, nextKey, err := rg.reader.ListAccessMonitoringRules(ctx, pageSize, nextToken)
+	return out, nextKey, trace.Wrap(err)
+}
+
+// GetAccessMonitoringRule returns the specified AccessMonitoringRule resource.
+func (c *Cache) GetAccessMonitoringRule(ctx context.Context, name string) (*accessmonitoringrulesv1.AccessMonitoringRule, error) {
+	ctx, span := c.Tracer.Start(ctx, "cache/GetAccessMonitoringRule")
+	defer span.End()
+
+	rg, err := readCollectionCache(c, c.collections.accessMonitoringRules)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+	defer rg.Release()
+	return rg.reader.GetAccessMonitoringRule(ctx, name)
+}
+
 // ListResources is a part of auth.Cache implementation
 func (c *Cache) ListResources(ctx context.Context, req proto.ListResourcesRequest) (*types.ListResourcesResponse, error) {
 	ctx, span := c.Tracer.Start(ctx, "cache/ListResources")
diff --git a/lib/cache/cache_test.go b/lib/cache/cache_test.go
index 30b5f7a4bfa7b..32872b30b6dae 100644
--- a/lib/cache/cache_test.go
+++ b/lib/cache/cache_test.go
@@ -42,6 +42,7 @@ import (
 	"github.com/gravitational/teleport/api/client"
 	"github.com/gravitational/teleport/api/client/proto"
 	apidefaults "github.com/gravitational/teleport/api/defaults"
+	accessmonitoringrulesv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/accessmonitoringrules/v1"
 	headerv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/header/v1"
 	kubewaitingcontainerpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/kubewaitingcontainer/v1"
 	notificationsv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/notifications/v1"
@@ -117,6 +118,7 @@ type testPack struct {
 	accessLists           services.AccessLists
 	kubeWaitingContainers services.KubeWaitingContainer
 	notifications         services.Notifications
+	accessMonitoringRules services.AccessMonitoringRules
 }

 // testFuncs are functions to support testing an object in a cache.
@@ -304,6 +306,11 @@ func newPackWithoutCache(dir string, opts ...packOption) (*testPack, error) {
 		return nil, trace.Wrap(err)
 	}
 	p.accessLists = accessListsSvc
+	accessMonitoringRuleService, err := local.NewAccessMonitoringRulesService(p.backend)
+	if err != nil {
+		return nil, trace.Wrap(err)
+	}
+	p.accessMonitoringRules = accessMonitoringRuleService
 
 	kubeWaitingContSvc, err := local.NewKubeWaitingContainerService(p.backend)
 	if err != nil {
@@ -359,6 +366,7 @@ func newPack(dir string, setupConfig func(c Config) Config, opts ...packOption) 
 		AccessLists:           p.accessLists,
 		KubeWaitingContainers: p.kubeWaitingContainers,
 		Notifications:         p.notifications,
+		AccessMonitoringRules: p.accessMonitoringRules,
 		MaxRetryPeriod:        200 * time.Millisecond,
 		EventsC:               p.eventsC,
 	}))
@@ -759,6 +767,7 @@ func TestCompletenessInit(t *testing.T) {
 		AccessLists:           p.accessLists,
 		KubeWaitingContainers: p.kubeWaitingContainers,
 		Notifications:         p.notifications,
+		AccessMonitoringRules: p.accessMonitoringRules,
 		MaxRetryPeriod:        200 * time.Millisecond,
 		EventsC:               p.eventsC,
 	}))
@@ -833,6 +842,7 @@ func TestCompletenessReset(t *testing.T) {
 		AccessLists:           p.accessLists,
 		KubeWaitingContainers: p.kubeWaitingContainers,
 		Notifications:         p.notifications,
+		AccessMonitoringRules: p.accessMonitoringRules,
 		MaxRetryPeriod:        200 * time.Millisecond,
 		EventsC:               p.eventsC,
 	}))
@@ -1019,6 +1029,7 @@ func TestListResources_NodesTTLVariant(t *testing.T) {
 		AccessLists:           p.accessLists,
 		KubeWaitingContainers: p.kubeWaitingContainers,
 		Notifications:         p.notifications,
+		AccessMonitoringRules: p.accessMonitoringRules,
 		MaxRetryPeriod:        200 * time.Millisecond,
 		EventsC:               p.eventsC,
 		neverOK:               true, // ensure reads are never healthy
@@ -1104,6 +1115,7 @@ func initStrategy(t *testing.T) {
 		AccessLists:           p.accessLists,
 		KubeWaitingContainers: p.kubeWaitingContainers,
 		Notifications:         p.notifications,
+		AccessMonitoringRules: p.accessMonitoringRules,
 		MaxRetryPeriod:        200 * time.Millisecond,
 		EventsC:               p.eventsC,
 	}))
@@ -3099,6 +3111,7 @@ func TestCacheWatchKindExistsInEvents(t *testing.T) {
 		types.KindKubeWaitingContainer: newKubeWaitingContainer(t),
 		types.KindNotification:         types.Resource153ToLegacy(newUserNotification(t, "test")),
 		types.KindGlobalNotification:   types.Resource153ToLegacy(newGlobalNotification(t, "test")),
+		types.KindAccessMonitoringRule: types.Resource153ToLegacy(newAccessMonitoringRule(t)),
 	}
 
 	for name, cfg := range cases {
@@ -3587,6 +3600,16 @@ func newGlobalNotification(t *testing.T, description string) *notificationsv1.Gl
 	return notification
 }
 
+func newAccessMonitoringRule(t *testing.T) *accessmonitoringrulesv1.AccessMonitoringRule {
+	t.Helper()
+	notification := &accessmonitoringrulesv1.AccessMonitoringRule{
+		Spec: &accessmonitoringrulesv1.AccessMonitoringRuleSpec{
+			Notification: &accessmonitoringrulesv1.Notification{},
+		},
+	}
+	return notification
+}
+
 func withKeepalive[T any](fn func(context.Context, T) (*types.KeepAlive, error)) func(context.Context, T) error {
 	return func(ctx context.Context, resource T) error {
 		_, err := fn(ctx, resource)
diff --git a/lib/cache/collections.go b/lib/cache/collections.go
index 6d32902fd7a16..1eb3f968684a1 100644
--- a/lib/cache/collections.go
+++ b/lib/cache/collections.go
@@ -28,6 +28,7 @@ import (
 	"github.com/gravitational/teleport/api/client"
 	"github.com/gravitational/teleport/api/client/proto"
 	apidefaults "github.com/gravitational/teleport/api/defaults"
+	accessmonitoringrulesv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/accessmonitoringrules/v1"
 	kubewaitingcontainerpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/kubewaitingcontainer/v1"
 	notificationsv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/notifications/v1"
 	userspb "github.com/gravitational/teleport/api/gen/proto/go/teleport/users/v1"
@@ -237,6 +238,7 @@ type cacheCollections struct {
 	windowsDesktopServices collectionReader[windowsDesktopServiceGetter]
 	userNotifications      collectionReader[notificationGetter]
 	globalNotifications    collectionReader[notificationGetter]
+	accessMonitoringRules  collectionReader[accessMonitoringRuleGetter]
 }

 // setupCollections returns a registry of collections.
@@ -696,6 +698,12 @@ func setupCollections(c *Cache, watches []types.WatchKind) (*cacheCollections, e
 				watch: watch,
 			}
 			collections.byKind[resourceKind] = collections.globalNotifications
+		case types.KindAccessMonitoringRule:
+			if c.AccessMonitoringRules == nil {
+				return nil, trace.BadParameter("missing parameter AccessMonitoringRules")
+			}
+			collections.accessMonitoringRules = &genericCollection[*accessmonitoringrulesv1.AccessMonitoringRule, accessMonitoringRuleGetter, accessMonitoringRulesExecutor]{cache: c, watch: watch}
+			collections.byKind[resourceKind] = collections.accessMonitoringRules
 		default:
 			return nil, trace.BadParameter("resource %q is not supported", watch.Kind)
 		}
@@ -2905,7 +2913,6 @@ func (userNotificationExecutor) getAll(ctx context.Context, cache *Cache, loadSecrets bool) ([]*notificationsv1.Notification, error) {
 		if err != nil {
 			return nil, trace.Wrap(err)
 		}
 
-
 		notifications = append(notifications, notifs...)
 		if nextKey == "" {
@@ -3021,3 +3028,51 @@ func (globalNotificationExecutor) getReader(cache *Cache, cacheOK bool) notifica
 }
 
 var _ executor[*notificationsv1.GlobalNotification, notificationGetter] = globalNotificationExecutor{}
+
+type accessMonitoringRulesExecutor struct{}
+
+func (accessMonitoringRulesExecutor) getAll(ctx context.Context, cache *Cache, loadSecrets bool) ([]*accessmonitoringrulesv1.AccessMonitoringRule, error) {
+	var resources []*accessmonitoringrulesv1.AccessMonitoringRule
+	var nextToken string
+	for {
+		var page []*accessmonitoringrulesv1.AccessMonitoringRule
+		var err error
+		page, nextToken, err = cache.AccessMonitoringRules.ListAccessMonitoringRules(ctx, 0 /* page size */, nextToken)
+		if err != nil {
+			return nil, trace.Wrap(err)
+		}
+		resources = append(resources, page...)
+
+		if nextToken == "" {
+			break
+		}
+	}
+	return resources, nil
+}
+
+func (accessMonitoringRulesExecutor) upsert(ctx context.Context, cache *Cache, resource *accessmonitoringrulesv1.AccessMonitoringRule) error {
+	_, err := cache.accessMonitoringRuleCache.UpsertAccessMonitoringRule(ctx, resource)
+	return trace.Wrap(err)
+}
+
+func (accessMonitoringRulesExecutor) deleteAll(ctx context.Context, cache *Cache) error {
+	return cache.accessMonitoringRuleCache.DeleteAllAccessMonitoringRules(ctx)
+}
+
+func (accessMonitoringRulesExecutor) delete(ctx context.Context, cache *Cache, resource types.Resource) error {
+	return cache.accessMonitoringRuleCache.DeleteAccessMonitoringRule(ctx, resource.GetName())
+}
+
+func (accessMonitoringRulesExecutor) isSingleton() bool { return false }
+
+func (accessMonitoringRulesExecutor) getReader(cache *Cache, cacheOK bool) accessMonitoringRuleGetter {
+	if cacheOK {
+		return cache.accessMonitoringRuleCache
+	}
+	return cache.Config.AccessMonitoringRules
+}
+
+type accessMonitoringRuleGetter interface {
+	GetAccessMonitoringRule(ctx context.Context, name string) (*accessmonitoringrulesv1.AccessMonitoringRule, error)
+	ListAccessMonitoringRules(ctx context.Context, limit int, startKey string) ([]*accessmonitoringrulesv1.AccessMonitoringRule, string, error)
+}
diff --git a/lib/services/local/events.go b/lib/services/local/events.go
index 50df4333e46a5..855b813ad0599 100644
--- a/lib/services/local/events.go
+++ b/lib/services/local/events.go
@@ -201,6 +201,8 @@ func (e *EventsService) NewWatcher(ctx context.Context, watch types.Watch) (type
 			parser = newUserNotificationParser()
 		case types.KindGlobalNotification:
 			parser = newGlobalNotificationParser()
+		case types.KindAccessMonitoringRule:
+			parser = newAccessMonitoringRuleParser()
 		default:
 			if watch.AllowPartialSuccess {
 				continue
@@ -1952,6 +1954,35 @@ func (p *kubeWaitingContainerParser) parse(event backend.Event) (types.Resource,
 	}
 }
 
+func newAccessMonitoringRuleParser() *AccessMonitoringRuleParser {
+	return &AccessMonitoringRuleParser{
+		baseParser: newBaseParser(backend.ExactKey(accessMonitoringRulesPrefix)),
+	}
+}
+
+type AccessMonitoringRuleParser struct {
+	baseParser
+}
+
+func (p *AccessMonitoringRuleParser) parse(event backend.Event) (types.Resource, error) {
+	switch event.Type {
+	case types.OpDelete:
+		return resourceHeader(event, types.KindAccessMonitoringRule, types.V1, 0)
+	case types.OpPut:
+		r, err := services.UnmarshalAccessMonitoringRule(event.Item.Value,
+			services.WithResourceID(event.Item.ID),
+			services.WithExpires(event.Item.Expires),
+			services.WithRevision(event.Item.Revision),
+		)
+		if err != nil {
+			return nil, trace.Wrap(err)
+		}
+		return types.Resource153ToLegacy(r), nil
+	default:
+		return nil, trace.BadParameter("event %v is not supported", event.Type)
+	}
+}
+
 func newUserNotificationParser() *userNotificationParser {
 	return &userNotificationParser{
 		baseParser: newBaseParser(backend.Key(notificationsUserSpecificPrefix)),
diff --git a/lib/services/presets.go b/lib/services/presets.go
index a42fb21917845..7e8d2dbc48809 100644
--- a/lib/services/presets.go
+++ b/lib/services/presets.go
@@ -173,6 +173,7 @@ func NewPresetEditorRole() types.Role {
 					types.NewRule(types.KindAuditQuery, append(RW(), types.VerbUse)),
 					types.NewRule(types.KindAccessGraph, RW()),
 					types.NewRule(types.KindServerInfo, RW()),
+					types.NewRule(types.KindAccessMonitoringRule, RW()),
 				},
 			},
 		},
diff --git a/tool/tctl/common/resource_command.go b/tool/tctl/common/resource_command.go
index a9d557d763999..91e6679cadbda 100644
--- a/tool/tctl/common/resource_command.go
+++ b/tool/tctl/common/resource_command.go
@@ -154,6 +154,7 @@ func (rc *ResourceCommand) Initialize(app *kingpin.Application, config *servicec
 		types.KindBot:                      rc.createBot,
 		types.KindDatabaseObjectImportRule: rc.createDatabaseObjectImportRule,
 		types.KindDatabaseObject:           rc.createDatabaseObject,
+		types.KindAccessMonitoringRule:     rc.createAccessMonitoringRule,
 	}
 	rc.UpdateHandlers = map[ResourceKind]ResourceCreateHandler{
 		types.KindUser: rc.updateUser,
@@ -164,6 +165,7 @@ func (rc *ResourceCommand) Initialize(app *kingpin.Application, config *servicec
 		types.KindClusterNetworkingConfig: rc.updateClusterNetworkingConfig,
 		types.KindClusterAuthPreference:   rc.updateAuthPreference,
 		types.KindSessionRecordingConfig:  rc.updateSessionRecordingConfig,
+		types.KindAccessMonitoringRule:    rc.updateAccessMonitoringRule,
 	}
 
 	rc.config = config
@@ -1713,6 +1715,11 @@ func (rc *ResourceCommand) Delete(ctx context.Context, client *auth.Client) (err
 			return trace.Wrap(err)
 		}
 		fmt.Printf("Object %q has been deleted\n", rc.ref.Name)
+	case types.KindAccessMonitoringRule:
+		if err := client.AccessMonitoringRuleClient().DeleteAccessMonitoringRule(ctx, rc.ref.Name); err != nil {
+			return trace.Wrap(err)
+		}
+		fmt.Printf("Access monitoring rule %q has been deleted\n", rc.ref.Name)
 	default:
 		return trace.BadParameter("deleting resources of type %q is not supported", rc.ref.Kind)
 	}
@@ -2919,3 +2926,37 @@ func (rc *ResourceCommand) createSecurityReport(ctx context.Context, client *aut
 	}
 	return nil
 }
+
+func (rc *ResourceCommand) createAccessMonitoringRule(ctx context.Context, client *auth.Client, raw services.UnknownResource) error {
+	in, err := services.UnmarshalAccessMonitoringRule(raw.Raw)
+	if err != nil {
+		return trace.Wrap(err)
+	}
+
+	if rc.IsForced() {
+		if _, err = client.AccessMonitoringRuleClient().UpsertAccessMonitoringRule(ctx, in); err != nil {
+			return trace.Wrap(err)
+		}
+		fmt.Printf("access monitoring rule %q has been created\n", in.GetMetadata().GetName())
+		return nil
+	}
+
+	if _, err = client.AccessMonitoringRuleClient().CreateAccessMonitoringRule(ctx, in); err != nil {
+		return trace.Wrap(err)
+	}
+
+	fmt.Printf("access monitoring rule %q has been created\n", in.GetMetadata().GetName())
+	return nil
+}
+
+func (rc *ResourceCommand) updateAccessMonitoringRule(ctx context.Context, client *auth.Client, raw services.UnknownResource) error {
+	in, err := services.UnmarshalAccessMonitoringRule(raw.Raw)
+	if err != nil {
+		return trace.Wrap(err)
+	}
+	if _, err := client.AccessMonitoringRuleClient().UpdateAccessMonitoringRule(ctx, in); err != nil {
+		return trace.Wrap(err)
+	}
+	fmt.Printf("access monitoring rule %q has been updated\n", in.GetMetadata().GetName())
+	return nil
+}