diff --git a/lib/auth/auth_with_roles.go b/lib/auth/auth_with_roles.go index dff5dcd127a6e..e20c2cf262401 100644 --- a/lib/auth/auth_with_roles.go +++ b/lib/auth/auth_with_roles.go @@ -1627,112 +1627,43 @@ func (a *ServerWithRoles) ListUnifiedResources(ctx context.Context, req *proto.L }() startFetch := time.Now() - unifiedResources, err := a.authServer.UnifiedResourceCache.GetUnifiedResources(ctx) - if err != nil { - return nil, trace.Wrap(err) - } - - elapsedFetch = time.Since(startFetch) - startFilter := time.Now() - for _, resource := range unifiedResources { - switch r := resource.(type) { - case types.Server: - { - if err := a.checkAccessToNode(r); err != nil { - if trace.IsAccessDenied(err) { - continue - } - - return nil, trace.Wrap(err) - } - - filteredResources = append(filteredResources, resource) - } - case types.DatabaseServer: - { - if err := a.checkAccessToDatabase(r.GetDatabase()); err != nil { - if trace.IsAccessDenied(err) { - continue - } - - return nil, trace.Wrap(err) - } - - filteredResources = append(filteredResources, resource) - } - - case types.AppServer: - { - if err := a.checkAccessToApp(r.GetApp()); err != nil { - if trace.IsAccessDenied(err) { - continue - } - - return nil, trace.Wrap(err) - } - - filteredResources = append(filteredResources, resource) - } - case types.SAMLIdPServiceProvider: - { - if err := a.action(apidefaults.Namespace, types.KindSAMLIdPServiceProvider, types.VerbList); err == nil { - filteredResources = append(filteredResources, resource) - } - } - case types.KubeServer: - kube := r.GetCluster() - if err := a.checkAccessToKubeCluster(kube); err != nil { - if trace.IsAccessDenied(err) { - continue - } - - return nil, trace.Wrap(err) - } - - filteredResources = append(filteredResources, kube) - case types.WindowsDesktop: - { - if err := a.checkAccessToWindowsDesktop(r); err != nil { - if trace.IsAccessDenied(err) { - continue - } - - return nil, trace.Wrap(err) - } - - filteredResources = append(filteredResources, 
resource) - } - } - } - elapsedFilter = time.Since(startFilter) - - if req.SortBy.Field != "" { - if err := filteredResources.SortByCustom(req.SortBy); err != nil { - return nil, trace.Wrap(err, "sorting unified resources") - } - } - - // Apply request filters and get pagination info. - resp, err := local.FakePaginate(filteredResources, local.FakePaginateParams{ - Limit: req.Limit, + filter := services.MatchResourceFilter{ Labels: req.Labels, SearchKeywords: req.SearchKeywords, PredicateExpression: req.PredicateExpression, - StartKey: req.StartKey, Kinds: req.Kinds, - }) + } + + resourceChecker, err := a.newResourceAccessChecker(types.KindUnifiedResource) if err != nil { return nil, trace.Wrap(err) } - paginatedResources, err := a.MakePaginatedResources(types.KindUnifiedResource, resp.Resources) + unifiedResources, nextKey, err := a.authServer.UnifiedResourceCache.IterateUnifiedResources(ctx, func(resource types.ResourceWithLabels) (bool, error) { + if err := resourceChecker.CanAccess(resource); err != nil { + if trace.IsAccessDenied(err) { + return false, nil + } + return false, trace.Wrap(err) + } + match, err := services.MatchResourceByFilters(resource, filter, nil) + return match, trace.Wrap(err) + }, req) + if err != nil { + return nil, trace.Wrap(err, "filtering unified resources") + } + + elapsedFetch = time.Since(startFetch) + elapsedFilter = time.Since(startFilter) + + paginatedResources, err := services.MakePaginatedResources(types.KindUnifiedResource, unifiedResources) if err != nil { return nil, trace.Wrap(err, "making paginated unified resources") } return &proto.ListUnifiedResourcesResponse{ - NextKey: resp.NextKey, + NextKey: nextKey, Resources: paginatedResources, }, nil } @@ -2055,7 +1986,7 @@ func (r resourceChecker) CanAccess(resource types.Resource) error { // newResourceAccessChecker creates a resourceAccessChecker for the provided resource type func (a *ServerWithRoles) newResourceAccessChecker(resource string) (resourceAccessChecker, error) { 
switch resource { - case types.KindAppServer, types.KindDatabaseServer, types.KindDatabaseService, types.KindWindowsDesktop, types.KindWindowsDesktopService, types.KindNode, types.KindKubeServer, types.KindUserGroup: + case types.KindAppServer, types.KindDatabaseServer, types.KindDatabaseService, types.KindWindowsDesktop, types.KindWindowsDesktopService, types.KindNode, types.KindKubeServer, types.KindUserGroup, types.KindUnifiedResource: return &resourceChecker{AccessChecker: a.context.Checker}, nil default: return nil, trace.BadParameter("could not check access to resource type %s", resource) diff --git a/lib/auth/auth_with_roles_test.go b/lib/auth/auth_with_roles_test.go index 0c3a7c36428b5..73d9f65473d91 100644 --- a/lib/auth/auth_with_roles_test.go +++ b/lib/auth/auth_with_roles_test.go @@ -4188,8 +4188,9 @@ func TestListUnifiedResources_KindsFilter(t *testing.T) { clt, err := srv.NewClient(TestUser(user.GetName())) require.NoError(t, err) resp, err := clt.ListUnifiedResources(ctx, &proto.ListUnifiedResourcesRequest{ - Kinds: []string{types.KindDatabase}, - Limit: 5, + Kinds: []string{types.KindDatabase}, + Limit: 5, + SortBy: types.SortBy{IsDesc: true, Field: types.ResourceMetadataName}, }) require.NoError(t, err) require.Eventually(t, func() bool { @@ -4236,6 +4237,7 @@ func TestListUnifiedResources_WithSearch(t *testing.T) { resp, err := clt.ListUnifiedResources(ctx, &proto.ListUnifiedResourcesRequest{ SearchKeywords: []string{"tifa"}, Limit: 10, + SortBy: types.SortBy{IsDesc: true, Field: types.ResourceMetadataName}, }) require.NoError(t, err) require.Len(t, resp.Resources, 2) @@ -4323,7 +4325,8 @@ func TestListUnifiedResources_MixedAccess(t *testing.T) { require.NoError(t, err) resp, err := clt.ListUnifiedResources(ctx, &proto.ListUnifiedResourcesRequest{ - Limit: 10, + Limit: 10, + SortBy: types.SortBy{IsDesc: true, Field: types.ResourceMetadataName}, }) require.NoError(t, err) require.Len(t, resp.Resources, 6) @@ -4376,6 +4379,7 @@ func 
TestListUnifiedResources_WithPredicate(t *testing.T) { resp, err := clt.ListUnifiedResources(ctx, &proto.ListUnifiedResourcesRequest{ PredicateExpression: `labels.name == "tifa"`, Limit: 10, + SortBy: types.SortBy{IsDesc: true, Field: types.ResourceMetadataName}, }) require.NoError(t, err) require.Len(t, resp.Resources, 1) @@ -4388,9 +4392,9 @@ func TestListUnifiedResources_WithPredicate(t *testing.T) { // pkg: github.com/gravitational/teleport/lib/auth // BenchmarkListUnifiedResources // BenchmarkListUnifiedResources/simple_labels -// BenchmarkListUnifiedResources/simple_labels-10 1 22900895292 ns/op 15071189320 B/op 272733781 allocs/op +// BenchmarkListUnifiedResources/simple_labels-10 1 653696459 ns/op 480570296 B/op 8241706 allocs/op // PASS -// ok github.com/gravitational/teleport/lib/auth 25.135s +// ok github.com/gravitational/teleport/lib/auth 2.878s func BenchmarkListUnifiedResources(b *testing.B) { const nodeCount = 50_000 const roleCount = 32 @@ -4497,7 +4501,8 @@ func benchmarkListUnifiedResources( for n := 0; n < b.N; n++ { var resources []*proto.PaginatedResource req := &proto.ListUnifiedResourcesRequest{ - Limit: 1_000, + SortBy: types.SortBy{IsDesc: false, Field: types.ResourceMetadataName}, + Limit: 1_000, } for { rsp, err := clt.ListUnifiedResources(ctx, req) diff --git a/lib/services/unified_resource.go b/lib/services/unified_resource.go index ea74624f66526..508a897e1811f 100644 --- a/lib/services/unified_resource.go +++ b/lib/services/unified_resource.go @@ -26,6 +26,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/gravitational/teleport" + "github.com/gravitational/teleport/api/client/proto" apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/accesslist" @@ -51,8 +52,13 @@ type UnifiedResourceCache struct { mu sync.Mutex log *log.Entry cfg UnifiedResourceCacheConfig - // tree is a BTree with items - tree *btree.BTreeG[*item] + // 
nameTree is a BTree with items sorted by (hostname)/name/type + nameTree *btree.BTreeG[*item] + // typeTree is a BTree with items sorted by type/(hostname)/name + typeTree *btree.BTreeG[*item] + // resources is a map of all resources currently tracked in the tree + // the key is always name/type + resources map[string]resource initializationC chan struct{} stale bool once sync.Once @@ -80,9 +86,13 @@ func NewUnifiedResourceCache(ctx context.Context, cfg UnifiedResourceCacheConfig trace.Component: cfg.Component, }), cfg: cfg, - tree: btree.NewG(cfg.BTreeDegree, func(a, b *item) bool { + nameTree: btree.NewG(cfg.BTreeDegree, func(a, b *item) bool { return a.Less(b) }), + typeTree: btree.NewG(cfg.BTreeDegree, func(a, b *item) bool { + return a.Less(b) + }), + resources: make(map[string]resource), initializationC: make(chan struct{}), ResourceGetter: cfg.ResourceGetter, cache: lazyCache, @@ -111,72 +121,163 @@ func (cfg *UnifiedResourceCacheConfig) CheckAndSetDefaults() error { // put stores the value into backend (creates if it does not // exist, updates it otherwise) -func (c *UnifiedResourceCache) put(ctx context.Context, i item) error { - if len(i.Key) == 0 { - return trace.BadParameter("missing parameter key") - } +func (c *UnifiedResourceCache) put(ctx context.Context, resource resource) error { c.mu.Lock() defer c.mu.Unlock() - c.tree.ReplaceOrInsert(&i) + key := resourceKey(resource) + c.resources[key] = resource + sortKey := makeResourceSortKey(resource) + c.nameTree.ReplaceOrInsert(&item{Key: sortKey.byName, Value: key}) + c.typeTree.ReplaceOrInsert(&item{Key: sortKey.byType, Value: key}) return nil } -func putResources[T resource](tree *btree.BTreeG[*item], resources []T) { +func putResources[T resource](cache *UnifiedResourceCache, resources []T) { for _, resource := range resources { - tree.ReplaceOrInsert(&item{Key: resourceKey(resource), Value: resource}) + // generate the unique resource key and add the resource to the resources map + key := 
resourceKey(resource) + cache.resources[key] = resource + + sortKey := makeResourceSortKey(resource) + cache.nameTree.ReplaceOrInsert(&item{Key: sortKey.byName, Value: key}) + cache.typeTree.ReplaceOrInsert(&item{Key: sortKey.byType, Value: key}) } } // delete removes the item by key, returns NotFound error // if item does not exist -func (c *UnifiedResourceCache) delete(ctx context.Context, key []byte) error { - if len(key) == 0 { - return trace.BadParameter("missing parameter key") - } - return c.read(ctx, func(tree *btree.BTreeG[*item]) error { - if _, ok := tree.Delete(&item{Key: key}); !ok { - return trace.NotFound("key %q is not found", string(key)) +func (c *UnifiedResourceCache) delete(ctx context.Context, res types.Resource) error { + key := resourceKey(res) + + // delete generally only sends the id, so we will fetch the actual resource from our resources + // map and generate our sort keys. Then we can delete from the map and all the trees at once + resource := c.resources[key] + + sortKey := makeResourceSortKey(resource) + + return c.read(ctx, func(cache *UnifiedResourceCache) error { + if _, ok := cache.nameTree.Delete(&item{Key: sortKey.byName}); !ok { + return trace.NotFound("key %q is not found in unified cache name sort tree", string(sortKey.byName)) + } + if _, ok := cache.typeTree.Delete(&item{Key: sortKey.byType}); !ok { + return trace.NotFound("key %q is not found in unified cache type sort tree", string(sortKey.byType)) } + // delete from resource map + delete(c.resources, key) return nil }) } -func (c *UnifiedResourceCache) getRange(ctx context.Context, startKey, endKey []byte, limit int) ([]resource, error) { - if len(startKey) == 0 { - return nil, trace.BadParameter("missing parameter startKey") +func (c *UnifiedResourceCache) getSortTree(sortField string) (*btree.BTreeG[*item], error) { + switch sortField { + case sortByName: + return c.nameTree, nil + case sortByKind: + return c.typeTree, nil + default: + return nil, 
trace.NotImplemented("sorting by %v is not supported in unified resources", sortField) } - if len(endKey) == 0 { - return nil, trace.BadParameter("missing parameter endKey") + +} + +func (c *UnifiedResourceCache) getRange(ctx context.Context, startKey []byte, matchFn func(types.ResourceWithLabels) (bool, error), req *proto.ListUnifiedResourcesRequest) ([]resource, string, error) { + if len(startKey) == 0 { + return nil, "", trace.BadParameter("missing parameter startKey") } - if limit <= 0 { - limit = backend.DefaultRangeLimit + if req.Limit <= 0 { + req.Limit = backend.DefaultRangeLimit } var res []resource - err := c.read(ctx, func(tree *btree.BTreeG[*item]) error { - tree.AscendRange(&item{Key: startKey}, &item{Key: endKey}, func(item *item) bool { - res = append(res, item.Value) - if limit > 0 && len(res) >= limit { + var nextKey string + err := c.read(ctx, func(cache *UnifiedResourceCache) error { + tree, err := cache.getSortTree(req.SortBy.Field) + if err != nil { + return trace.Wrap(err, "getting sort tree") + } + var iterateRange func(lessOrEqual, greaterThan *item, iterator btree.ItemIteratorG[*item]) + var endKey []byte + if req.SortBy.IsDesc { + iterateRange = tree.DescendRange + endKey = backend.Key(prefix) + } else { + iterateRange = tree.AscendRange + endKey = backend.RangeEnd(backend.Key(prefix)) + } + iterateRange(&item{Key: startKey}, &item{Key: endKey}, func(item *item) bool { + // get resource from resource map + resourceFromMap, ok := cache.resources[item.Value] + if !ok { + // skip and continue + return true + } + + // check if the resource matches our filter + match, err := matchFn(resourceFromMap) + if err != nil { + // do something with this error eventually but continue for now + return true + } + + if !match { + return true + } + + // do we have all we need? 
set nextKey and stop iterating + // we do this after the matchFn to make sure they have access to the "next" node + if req.Limit > 0 && len(res) >= int(req.Limit) { + nextKey = string(item.Key) return false } + res = append(res, resourceFromMap) return true }) return nil }) if err != nil { - return nil, trace.Wrap(err) + return nil, "", trace.Wrap(err) } if len(res) == backend.DefaultRangeLimit { c.log.Warnf("Range query hit backend limit. (this is a bug!) startKey=%q,limit=%d", startKey, backend.DefaultRangeLimit) } - return res, nil + return res, nextKey, nil +} + +func getStartKey(req *proto.ListUnifiedResourcesRequest) []byte { + // if startkey exists, return it + if req.StartKey != "" { + return []byte(req.StartKey) + } + // if startkey doesn't exist, we check the sort direction. + // If sort is descending, startkey is end of the list + if req.SortBy.IsDesc { + return backend.RangeEnd(backend.Key(prefix)) + } + // return start of the list + return backend.Key(prefix) +} + +func (c *UnifiedResourceCache) IterateUnifiedResources(ctx context.Context, matchFn func(types.ResourceWithLabels) (bool, error), req *proto.ListUnifiedResourcesRequest) ([]types.ResourceWithLabels, string, error) { + startKey := getStartKey(req) + result, nextKey, err := c.getRange(ctx, startKey, matchFn, req) + if err != nil { + return nil, "", trace.Wrap(err, "getting unified resource range") + } + + resources := make([]types.ResourceWithLabels, 0, len(result)) + for _, item := range result { + resources = append(resources, item.CloneResource()) + } + + return resources, nextKey, nil } -// GetUnifiedResources returns a list of all resources stored in the current unifiedResourceCollector tree +// GetUnifiedResources returns a list of all resources stored in the current unifiedResourceCollector tree in ascending order func (c *UnifiedResourceCache) GetUnifiedResources(ctx context.Context) ([]types.ResourceWithLabels, error) { - result, err := c.getRange(ctx, backend.Key(prefix), 
backend.RangeEnd(backend.Key(prefix)), backend.NoLimit) + req := &proto.ListUnifiedResourcesRequest{Limit: backend.NoLimit, SortBy: types.SortBy{IsDesc: false, Field: sortByName}} + result, _, err := c.getRange(ctx, backend.Key(prefix), func(rwl types.ResourceWithLabels) (bool, error) { return true, nil }, req) if err != nil { return nil, trace.Wrap(err, "getting unified resource range") } @@ -213,8 +314,55 @@ func newWatcher(ctx context.Context, resourceCache *UnifiedResourceCache, cfg Re return nil } -func resourceKey(resource types.Resource) []byte { - return backend.Key(prefix, resource.GetName(), resource.GetKind()) +// resourceKey is a unique name to be used as a key in the resources map +func resourceKey(resource types.Resource) string { + return resource.GetName() + "/" + resource.GetKind() +} + +type resourceSortKey struct { + byName []byte + byType []byte +} + +// makeResourceSortKey will generate a key to be used in the sort trees +func makeResourceSortKey(resource types.Resource) resourceSortKey { + var name, kind string + // set the kind to the appropriate "contained" type, rather than + // the container type. 
+ switch r := resource.(type) { + case types.Server: + name = r.GetHostname() + "/" + r.GetName() + kind = types.KindNode + case types.AppServer: + app := r.GetApp() + if app != nil { + name = app.GetName() + kind = types.KindApp + } + case types.SAMLIdPServiceProvider: + name = r.GetName() + kind = types.KindApp + case types.KubeServer: + cluster := r.GetCluster() + if cluster != nil { + name = r.GetCluster().GetName() + kind = types.KindKubernetesCluster + } + case types.DatabaseServer: + db := r.GetDatabase() + if db != nil { + name = db.GetName() + kind = types.KindDatabase + } + default: + name = resource.GetName() + kind = resource.GetKind() + } + + return resourceSortKey{ + byName: backend.Key(prefix, name, kind), + byType: backend.Key(prefix, kind, name), + } } func (c *UnifiedResourceCache) getResourcesAndUpdateCurrent(ctx context.Context) error { @@ -255,14 +403,20 @@ func (c *UnifiedResourceCache) getResourcesAndUpdateCurrent(ctx context.Context) c.mu.Lock() defer c.mu.Unlock() - c.tree.Clear(false) - putResources[types.Server](c.tree, newNodes) - putResources[types.DatabaseServer](c.tree, newDbs) - putResources[types.AppServer](c.tree, newApps) - putResources[types.KubeServer](c.tree, newKubes) - putResources[types.SAMLIdPServiceProvider](c.tree, newSAMLApps) - putResources[types.WindowsDesktop](c.tree, newDesktops) - putResources[*accesslist.AccessList](c.tree, newAccessLists) + // empty the trees + c.nameTree.Clear(false) + c.typeTree.Clear(false) + // clear the resource map as well + // c.resources = make(map[string]resource) + clear(c.resources) + + putResources[types.Server](c, newNodes) + putResources[types.DatabaseServer](c, newDbs) + putResources[types.AppServer](c, newApps) + putResources[types.KubeServer](c, newKubes) + putResources[types.SAMLIdPServiceProvider](c, newSAMLApps) + putResources[types.WindowsDesktop](c, newDesktops) + putResources[*accesslist.AccessList](c, newAccessLists) c.stale = false c.defineCollectorAsInitialized() return 
nil @@ -399,35 +553,39 @@ func (c *UnifiedResourceCache) getAccessLists(ctx context.Context) ([]*accesslis // read applies the supplied closure to either the primary tree or the ttl-based fallback tree depending on // wether or not the cache is currently healthy. locking is handled internally and the passed-in tree should // not be accessed after the closure completes. -func (c *UnifiedResourceCache) read(ctx context.Context, fn func(tree *btree.BTreeG[*item]) error) error { +func (c *UnifiedResourceCache) read(ctx context.Context, fn func(cache *UnifiedResourceCache) error) error { c.mu.Lock() if !c.stale { - fn(c.tree) + fn(c) c.mu.Unlock() return nil } c.mu.Unlock() - ttlTree, err := utils.FnCacheGet(ctx, c.cache, "unified_resources", func(ctx context.Context) (*btree.BTreeG[*item], error) { + ttlCache, err := utils.FnCacheGet(ctx, c.cache, "unified_resources", func(ctx context.Context) (*UnifiedResourceCache, error) { fallbackCache := &UnifiedResourceCache{ cfg: c.cfg, - tree: btree.NewG(c.cfg.BTreeDegree, func(a, b *item) bool { + nameTree: btree.NewG(c.cfg.BTreeDegree, func(a, b *item) bool { + return a.Less(b) + }), + typeTree: btree.NewG(c.cfg.BTreeDegree, func(a, b *item) bool { return a.Less(b) }), + resources: make(map[string]resource), ResourceGetter: c.ResourceGetter, initializationC: make(chan struct{}), } if err := fallbackCache.getResourcesAndUpdateCurrent(ctx); err != nil { return nil, trace.Wrap(err) } - return fallbackCache.tree, nil + return fallbackCache, nil }) c.mu.Lock() if !c.stale { // primary became healthy while we were waiting - fn(c.tree) + fn(c) c.mu.Unlock() return nil } @@ -438,7 +596,7 @@ func (c *UnifiedResourceCache) read(ctx context.Context, fn func(tree *btree.BTr return trace.Wrap(err) } - fn(ttlTree) + fn(ttlCache) return nil } @@ -471,12 +629,9 @@ func (c *UnifiedResourceCache) processEventAndUpdateCurrent(ctx context.Context, switch event.Type { case types.OpDelete: - c.delete(ctx, resourceKey(event.Resource)) + 
c.delete(ctx, event.Resource) case types.OpPut: - c.put(ctx, item{ - Key: resourceKey(event.Resource), - Value: event.Resource.(resource), - }) + c.put(ctx, event.Resource.(resource)) default: c.log.Warnf("unsupported event type %s.", event.Type) return @@ -534,12 +689,125 @@ type resource interface { } type item struct { - // Key is a key of the key value item + // Key is a key of the key value item. This will be different based on which sorting tree + // the item is in Key []byte - // Value represents a resource such as types.Server or types.DatabaseServer - Value resource + // Value will be the resourceKey used in the resources map to get the resource + Value string +} + +const ( + prefix = "unified_resource" + sortByName string = "name" + sortByKind string = "kind" +) + +// MakePaginatedResources converts a list of resources into a list of paginated proto representations. +func MakePaginatedResources(requestType string, resources []types.ResourceWithLabels) ([]*proto.PaginatedResource, error) { + paginatedResources := make([]*proto.PaginatedResource, 0, len(resources)) + for _, resource := range resources { + var protoResource *proto.PaginatedResource + resourceKind := requestType + if requestType == types.KindUnifiedResource { + resourceKind = resource.GetKind() + } + switch resourceKind { + case types.KindDatabaseServer: + database, ok := resource.(*types.DatabaseServerV3) + if !ok { + return nil, trace.BadParameter("%s has invalid type %T", resourceKind, resource) + } + + protoResource = &proto.PaginatedResource{Resource: &proto.PaginatedResource_DatabaseServer{DatabaseServer: database}} + case types.KindDatabaseService: + databaseService, ok := resource.(*types.DatabaseServiceV1) + if !ok { + return nil, trace.BadParameter("%s has invalid type %T", resourceKind, resource) + } + + protoResource = &proto.PaginatedResource{Resource: &proto.PaginatedResource_DatabaseService{DatabaseService: databaseService}} + case types.KindAppServer: + app, ok := 
resource.(*types.AppServerV3) + if !ok { + return nil, trace.BadParameter("%s has invalid type %T", resourceKind, resource) + } + + protoResource = &proto.PaginatedResource{Resource: &proto.PaginatedResource_AppServer{AppServer: app}} + case types.KindNode: + srv, ok := resource.(*types.ServerV2) + if !ok { + return nil, trace.BadParameter("%s has invalid type %T", resourceKind, resource) + } + + protoResource = &proto.PaginatedResource{Resource: &proto.PaginatedResource_Node{Node: srv}} + case types.KindKubeServer: + srv, ok := resource.(*types.KubernetesServerV3) + if !ok { + return nil, trace.BadParameter("%s has invalid type %T", resourceKind, resource) + } + + protoResource = &proto.PaginatedResource{Resource: &proto.PaginatedResource_KubernetesServer{KubernetesServer: srv}} + case types.KindWindowsDesktop: + desktop, ok := resource.(*types.WindowsDesktopV3) + if !ok { + return nil, trace.BadParameter("%s has invalid type %T", resourceKind, resource) + } + + protoResource = &proto.PaginatedResource{Resource: &proto.PaginatedResource_WindowsDesktop{WindowsDesktop: desktop}} + case types.KindWindowsDesktopService: + desktopService, ok := resource.(*types.WindowsDesktopServiceV3) + if !ok { + return nil, trace.BadParameter("%s has invalid type %T", resourceKind, resource) + } + + protoResource = &proto.PaginatedResource{Resource: &proto.PaginatedResource_WindowsDesktopService{WindowsDesktopService: desktopService}} + case types.KindKubernetesCluster: + cluster, ok := resource.(*types.KubernetesClusterV3) + if !ok { + return nil, trace.BadParameter("%s has invalid type %T", resourceKind, resource) + } + + protoResource = &proto.PaginatedResource{Resource: &proto.PaginatedResource_KubeCluster{KubeCluster: cluster}} + case types.KindUserGroup: + userGroup, ok := resource.(*types.UserGroupV1) + if !ok { + return nil, trace.BadParameter("%s has invalid type %T", resourceKind, resource) + } + + protoResource = &proto.PaginatedResource{Resource: 
&proto.PaginatedResource_UserGroup{UserGroup: userGroup}} + case types.KindSAMLIdPServiceProvider, types.KindAppOrSAMLIdPServiceProvider: + switch appOrSP := resource.(type) { + case *types.AppServerV3: + protoResource = &proto.PaginatedResource{ + Resource: &proto.PaginatedResource_AppServerOrSAMLIdPServiceProvider{ + AppServerOrSAMLIdPServiceProvider: &types.AppServerOrSAMLIdPServiceProviderV1{ + Resource: &types.AppServerOrSAMLIdPServiceProviderV1_AppServer{ + AppServer: appOrSP, + }, + }, + }} + case *types.SAMLIdPServiceProviderV1: + protoResource = &proto.PaginatedResource{ + Resource: &proto.PaginatedResource_AppServerOrSAMLIdPServiceProvider{ + AppServerOrSAMLIdPServiceProvider: &types.AppServerOrSAMLIdPServiceProviderV1{ + Resource: &types.AppServerOrSAMLIdPServiceProviderV1_SAMLIdPServiceProvider{ + SAMLIdPServiceProvider: appOrSP, + }, + }, + }} + default: + return nil, trace.BadParameter("%s has invalid type %T", resourceKind, resource) + } + default: + return nil, trace.NotImplemented("resource type %s doesn't support pagination", resource.GetKind()) + } + + paginatedResources = append(paginatedResources, protoResource) + } + return paginatedResources, nil } const ( - prefix = "unified_resource" + SortByName string = "name" + SortByKind string = "kind" ) diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go index 3bf9c53eed9cc..816c221eeb218 100644 --- a/lib/web/apiserver.go +++ b/lib/web/apiserver.go @@ -2632,6 +2632,9 @@ func (h *Handler) clusterUnifiedResourcesGet(w http.ResponseWriter, request *htt case types.KubeCluster: kube := ui.MakeKubeCluster(r, accessChecker) unifiedResources = append(unifiedResources, kube) + case types.KubeServer: + kube := ui.MakeKubeCluster(r.GetCluster(), accessChecker) + unifiedResources = append(unifiedResources, kube) default: return nil, trace.Errorf("UI Resource has unknown type: %T", resource) } diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index 4a5e7537a8516..5394f0c519592 100644 --- 
a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -1202,7 +1202,7 @@ func TestUnifiedResourcesGet(t *testing.T) { require.NoError(t, err) res = clusterNodesGetResponse{} require.NoError(t, json.Unmarshal(re.Bytes(), &res)) - require.Equal(t, types.KindNode, res.Items[0].Kind) + require.Equal(t, types.KindWindowsDesktop, res.Items[0].Kind) // test with no access noAccessRole, err := types.NewRole(services.RoleNameForUser("test-no-access@example.com"), types.RoleSpecV6{})