From bccfc278b61868e84185a116739aac83f0b1fbab Mon Sep 17 00:00:00 2001 From: Lisa Gunn Date: Fri, 22 Sep 2023 15:10:15 -0700 Subject: [PATCH 01/22] Reduce the use of capitalized trusted clusters and a few other fixes --- .../access-controls/guides/ip-pinning.mdx | 2 +- docs/pages/access-controls/reference.mdx | 2 +- .../join-token.mdx | 3 +-- docs/pages/architecture/session-recording.mdx | 8 +++--- docs/pages/architecture/tls-routing.mdx | 4 +-- .../connect-your-client/teleport-connect.mdx | 4 +-- docs/pages/connect-your-client/tsh.mdx | 8 +++--- docs/pages/core-concepts.mdx | 27 +++++++++---------- .../aws-ha-autoscale-cluster-terraform.mdx | 4 +-- docs/pages/faq.mdx | 5 ++-- docs/pages/includes/role-spec.mdx | 2 +- .../pages/kubernetes-access/manage-access.mdx | 2 +- .../manage-access/federation.mdx | 12 ++++----- docs/pages/machine-id/faq.mdx | 5 ++-- docs/pages/management/admin.mdx | 2 +- .../pages/management/operations/upgrading.mdx | 4 +-- docs/pages/reference/cli/tctl.mdx | 2 +- docs/pages/server-access/guides/openssh.mdx | 6 ++--- docs/pages/server-access/rbac.mdx | 4 +-- 19 files changed, 51 insertions(+), 55 deletions(-) diff --git a/docs/pages/access-controls/guides/ip-pinning.mdx b/docs/pages/access-controls/guides/ip-pinning.mdx index eea09b51a1154..f920d29c7f6d7 100644 --- a/docs/pages/access-controls/guides/ip-pinning.mdx +++ b/docs/pages/access-controls/guides/ip-pinning.mdx @@ -31,7 +31,7 @@ access will be denied. This means that if you enable IP pinning for some role, a order to regenerate their certificates. A client's observed IP will be propagated internally between Teleport services if needed, so Teleport performs the IP pinning check against the correct IP. 
-IP pinning can work across Trusted Clusters, but be aware that if a user tries to access a leaf cluster's resources through the root cluster, and their +IP pinning can work across trusted clusters, but be aware that if a user tries to access a leaf cluster's resources through the root cluster, and their mapped role on the leaf cluster has IP pinning enabled, they should also have IP pinning enabled on their root cluster roles. Otherwise, their certificates will not contain pinned IP information. diff --git a/docs/pages/access-controls/reference.mdx b/docs/pages/access-controls/reference.mdx index ef3552dc85bec..6c9aea06084ec 100644 --- a/docs/pages/access-controls/reference.mdx +++ b/docs/pages/access-controls/reference.mdx @@ -68,7 +68,7 @@ The following variables can be used with `logins` and `windows_desktop_logins` f | Variable | Description | | - | - | -| `{{internal.logins}}` | Substituted with a value stored in Teleport's local user database
and logins from a root cluster.

For local users, Teleport will substitute this with the
"allowed logins" parameter used in the
`tctl users add [user] <allowed logins>` command.

If the role is within a leaf cluster in a [Trusted Cluster](../management/admin/trustedclusters.mdx),
Teleport will substitute the logins from the root cluster
whether the user is a local user or from an SSO provider.

As an example, if the user has the `ubuntu` login in the root
cluster, then `ubuntu` will be substituted in the leaf
cluster with this variable. | +| `{{internal.logins}}` | Substituted with a value stored in Teleport's local user database
and logins from a root cluster.

For local users, Teleport will substitute this with the
"allowed logins" parameter used in the
`tctl users add [user] <allowed logins>` command.

If the role is within a leaf cluster in a [trusted cluster](../management/admin/trustedclusters.mdx),
Teleport will substitute the logins from the root cluster
whether the user is a local user or from an SSO provider.

As an example, if the user has the `ubuntu` login in the root
cluster, then `ubuntu` will be substituted in the leaf
cluster with this variable. | | `{{external.xyz}}` | Substituted with a value from an external [SSO provider](https://en.wikipedia.org/wiki/Single_sign-on).
If using SAML, this will be expanded with "xyz" assertion value.
For OIDC, this will be expanded a value of "xyz" claim. | Both variables above are there to deliver the same benefit: they allow Teleport diff --git a/docs/pages/agents/join-services-to-your-cluster/join-token.mdx b/docs/pages/agents/join-services-to-your-cluster/join-token.mdx index 026ab247daec4..7b3e32ff0b0f6 100644 --- a/docs/pages/agents/join-services-to-your-cluster/join-token.mdx +++ b/docs/pages/agents/join-services-to-your-cluster/join-token.mdx @@ -334,5 +334,4 @@ $ tctl tokens rm ## Next steps - If you have workloads split across different networks or clouds, we recommend - setting up Trusted Clusters. Read how to get started in our [Trusted Clusters - guide](../../management/admin/trustedclusters.mdx). + setting up trusted clusters. Read how to get started in [Configure Trusted Clusters](../../management/admin/trustedclusters.mdx). diff --git a/docs/pages/architecture/session-recording.mdx b/docs/pages/architecture/session-recording.mdx index 6de3e1f8e776c..c72d8d1814376 100644 --- a/docs/pages/architecture/session-recording.mdx +++ b/docs/pages/architecture/session-recording.mdx @@ -61,10 +61,10 @@ This results in 4 possible session recording configurations: This is a cluster-wide configuration option and applies to the entire Teleport cluster. It can be configured by setting `session_recording` -in the `auth_service` section of your `teleport.yaml`, or dynamically via -the the `session_recording_config` resource. If you need to apply different -recording configuration to different sets of resources, we recommend setting up -[Trusted Clusters](../management/admin/trustedclusters.mdx) with their own +in the `auth_service` section of your `teleport.yaml`, or dynamically using +the `session_recording_config` resource. If you need to apply different +recording configuration to different sets of resources, you can setup +[trusted clusters](../management/admin/trustedclusters.mdx) with their own recording configurations. 
diff --git a/docs/pages/architecture/tls-routing.mdx b/docs/pages/architecture/tls-routing.mdx index 7be9f3e3f0840..417903e6f1e23 100644 --- a/docs/pages/architecture/tls-routing.mdx +++ b/docs/pages/architecture/tls-routing.mdx @@ -90,8 +90,8 @@ how it's configured. ## Reverse tunnels -Reverse tunnel workers within the Teleport Node, Application and Database -Services, as well as for Trusted Clusters, open a TLS tunnel to the cluster's +Reverse tunnel workers within the Teleport SSH, Application, and Database +Services, as well as for trusted clusters, open a TLS tunnel to the cluster's Proxy Service with the `teleport-reversetunnel` ALPN protocol. The workers then dial SSH over the tunnel, establishing a secure connection. diff --git a/docs/pages/connect-your-client/teleport-connect.mdx b/docs/pages/connect-your-client/teleport-connect.mdx index a263cfb40b7f2..25ba465ebdd02 100644 --- a/docs/pages/connect-your-client/teleport-connect.mdx +++ b/docs/pages/connect-your-client/teleport-connect.mdx @@ -56,7 +56,7 @@ The top bar of Teleport Connect consists of: between them. - The **search bar** (in the middle), which allows you to search for resources across clusters. - The **cluster selector** (to the left of the search bar), which shows up only if you have set up - Trusted Clusters and there are leaf clusters connected to the root cluster. It lets you browse + trusted clusters and there are leaf clusters connected to the root cluster. It lets you browse leaf cluster resources. Also, the "Open new terminal" action will bind new terminal tabs to the selected cluster. - The **additional actions menu** (to the left of the profile selector), containing options such as opening a config file or creating an access request in an Enterprise cluster. 
@@ -85,7 +85,7 @@ this by setting the environment variables `TELEPORT_PROXY` and `TELEPORT_CLUSTER Additionally, Teleport Connect prepends the `PATH`/`Path` environment variable in the session with the directory containing the tsh binary, even if [tsh is not globally available](#using-tsh-outside-of-teleport-connect). -When using [Trusted Clusters](../management/admin/trustedclusters.mdx), the cluster selector allows +When using [trusted clusters](../management/admin/trustedclusters.mdx), the cluster selector allows you to determine which cluster the shell session will be bound to. The selected cluster will be reflected in both the tab title and the status bar. diff --git a/docs/pages/connect-your-client/tsh.mdx b/docs/pages/connect-your-client/tsh.mdx index 86d53511e1bea..ac3edba5958ed 100644 --- a/docs/pages/connect-your-client/tsh.mdx +++ b/docs/pages/connect-your-client/tsh.mdx @@ -697,14 +697,14 @@ Teleport supports creating clusters of servers located behind firewalls **without any open listening TCP ports**. This works by creating reverse SSH tunnels from behind-firewall environments into a Teleport Proxy Service you have access to. -These features are called **Trusted Clusters**. Refer to [the Trusted Clusters guide](../management/admin/trustedclusters.mdx) -to learn how a Trusted Cluster can be configured. +To learn more about setting up a trust relationship between clusters behind firewalls, see +[Configure Trusted Clusters](../management/admin/trustedclusters.mdx). 
-Assuming the Teleport Proxy Server called `work` is configured with a few Trusted -Clusters, a user may use the `tsh clusters` command to see a list of all Trusted Clusters on the server: +Assuming the Teleport Proxy Server called `work` is configured with a few trusted +clusters, you can use the `tsh clusters` command to see a list of all the trusted clusters on the server: ```code $ tsh --proxy=work clusters diff --git a/docs/pages/core-concepts.mdx b/docs/pages/core-concepts.mdx index 0be02a560387e..0c4701e38684e 100644 --- a/docs/pages/core-concepts.mdx +++ b/docs/pages/core-concepts.mdx @@ -214,17 +214,16 @@ authenticate to Teleport via a Single Sign-On (SSO) solution. See our guide to [Authentication Options](./reference/authentication.mdx). -### Trusted Cluster - -A **Trusted Cluster** is a remote **Teleport cluster** joined to your own -cluster via a trust relationship. Users in your Teleport cluster can access -resources in a Trusted Cluster. - -In a Trusted Cluster relationship, the remote cluster is called a **leaf -cluster**. Your cluster, if you are adding a leaf cluster to it, is called a -**root cluster**. - -Read our [Trusted Clusters architecture -guide](./architecture/trustedclusters.mdx) for how Trusted Clusters work and our -[how-to guide](./management/admin/trustedclusters.mdx) for how to configure -Trusted Clusters. +### Trusted clusters + +A **trusted cluster** consists of a **root cluster** and one or more **leaf clusters** +that trust the root cluster certificate authority. The trust relationship between the +root and leaf clusters enables users authenticated in the root cluster to access resources +in leaf cluster. The root and leaf cluster operate independently with their own +users, roles, and resources, but the trust relationship allows users with certain roles +in the root cluster to be mapped to roles and permissions defined in the leaf cluster. 
+ +For more information about how to configure a trust relationship between clusters, +see [Configure Trusted Clusters](./management/admin/trustedclusters.mdx). +For an overview of the trusted clusters architecture, see [Trusted Cluster +Architecture]](./architecture/trustedclusters.mdx). diff --git a/docs/pages/deploy-a-cluster/deployments/aws-ha-autoscale-cluster-terraform.mdx b/docs/pages/deploy-a-cluster/deployments/aws-ha-autoscale-cluster-terraform.mdx index 0a811245954f9..c4da75ce11e65 100644 --- a/docs/pages/deploy-a-cluster/deployments/aws-ha-autoscale-cluster-terraform.mdx +++ b/docs/pages/deploy-a-cluster/deployments/aws-ha-autoscale-cluster-terraform.mdx @@ -823,9 +823,9 @@ teleport: proxy_server: :443 ``` -### Trusted Clusters +### Trusted clusters -To add a trusted cluster, you'll need the hostname of the proxy load balancer. +To add a trusted cluster, you'll need the hostname of the proxy load balancer. You can get it using this command: In this example, the `web_proxy_addr` in the trusted cluster configuration should be set up like this: diff --git a/docs/pages/faq.mdx b/docs/pages/faq.mdx index 47a6e3615aaa8..b9c779fe7ca21 100644 --- a/docs/pages/faq.mdx +++ b/docs/pages/faq.mdx @@ -31,11 +31,10 @@ functionality without a net addition of an agent on your system. Yes, this question comes up often and is related to the previous one. Take a look at [Using OpenSSH Guide](./server-access/guides/openssh.mdx). -## Can I connect to Nodes behind a firewall? +## Can I connect to nodes behind a firewall? Yes, Teleport supports reverse SSH tunnels out of the box. To configure -behind-firewall clusters refer to our -[Trusted Clusters](./management/admin/trustedclusters.mdx) guide. +behind-firewall clusters, see [Configure Trusted Clusters](./management/admin/trustedclusters.mdx). ## Should we use Teleport Enterprise or Teleport Community Edition for connecting resources to our Teleport cluster? (!docs/pages/includes/ent-vs-community-faq.mdx!) 
diff --git a/docs/pages/includes/role-spec.mdx b/docs/pages/includes/role-spec.mdx index 0a830a7649355..1cef8d3dd2e2e 100644 --- a/docs/pages/includes/role-spec.mdx +++ b/docs/pages/includes/role-spec.mdx @@ -385,7 +385,7 @@ spec: # saml - connector resource # github - GitHub connector resource # - # trusted_cluster - Trusted Cluster resource + # trusted_cluster - Trusted cluster resource # remote_cluster - remote cluster resource # # access_request - Access Request resource diff --git a/docs/pages/kubernetes-access/manage-access.mdx b/docs/pages/kubernetes-access/manage-access.mdx index e2dbc576e6561..e44be1e143dd2 100644 --- a/docs/pages/kubernetes-access/manage-access.mdx +++ b/docs/pages/kubernetes-access/manage-access.mdx @@ -1,6 +1,6 @@ --- title: Managing Access to Kubernetes Clusters -description: Use Teleport's sophisticated RBAC and Trusted Clusters to ensure that your teams have the correct access to your Kubernetes clusters. +description: Use Teleport's sophisticated RBAC and trusted clusters to ensure that your teams have the correct access to your Kubernetes clusters. --- Once you register a Kubernetes cluster with Teleport, you can apply diff --git a/docs/pages/kubernetes-access/manage-access/federation.mdx b/docs/pages/kubernetes-access/manage-access/federation.mdx index 69d73207cbbe2..250737bb4b4dd 100644 --- a/docs/pages/kubernetes-access/manage-access/federation.mdx +++ b/docs/pages/kubernetes-access/manage-access/federation.mdx @@ -1,18 +1,18 @@ --- title: Federated Kubernetes Access with Trusted Clusters -description: Federated Access using Teleport Trusted Clusters. +description: Federated Access using Teleport trusted clusters. --- There are cases when you have Kubernetes clusters that have to operate independently, for example, they are part of a different organization or have intermittent connectivity. 
-You can take advantage of [Trusted Clusters](../../management/admin/trustedclusters.mdx) +You can take advantage of [trusted clusters](../../management/admin/trustedclusters.mdx) to federate trust across Kubernetes clusters. -When multiple Trusted Clusters are present behind the Teleport Proxy Service, the +When multiple trusted clusters are present behind the Teleport Proxy Service, the `kubeconfig` generated by [tsh login](../../reference/cli/tsh.mdx#tsh-login) will contain the Kubernetes API endpoint determined by the `` argument to [tsh login](../../reference/cli/tsh.mdx#tsh-login). @@ -20,7 +20,7 @@ login](../../reference/cli/tsh.mdx#tsh-login). For example, consider the following setup: - There are three Teleport/Kubernetes clusters: `main`, `east`, and `west`. These are the names set in `cluster_name` setting in their configuration files. -- The clusters `east` and `west` are Trusted Clusters for `main`. +- The clusters `east` and `west` are trusted clusters for `main`. - Users always authenticate against `main` but use their certificates to access SSH nodes and the Kubernetes API in all three clusters. - The DNS name of the main Proxy Service is `main.example.com`. @@ -44,7 +44,7 @@ $ tsh --proxy=main.example.com login east -When multiple Trusted Clusters are present behind the Teleport Proxy Service, the +When multiple trusted Clusters are present behind the Teleport Proxy Service, the `kubeconfig` generated by [tsh login](../../reference/cli/tsh.mdx#tsh-login) will contain the Kubernetes API endpoint determined by the `` argument to [tsh login](../../reference/cli/tsh.mdx#tsh-login). @@ -52,7 +52,7 @@ login](../../reference/cli/tsh.mdx#tsh-login). For example, consider the following setup: - There are two Teleport/Kubernetes clusters, `east` and `west`. These are the names set in `cluster_name` setting in their configuration files. 
-- The clusters `east` and `west` are Trusted Clusters for a Teleport Team or Enterprise Cloud tenant, `mytenant.teleport.sh`. +- The clusters `east` and `west` are trusted clusters for a Teleport Team or Enterprise Cloud tenant, `mytenant.teleport.sh`. - Users always authenticate against `mytenant.teleport.sh` but use their certificates to access SSH nodes and the Kubernetes API in all three clusters. diff --git a/docs/pages/machine-id/faq.mdx b/docs/pages/machine-id/faq.mdx index 1cce1ea350b92..9e4335b4bf5ac 100644 --- a/docs/pages/machine-id/faq.mdx +++ b/docs/pages/machine-id/faq.mdx @@ -20,11 +20,10 @@ runs. ## Can Machine ID be used with Trusted Clusters ? -From Teleport 12.2, Trusted Cluster support for SSH Access has been included in -Machine ID. +From Teleport 12.2, you can use Machine ID for SSH Access in trusted leaf clusters. We currently do not support access to applications, databases, or Kubernetes -clusters in Trusted Clusters configured as leaf clusters. +clusters in leaf clusters. ## Should I define allowed logins as user traits or within roles? diff --git a/docs/pages/management/admin.mdx b/docs/pages/management/admin.mdx index fb2348be856bd..81c3931c8ee5a 100644 --- a/docs/pages/management/admin.mdx +++ b/docs/pages/management/admin.mdx @@ -21,7 +21,7 @@ environment without configuring TLS certificates. ## Manage users and resources -- [Trusted Clusters](./admin/trustedclusters.mdx): Connect multiple Teleport clusters using Trusted Clusters. +- [Trusted Clusters](./admin/trustedclusters.mdx): Connect multiple Teleport clusters using trusted clusters. - [Labels](./admin/labels.mdx): Manage resource metadata with labels. - [Local Users](./admin/users.mdx): Manage local user accounts. 
diff --git a/docs/pages/management/operations/upgrading.mdx b/docs/pages/management/operations/upgrading.mdx index b3b9ce47387dd..a272d16af7673 100644 --- a/docs/pages/management/operations/upgrading.mdx +++ b/docs/pages/management/operations/upgrading.mdx @@ -89,7 +89,7 @@ When upgrading multiple clusters: - Upgrade the root cluster—that is, the cluster that other clusters trust—first. - Verify the upgrade was successful. -- Upgrade the Trusted Clusters. +- Upgrade the trusted leaf clusters. @@ -102,7 +102,7 @@ When upgrading multiple clusters: - Upgrade the root cluster—that is, the cluster that other clusters trust—first. - Verify the upgrade was successful. -- Upgrade the Trusted Clusters. +- Upgrade the trusted leaf clusters. diff --git a/docs/pages/reference/cli/tctl.mdx b/docs/pages/reference/cli/tctl.mdx index ada2a1b459afa..b52277bd7bba3 100644 --- a/docs/pages/reference/cli/tctl.mdx +++ b/docs/pages/reference/cli/tctl.mdx @@ -7,7 +7,7 @@ description: Comprehensive reference of subcommands, flags, and arguments for th in a cluster, including nodes, users, tokens, certificates, and devices. `tctl` can also be used to modify the dynamic configuration of the cluster, such as -creating new user roles or connecting Trusted Clusters. +creating new user roles or connecting to trusted clusters. ## Authentication diff --git a/docs/pages/server-access/guides/openssh.mdx b/docs/pages/server-access/guides/openssh.mdx index b24504e8bf0cd..1660c0f63b0f8 100644 --- a/docs/pages/server-access/guides/openssh.mdx +++ b/docs/pages/server-access/guides/openssh.mdx @@ -156,7 +156,7 @@ order to make it easier to clean up, but you can append the output of Teleport implements an SSH server that includes several **subsystems**, or predefined commands that are run when the server handles a connection. The Proxy Service implements a `proxy` subsystem that forwards SSH traffic to remote hosts -and Trusted Clusters. +and trusted clusters. 
Here is a brief explanation of the configuration that `tsh config` generates: @@ -228,7 +228,7 @@ are using an OpenSSH client and have hosts with uppercase letters in their hostn If you switch between multiple Teleport Proxy Servers, you'll need to re-run `tsh config` for each to generate the cluster-specific configuration. - Similarly, if Trusted Clusters are added or removed, be sure to re-run + Similarly, if trusted clusters are added or removed, be sure to re-run `tsh config` and replace the previous configuration. @@ -300,7 +300,7 @@ host's SSH port.
-You can log in to a host in a Trusted Cluster by placing the name of the cluster +You can log in to a host in a trusted cluster by placing the name of the cluster between the name of the node and the name of your root Teleport cluster: ```code diff --git a/docs/pages/server-access/rbac.mdx b/docs/pages/server-access/rbac.mdx index 261398123fac7..81bf7e056bb29 100644 --- a/docs/pages/server-access/rbac.mdx +++ b/docs/pages/server-access/rbac.mdx @@ -95,8 +95,8 @@ spec: ``` The `{{internal.logins}}` variable applies to local users -and works with Teleport Trusted Clusters. Trusted Clusters allow -connecting via a root Teleport cluster to resources connected to other Teleport clusters. +and works with Teleport trusted clusters. Trusted clusters allow +connecting from a root Teleport cluster to resources connected to other Teleport clusters. Those Teleport clusters, identified as leaf clusters, allow the connection by trusting the root Teleport cluster. From a2eb31343cd4e82dbbee1cb516c38bfe0884eac2 Mon Sep 17 00:00:00 2001 From: Lisa Gunn Date: Fri, 22 Sep 2023 15:12:26 -0700 Subject: [PATCH 02/22] fix a typo --- docs/pages/architecture/session-recording.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/pages/architecture/session-recording.mdx b/docs/pages/architecture/session-recording.mdx index c72d8d1814376..f5543ff757b6e 100644 --- a/docs/pages/architecture/session-recording.mdx +++ b/docs/pages/architecture/session-recording.mdx @@ -63,7 +63,7 @@ This is a cluster-wide configuration option and applies to the entire Teleport cluster. It can be configured by setting `session_recording` in the `auth_service` section of your `teleport.yaml`, or dynamically using the `session_recording_config` resource. 
If you need to apply different -recording configuration to different sets of resources, you can setup +recording configurations to different sets of resources, you can set up [trusted clusters](../management/admin/trustedclusters.mdx) with their own recording configurations. From bbcc2831e2d540789ddb1e8e6129f00089743561 Mon Sep 17 00:00:00 2001 From: Lisa Gunn Date: Mon, 25 Sep 2023 12:23:57 -0700 Subject: [PATCH 03/22] Modify the description of trusted clusters in core concepts --- docs/pages/core-concepts.mdx | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/pages/core-concepts.mdx b/docs/pages/core-concepts.mdx index 0c4701e38684e..247027ee3bfa0 100644 --- a/docs/pages/core-concepts.mdx +++ b/docs/pages/core-concepts.mdx @@ -216,14 +216,15 @@ See our guide to [Authentication Options](./reference/authentication.mdx). ### Trusted clusters -A **trusted cluster** consists of a **root cluster** and one or more **leaf clusters** -that trust the root cluster certificate authority. The trust relationship between the -root and leaf clusters enables users authenticated in the root cluster to access resources +Teleport allows you to configure a **trusted cluster relationship** between a +**root cluster** and one or more **leaf clusters** that trust the root cluster +certificate authority. The trust relationship between the root and leaf clusters +enables users authenticated in the root cluster to access resources in leaf cluster. The root and leaf cluster operate independently with their own users, roles, and resources, but the trust relationship allows users with certain roles in the root cluster to be mapped to roles and permissions defined in the leaf cluster. For more information about how to configure a trust relationship between clusters, see [Configure Trusted Clusters](./management/admin/trustedclusters.mdx). 
-For an overview of the trusted clusters architecture, see [Trusted Cluster -Architecture]](./architecture/trustedclusters.mdx). +For an overview of the architecture used in a trusted cluster relationship, see +[Trusted Cluster Architecture](./architecture/trustedclusters.mdx). From 072a5e799a23d5703c9b9a405851a44f92991947 Mon Sep 17 00:00:00 2001 From: Lisa Gunn Date: Tue, 10 Oct 2023 13:20:25 -0700 Subject: [PATCH 04/22] Split openssh into two topics --- .../manage-access/federation.mdx | 10 +- .../guides/openssh/openssh-manual-install.mdx | 536 ++++++++++++++++++ .../guides/{ => openssh}/openssh.mdx | 52 +- 3 files changed, 582 insertions(+), 16 deletions(-) create mode 100644 docs/pages/server-access/guides/openssh/openssh-manual-install.mdx rename docs/pages/server-access/guides/{ => openssh}/openssh.mdx (83%) diff --git a/docs/pages/kubernetes-access/manage-access/federation.mdx b/docs/pages/kubernetes-access/manage-access/federation.mdx index 250737bb4b4dd..9517c42715480 100644 --- a/docs/pages/kubernetes-access/manage-access/federation.mdx +++ b/docs/pages/kubernetes-access/manage-access/federation.mdx @@ -19,8 +19,9 @@ login](../../reference/cli/tsh.mdx#tsh-login). For example, consider the following setup: -- There are three Teleport/Kubernetes clusters: `main`, `east`, and `west`. These are the names set in `cluster_name` setting in their configuration files. -- The clusters `east` and `west` are trusted clusters for `main`. +- There are three Teleport/Kubernetes clusters: the root cluster named `main` and the leaf clusters + `east` and `west` specified in the `cluster_name` setting in each cluster's configuration file. +- The clusters `east` and `west` trust the `main` root cluster certificate authority. - Users always authenticate against `main` but use their certificates to access SSH nodes and the Kubernetes API in all three clusters. - The DNS name of the main Proxy Service is `main.example.com`. 
@@ -44,14 +45,15 @@ $ tsh --proxy=main.example.com login east -When multiple trusted Clusters are present behind the Teleport Proxy Service, the +When multiple trusted clusters are present behind the Teleport Proxy Service, the `kubeconfig` generated by [tsh login](../../reference/cli/tsh.mdx#tsh-login) will contain the Kubernetes API endpoint determined by the `` argument to [tsh login](../../reference/cli/tsh.mdx#tsh-login). For example, consider the following setup: -- There are two Teleport/Kubernetes clusters, `east` and `west`. These are the names set in `cluster_name` setting in their configuration files. +- There are two Teleport/Kubernetes clusters: `east` and `west`. + These are the names set in the `cluster_name` setting in their configuration files. - The clusters `east` and `west` are trusted clusters for a Teleport Team or Enterprise Cloud tenant, `mytenant.teleport.sh`. - Users always authenticate against `mytenant.teleport.sh` but use their certificates to access SSH nodes and the Kubernetes API in all three clusters. diff --git a/docs/pages/server-access/guides/openssh/openssh-manual-install.mdx b/docs/pages/server-access/guides/openssh/openssh-manual-install.mdx new file mode 100644 index 0000000000000..3c38168460143 --- /dev/null +++ b/docs/pages/server-access/guides/openssh/openssh-manual-install.mdx @@ -0,0 +1,536 @@ +--- +title: Using Teleport with OpenSSH in agentless mode (manual installation) +description: This guide shows you how to set up Teleport to enable secure access to OpenSSH servers so you can protect legacy systems that do not run a Teleport binary. +videoBanner: x0eYFUEIOrM +--- + +In this guide, we will show you how to configure the OpenSSH server `sshd` to +join a Teleport cluster. Existing fleets of OpenSSH servers can be configured to +accept SSH certificates dynamically issued by a Teleport CA. 
+ +Using Teleport and OpenSSH has the advantage of getting you up +and running, but in the long run, we would recommend replacing `sshd` with `teleport`. +`teleport` SSH servers have support for multiple features that are incompatible with OpenSSH: + +- RBAC and resource filtering based on [dynamically updated labels](../../../management/admin/labels.mdx) +- [Session recording without SSH connection termination](../recording-proxy-mode.mdx) +- [Session sharing](../../../connect-your-client/tsh.mdx) +- [Advanced session recording](../bpf-session-recording.mdx) +- [Restricting outbound network connections in SSH sessions](../restricted-session.mdx) + +Teleport supports OpenSSH by proxying SSH connections through the Proxy Service. When a Teleport user requests to connect to an OpenSSH node, the Proxy Service checks the user's Teleport roles. + +If the RBAC checks succeed, the Proxy Service authenticates to the OpenSSH node with a dynamically generated certificate signed by a Teleport CA. This allows the +Proxy Service to record and audit connections to OpenSSH nodes. + +The Proxy Service prevents Teleport users from bypassing auditing by requiring +a certificate signed by a Teleport CA that only the Auth Service possesses. + +In this setup, the Teleport SSH Service performs RBAC checks as well as audits and records sessions on its host, which eliminates the need for connection termination when recording SSH sessions. + + + + This guide shows you how to register an OpenSSH node by creating a node resource + and configuring OpenSSH to trust the Teleport CA. If you can copy the + `teleport` binary onto your OpenSSH node and execute it however, you can follow + the [standard registration guide](openssh.mdx) instead, which has fewer steps. + Teleport is able to perform many of the steps we show in this guide automatically. + + + +## Prerequisites + +- OpenSSH version 6.9 or above on your local machine. 
View your OpenSSH version + with the command: + + ```code + $ ssh -V + ``` + +(!docs/pages/includes/edition-prereqs-tabs.mdx!) + +- A Linux host with the OpenSSH server `sshd` version 7.4 or above installed, + but not Teleport. The SSH port on this host must be open to traffic from the + Teleport Proxy Service host. +- (!docs/pages/includes/tctl.mdx!) + +### Upgrading to v14 from legacy OpenSSH nodes + + If you have previously configured OpenSSH nodes to trust a Teleport CA without + registering them and you upgrade your Teleport cluster to Teleport 14, you won't + be able to connect to them anymore by default. This is because open dialing to + OpenSSH servers not registered with the cluster is no longer allowed in Teleport 14. + To ensure that you will retain access to your OpenSSH nodes you will need to follow + this guide to register every OpenSSH node with Teleport that you previously + configured. This must be done *before* your Teleport cluster is upgraded to Teleport 14. + + If you are having issues registering OpenSSH nodes or need to upgrade your + Teleport cluster to Teleport 14 before registering all of your OpenSSH nodes, you can + pass the `TELEPORT_UNSTABLE_UNLISTED_AGENT_DIALING` environment variable to your + Proxy Service and set it to `yes`. This will allow connections to unregistered + OpenSSH nodes but will be removed in Teleport v15. + + +## Step 1/5. Add a node resource to your Teleport cluster + +When you request an SSH connection to a OpenSSH node, Teleport needs to be able +to find the node's IP address so it can establish a connection to it. + +Declare a `node` resource so Teleport knows how to reach your OpenSSH server. 
+On your local machine, create a file called `openssh-node-resource.yaml` with the following content: + +```yaml +kind: node +version: v2 +sub_kind: openssh +metadata: + name: a100fdd0-52db-4eca-a7ab-c3afa7a1564a + labels: + env: prod +spec: + addr: 1.2.3.4:22 + hostname: openssh-node +``` + +`spec.addr` and `spec.hostname` are mandatory. Assign `spec.addr` to the address and port of your node +and `spec.hostname` to the name of the node as you would like users to see it in Teleport. + +The `metadata.labels` field labels the SSH Service instance so you can apply RBAC rules to it. + +The `metadata.name` field isn't mandatory, but setting it here will save you some work later. + +To generate a new universal unique identifier (UUID) suitable for a `node` name, use the `uuidgen` +on Linux or MacOS, or use the `New-Guid` cmdlet in Powershell on Windows. + +Create the node resource: + +```code +$ tctl create openssh-node-resource.yaml +``` + +## Step 2/5. Configure `sshd` to trust the Teleport CA + +Later in this guide, we will generate an SSH client configuration that will use +a certificate signed by the Teleport Auth Service to authenticate to your SSH +server. For this to work, `sshd` must be told to allow users to log in with +certificates generated by the Teleport Auth Service. + +Start by exporting the Teleport CA public key. + +On the host where you are running `sshd`, run the following commands, assigning to the address of your Teleport Proxy Service: + +```code +$ export KEY=$(curl 'https:///webapi/auth/export?type=openssh' | sed "s/cert-authority\ //") +``` + +Make the public key accessible to `sshd`: + +```code +$ sudo bash -c "echo \"$KEY\" > /etc/ssh/teleport_openssh_ca.pub" +$ sudo bash -c "echo 'TrustedUserCAKeys /etc/ssh/teleport_openssh_ca.pub' >> /etc/ssh/sshd_config" +``` + +Restart `sshd`. 
For systemd-enabled hosts, run the following command: + +```code +$ sudo systemctl restart sshd +``` + +Now, `sshd` will trust users who present a Teleport-issued certificate. + +## Step 3/5. Configure host authentication + +Next, ask Teleport to issue a valid host certificate for your `sshd` host. Later +in this guide, we will configure your SSH client to trust the certificate, +authenticating your `sshd` host for your SSH client. Like the user certificate +we created earlier, the host certificate will be signed by the Teleport Auth +Service. + +### Ensure that your user has the correct privileges + +Your user must be authorized to read and write host certificates. + +On your local machine, create a file called `host-certifier.yaml` with the +following content: + +```yaml +kind: role +version: v5 +metadata: + name: host-certifier +spec: + allow: + rules: + - resources: + - host_cert + verbs: + - list + - create + - read + - update + - delete +``` + +Create the role resource: + +```code +$ tctl create host-certifier.yaml +# role 'host-certifier' has been created +``` + +(!docs/pages/includes/add-role-to-user.mdx role="host-certifier"!) + +You will now have the required permissions to export a host key for your `sshd` +host. + +### Issue a host certificate + +

If you didn't set the `metadata.name` field when you created the `node` resource earlier,
the Teleport Auth Service generated a universal unique identifier (UUID) for that node.
The Teleport Proxy Service uses the UUID to differentiate nodes with the same hostname, so
it must be added to the host certificate. To find your node's UUID, first determine if its hostname is unique:

```code
$ tctl get node/openssh-node --format text
```

If only one node is displayed and you have `jq` installed, you can run the
following command to get your node's UUID:

```code
$ tctl get node/openssh-node --format=json | jq -r ".[0].metadata.name"
```

Otherwise, find your node's UUID in the `metadata.name` field of the YAML
output of this command:

```code
$ tctl get node/openssh-node
```


#### Create the host certificate

When creating host certificates, it is important to specify all the domain names
and addresses that refer to your node. If you try to connect to a node with a
name or address that was not specified when creating its host certificate,
Teleport will reject the SSH connection.

On your local machine, assign the IP address, fully qualified domain name of
your node, and the node's UUID to an environment variable. If you won't be
connecting to your node with its hostname, you can safely omit it.

```code
$ ADDR=1.2.3.4,openssh-node,a100fdd0-52db-4eca-a7ab-c3afa7a1564a
```

Run the following `tctl` command to generate a host certificate:

```code
$ tctl auth sign \
  --host=${ADDR?} \
  --format=openssh \
  --out=myhost

# The credentials have been written to myhost, myhost-cert.pub
```

The above command will result in a private key and certificate.

+ +To generate certificates for multiple hosts, assign the `host` flag to a +comma-separated list of addresses. Certificates for wildcard domains are not +supported by OpenSSH, so each domain must be fully qualified. + +
+ +Use `ssh-keygen` to verify the contents of the certificate: + +```code +$ ssh-keygen -L -f myhost-cert.pub +``` + +The `Principals` section should contain the address you assigned to `ADDR` +earlier: + +``` +myhost-cert.pub: + Type: ssh-rsa-cert-v01@openssh.com host certificate + Public key: RSA-CERT SHA256:nHkp6SnrAW4AV00VUaqPgR6SgdyvV9MmjUrYnwZ779A + Signing CA: RSA SHA256:euqx2Y8Pq+r0c94GKVNXAklBVTmAJtaQUn3/ehrfEJE (using rsa-sha2-512) + Key ID: "" + Serial: 0 + Valid: after 2022-04-22T11:14:16 + Principals: + 1.2.3.4 + openssh-node + a100fdd0-52db-4eca-a7ab-c3afa7a1564a + Critical Options: (none) + Extensions: + x-teleport-authority UNKNOWN OPTION (len 33) + x-teleport-role UNKNOWN OPTION (len 8) +``` + +Copy the host key and certificate to your `sshd` host, placing them in the +directory `/etc/ssh`. + +Make sure these files have the correct permissions: + +```code +$ sudo chmod 0600 /etc/ssh/myhost +$ sudo chmod 0600 /etc/ssh/myhost-cert.pub +``` + +Then add the following lines to `/etc/ssh/sshd_config` on your `sshd` host: + +```yaml +HostKey /etc/ssh/myhost +HostCertificate /etc/ssh/myhost-cert.pub +``` + +Restart `sshd`. + +## Step 4/5. Generate an SSH client configuration + +The next step is to configure your OpenSSH client to connect to your `sshd` host +using credentials managed by Teleport. This configuration will use your user's +Teleport-issued certificate to authenticate to the `sshd` host. It will also +authenticate the `sshd` host using the host certificate you generated earlier. 
+ +First, make sure you have logged in to your Teleport cluster: + + + + +```code +$ tsh status +> Profile URL: https://teleport.example.com:443 + Logged in as: myuser + Cluster: teleport.example.com + Roles: access, auditor, editor, host-certifier + Logins: ubuntu, root + Kubernetes: enabled + Valid until: 2022-05-06 22:54:01 -0400 EDT [valid for 11h53m0s] + Extensions: permit-agent-forwarding, permit-port-forwarding, permit-pty +``` + + + + +```code +$ tsh status +> Profile URL: https://teleport.example.com:443 + Logged in as: myuser + Cluster: teleport.example.com + Roles: access, auditor, editor, reviewer, host-certifier + Logins: ubuntu, root + Kubernetes: enabled + Valid until: 2022-05-06 22:54:01 -0400 EDT [valid for 11h53m0s] + Extensions: permit-agent-forwarding, permit-port-forwarding, permit-pty +``` + + + + +```code +$ tsh status +> Profile URL: https://mytenant.teleport.sh:443 + Logged in as: myuser + Cluster: mytenant.teleport.sh + Roles: access, auditor, editor, reviewer, host-certifier + Logins: ubuntu, root + Kubernetes: enabled + Valid until: 2022-05-06 22:54:01 -0400 EDT [valid for 11h53m0s] + Extensions: permit-agent-forwarding, permit-port-forwarding, permit-pty +``` + + + + +On your local machine, run the following `tsh` command. This will print a +configuration block that tells your SSH client to use credentials managed by +Teleport to connect to hosts in your cluster. + +```code +$ tsh config > ssh_config_teleport +``` + +This command creates an SSH configuration file at a nonstandard location in +order to make it easier to clean up, but you can append the output of +`tsh config` to the default SSH config file (`~/.ssh/config`) if you wish. + +
+ +Teleport implements an SSH server that includes several **subsystems**, or +predefined commands that are run when the server handles a connection. The Proxy +Service implements a `proxy` subsystem that forwards SSH traffic to remote hosts +and trusted clusters. + +Here is a brief explanation of the configuration that `tsh config` generates: + +``` +# Common flags for all {{ .ClusterName }} hosts +Host *.{{ .ClusterName }} {{ .ProxyHost }} + UserKnownHostsFile "{{ .KnownHostsPath }}" + IdentityFile "{{ .IdentityFilePath }}" + CertificateFile "{{ .CertificateFilePath }}" +``` + +If the host you are `ssh`ing into belongs to your Teleport cluster (i.e., its +address is a subdomain of your cluster's domain), use a Teleport-managed known +hosts file, private key, and certificate that are stored in the `.tsh` +directory. + +``` +# Flags for all {{ .ClusterName }} hosts except the proxy +Host *.{{ .ClusterName }} !{{ .ProxyHost }} + Port 3022 + ProxyCommand "{{ .TSHPath }}" proxy ssh --cluster={{ .ClusterName }} --proxy={{ .ProxyHost }} %r@%h:%p +``` + +If the host that you are `ssh`ing into belongs to your Teleport cluster, the +OpenSSH client will first execute a command, the `ProxyCommand`, that +establishes an SSH connection to the Proxy Service. This command, +`tsh proxy ssh`, requests the `proxy` subsystem in order to forward SSH traffic +through the Proxy Service to your chosen host (including a host in a Trusted +Cluster). + +The `tsh proxy ssh` command requests the `proxy` subsystem through a command +similar to the following, which assumes you are logging in to a node called +`mynode` as `root` with a cluster called `teleport.example.com`: + +```code +$ /usr/bin/ssh -l root -A -o UserKnownHostsFile=/root/.tsh/known_hosts -p 11105 teleport.example.com -s proxy:mynode:3022@teleport.example.com +``` + +Notice that the `known_hosts` file used by the command is managed by `tsh`. 
+Since the `sshd` host's information is listed in this file, your SSH client can +authenticate the host via the certificate we generated earlier. + +
+ +
+ + If using PowerShell on Windows, note that normal shell redirection may write + the file with the incorrect encoding. To ensure it's written properly, try the + following: + + ```code + $ tsh.exe config | out-file .ssh\config -encoding utf8 -append + ``` + +
+ +
+Routing in Teleport clusters is case-sensitive by default, but OpenSSH always lowercases hostnames. If you +are using an OpenSSH client and have hosts with uppercase letters in their hostnames, you may need to set +`case_insensitive_routing: true` in either the `auth_service` block of your Teleport config, or in the +`cluster_networking_config` resource. +
+ + + + If you switch between multiple Teleport Proxy Servers, you'll need to re-run + `tsh config` for each to generate the cluster-specific configuration. + + Similarly, if trusted clusters are added or removed, be sure to re-run + `tsh config` and replace the previous configuration. + + + +## Step 5/5. Connect to your `sshd` host + +Once you have appended the new text to your OpenSSH client configuration file, +you can log in to your `sshd` host using the configuration we generated earlier. + +First, define environment variables for the address of your Teleport cluster, +the username you will use to log in to your `sshd` host, and the port on your +`sshd` host you are using for SSH traffic: + + + + +```code +# See the available logins you can use to access your sshd host +$ tsh status | grep Logins +Logins: ubuntu, root +$ USER=ubuntu +$ CLUSTER=teleport.example.com +$ PORT=22 +``` + + + + +```code +# See the available logins you can use to access your sshd host +$ tsh status | grep Logins +Logins: ubuntu, root +$ USER=ubuntu +$ CLUSTER=mytenant.teleport.sh +$ PORT=22 +``` + + + + +Next, SSH in to your remote host: + +```code +$ ssh -p ${PORT?} -F ssh_config_teleport "${USER?}@${ADDR?}.${CLUSTER?}" +``` + +This name does not need to be resolvable via DNS as the connection will be +routed through your Teleport Proxy Service. + +
+ +By default, the OpenSSH client configuration generated by `tsh config` directs +the Teleport Proxy Service to dial port 3022 of a node in your Teleport cluster. +This works if the node's SSH Service is listening on port 3022, and means that +you can connect to the Teleport SSH Service via your OpenSSH client. + + When you join a Teleport node to a cluster, the node creates a reverse tunnel + to the cluster's Proxy Service. When you run an `ssh` command to access a host + in your Teleport cluster using the configuration we generated, the Teleport + Proxy Service will attempt to connect to the host via this reverse tunnel and, + if that fails, try directly dialing the address. + +In our case, the `sshd` host is not running Teleport, so no reverse tunnel will +exist. Instead, the Proxy Service will establish a direct connection on the +host's SSH port. + +
+ +
+ +You can log in to a host in a trusted leaf cluster by placing the name of the leaf cluster +between the name of the node and the name of your root cluster: + +```code +$ ssh -F ssh_config_teleport ${USER?}@node2.leafcluster.${CLUSTER} +``` + +
+ + + + Teleport uses OpenSSH certificates instead of keys. When you connect to a + remote host, OpenSSH verifies that the address of the host is listed under the + `Principals` section of the OpenSSH certificate. Usually, this is a fully + qualified domain name, rather than an IP address. + + \ No newline at end of file diff --git a/docs/pages/server-access/guides/openssh.mdx b/docs/pages/server-access/guides/openssh/openssh.mdx similarity index 83% rename from docs/pages/server-access/guides/openssh.mdx rename to docs/pages/server-access/guides/openssh/openssh.mdx index 1660c0f63b0f8..19089fe051907 100644 --- a/docs/pages/server-access/guides/openssh.mdx +++ b/docs/pages/server-access/guides/openssh/openssh.mdx @@ -14,11 +14,11 @@ Using Teleport and OpenSSH has the advantage of getting you up and running, but in the long run, we would recommend replacing `sshd` with `teleport`. `teleport` SSH servers have support for multiple features that are incompatible with OpenSSH: -- RBAC and resource filtering based on [dynamically updated labels](../../management/admin/labels.mdx) -- [Session recording without SSH connection termination](recording-proxy-mode.mdx) -- [Session sharing](../../connect-your-client/tsh.mdx) -- [Advanced session recording](bpf-session-recording.mdx) -- [Restricting outbound network connections in SSH sessions](restricted-session.mdx) +- RBAC and resource filtering based on [dynamically updated labels](../../../management/admin/labels.mdx) +- [Session recording without SSH connection termination](../recording-proxy-mode.mdx) +- [Session sharing](../../../connect-your-client/tsh.mdx) +- [Advanced session recording](../bpf-session-recording.mdx) +- [Restricting outbound network connections in SSH sessions](../restricted-session.mdx) Teleport supports OpenSSH by proxying SSH connections through the Proxy Service. When a Teleport user requests to connect to an OpenSSH node, the Proxy Service checks the user's Teleport roles. 
@@ -30,6 +30,19 @@ a certificate signed by a Teleport CA that only the Auth Service possesses. In this setup, the Teleport SSH Service performs RBAC checks as well as audits and records sessions on its host, which eliminates the need for connection termination when recording SSH sessions. + + + Registering an OpenSSH node with Teleport involves copying the `teleport` binary + onto your `sshd` host. The `teleport` binary will handle registering the node + with your cluster, generating certificates, modifying your OpenSSH `sshd` config, + and more. If copying the `teleport` binary onto your `sshd` and running it isn't + an option, you can [register your node manually instead](openssh-manual-install.mdx). + + + ## Prerequisites - OpenSSH version 6.9 or above on your local machine. View your OpenSSH version @@ -46,6 +59,22 @@ In this setup, the Teleport SSH Service performs RBAC checks as well as audits a Teleport Proxy Service host. - (!docs/pages/includes/tctl.mdx!) +### Upgrading to v14 from legacy OpenSSH nodes + + If you have previously configured OpenSSH nodes to trust a Teleport CA without + registering them and you upgrade your Teleport cluster to Teleport 14, you won't + be able to connect to them anymore by default. This is because open dialing to + OpenSSH servers not registered with the cluster is no longer allowed in Teleport 14. + To ensure that you will retain access to your OpenSSH nodes you will need to follow + this guide to register every OpenSSH node with Teleport that you previously + configured. This must be done *before* your Teleport cluster is upgraded to Teleport 14. + + If you are having issues registering OpenSSH nodes or need to upgrade your + Teleport cluster to Teleport 14 before registering all of your OpenSSH nodes, you can + pass the `TELEPORT_UNSTABLE_UNLISTED_AGENT_DIALING` environment variable to your + Proxy Service and set it to `yes`. This will allow connections to unregistered + OpenSSH nodes but will be removed in Teleport v15. 
+ ## Step 1/3. Configure `sshd` Teleport only allows access to resources in your infrastructure via Teleport @@ -78,7 +107,8 @@ Change the command-line options to assign the following values: Check that your new node is listed with `tsh ls` or in the Web UI. You can edit the hostname and labels with `tctl edit nodes/`. If the hostname isn't unique, get the UUID -from `tctl nodes ls -v` and edit with `tctl edit nodes/`. +from `tctl nodes ls -v` and edit with `tctl edit nodes/`. After you've confirmed the node +was registered successfully you can delete the copied `teleport` binary. ## Step 2/3. Generate an SSH client configuration @@ -135,7 +165,6 @@ $ tsh status ```
- On your local machine, run the following `tsh` command. This will print a @@ -267,7 +296,6 @@ $ PORT=22 ``` - Next, SSH in to your remote host: @@ -298,10 +326,10 @@ host's SSH port.
-
+
-You can log in to a host in a trusted cluster by placing the name of the cluster -between the name of the node and the name of your root Teleport cluster: +You can log in to a host in a trusted leaf cluster by placing the name of +the leaf cluster between the name of the node and the name of the root cluster: ```code $ ssh -F ssh_config_teleport ${USER?}@node2.leafcluster.${CLUSTER} @@ -319,4 +347,4 @@ $ ssh -F ssh_config_teleport ${USER?}@node2.leafcluster.${CLUSTER} `Principals` section of the OpenSSH certificate. Usually, this is a fully qualified domain name, rather than an IP address. - + \ No newline at end of file From 028ccd8ddaff37d0c42e2d098b3e7816adf7c1e5 Mon Sep 17 00:00:00 2001 From: lsgunn-teleport <136391445+lsgunn-teleport@users.noreply.github.com> Date: Tue, 10 Oct 2023 15:34:23 -0700 Subject: [PATCH 05/22] Update federation.mdx --- docs/pages/kubernetes-access/manage-access/federation.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/pages/kubernetes-access/manage-access/federation.mdx b/docs/pages/kubernetes-access/manage-access/federation.mdx index 9517c42715480..5e1d0373f849b 100644 --- a/docs/pages/kubernetes-access/manage-access/federation.mdx +++ b/docs/pages/kubernetes-access/manage-access/federation.mdx @@ -21,7 +21,7 @@ For example, consider the following setup: - There are three Teleport/Kubernetes clusters: the root cluster named `main` and the leaf clusters `east` and `west` specified in the `cluster_name` setting in each cluster's configuration file. -- The clusters `east` and `west` are trust the `main` root cluster certificate authority. +- The clusters `east` and `west` trust the `main` root cluster certificate authority. - Users always authenticate against `main` but use their certificates to access SSH nodes and the Kubernetes API in all three clusters. - The DNS name of the main Proxy Service is `main.example.com`. 
From 21047247ca7f363c614c66cb0d4af68e2f4b9575 Mon Sep 17 00:00:00 2001 From: Nic Klaassen Date: Tue, 10 Oct 2023 12:48:22 -0700 Subject: [PATCH 06/22] disable TestHSMDualAuthRotation (#33251) --- integration/hsm/hsm_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/integration/hsm/hsm_test.go b/integration/hsm/hsm_test.go index 2328845d54826..9b7d12a1ef2ac 100644 --- a/integration/hsm/hsm_test.go +++ b/integration/hsm/hsm_test.go @@ -231,6 +231,10 @@ func testAdminClient(t *testing.T, authDataDir string, authAddr string) { // Tests multiple CA rotations and rollbacks with 2 HSM auth servers in an HA configuration func TestHSMDualAuthRotation(t *testing.T) { + // TODO(nklaassen): fix this test and re-enable it. + // https://github.com/gravitational/teleport/issues/20217 + t.Skip("TestHSMDualAuthRotation is temporarily disabled due to flakiness") + requireHSMAvailable(t) requireETCDAvailable(t) From 630b451cf51bf56ccbe24ac44fcdf0aa2c36b35f Mon Sep 17 00:00:00 2001 From: lsgunn-teleport <136391445+lsgunn-teleport@users.noreply.github.com> Date: Tue, 10 Oct 2023 12:52:25 -0700 Subject: [PATCH 07/22] [v14] docs: Caveat for token permissions not scoped to any resource context (#33166) * caveat for token permissions not scoped to any resource context * Add section for token resource * Split paragraph * Modify wording to remove type of token language * fix typo * Remove device trust example --- docs/pages/access-controls/reference.mdx | 31 ++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/docs/pages/access-controls/reference.mdx b/docs/pages/access-controls/reference.mdx index 6c9aea06084ec..4bc163a479499 100644 --- a/docs/pages/access-controls/reference.mdx +++ b/docs/pages/access-controls/reference.mdx @@ -357,9 +357,40 @@ allow: - resources: - token verbs: [list, create, read, update, delete] +``` + +### Allowing access to token resources +If you configure a role that allows tokens to be created, users assigned to the +role can create 
tokens to provision any type of Teleport resource. +For example, you might create a role with the following configuration to enable assigned +users to enroll servers: + +```yaml +kind: role +version: v7 +metadata: + name: enroll-servers +spec: + allow: + node_labels: + 'env': 'us-lab' + rules: + - resources: [token] + verbs: [list, create, read, update, delete] + deny: {} ``` +With these permissions, users assigned to the role can generate tokens to enroll +a server, application, or database, establish a trust relationship between a root +cluster and a new Teleport Proxy Service, or add a new leaf cluster. + +Because the token resource isn't scoped to a specific context, such as a node or +trusted cluster, you should consider any role that provides token permissions to be +an administrative role. In particular, you should avoid configuring `allow` rules +that grant `create` and `update` permissions on `token` resources to prevent +unexpected changes to the configuration or state of your cluster. 
+ ## RBAC for sessions It is possible to further limit access to From 63aaaf22e050be7642651554eedf84c243996d38 Mon Sep 17 00:00:00 2001 From: Steven Martin Date: Tue, 10 Oct 2023 16:27:34 -0400 Subject: [PATCH 08/22] [v14] docs: role definition update and update networking ports info (#33223) * docs: update session resources * adds instance, session_tracker and updates ssh_session * docs: update networking ports --- docs/pages/access-controls/reference.mdx | 12 ++++++------ docs/pages/includes/role-spec.mdx | 4 +++- docs/pages/reference/networking.mdx | 25 ++++++++++++++++-------- 3 files changed, 26 insertions(+), 15 deletions(-) diff --git a/docs/pages/access-controls/reference.mdx b/docs/pages/access-controls/reference.mdx index 4bc163a479499..225df2a287f11 100644 --- a/docs/pages/access-controls/reference.mdx +++ b/docs/pages/access-controls/reference.mdx @@ -435,18 +435,18 @@ metadata: spec: allow: rules: - # Teleport allows shared session access by default, so for our restrictions - # to work we first allow access to ssh_sessions... - - resources: [ssh_session] + # Teleport allows session access to the user's sessions + # and sessions they can join by default. This allows seeing any sessions. + - resources: [session_tracker] verbs: ['*'] deny: rules: # ... and then limit that access via a deny rule. # Deny rules take precedence over allow rules, so the resulting role allows # users to create SSH sessions but to only view their own sessions. 
- - resources: [ssh_session] + - resources: [session_tracker] verbs: [list, read, update, delete] - where: '!contains(ssh_session.participants, user.metadata.name)' + where: '!contains(session_tracker.participants, user.metadata.name)' ``` ## Second Factor - U2F @@ -462,7 +462,7 @@ Here is an explanation of the fields used in the `where` and `filter` conditions | -------------------------- | ------------------------------------------------- | | `user.spec.roles` | The list of roles assigned to a user | | `session.participants` | The list of participants from a session recording | -| `ssh_session.participants` | The list of participants from an SSH session | +| `session_tracker.participants` | The list of participants from an SSH session | | `user.metadata.name` | The user's name | Check out our [predicate language](../reference/predicate-language.mdx#scoping-allowdeny-rules-in-role-resources) diff --git a/docs/pages/includes/role-spec.mdx b/docs/pages/includes/role-spec.mdx index 1cef8d3dd2e2e..94df10f9c630d 100644 --- a/docs/pages/includes/role-spec.mdx +++ b/docs/pages/includes/role-spec.mdx @@ -392,7 +392,9 @@ spec: # access_plugin_data - allows modifying Access Request plugin data # # session - session playback records - # ssh_session - an active SSH session + # session_tracker - an active session + # ssh_session - allows seeing active sessions page + # instance - a Teleport instance # event - structured audit logging event # # diff --git a/docs/pages/reference/networking.mdx b/docs/pages/reference/networking.mdx index 888898c989478..4958a788eb4e8 100644 --- a/docs/pages/reference/networking.mdx +++ b/docs/pages/reference/networking.mdx @@ -149,6 +149,10 @@ In those cases, they can set up separate listeners in the config file. | 3023 | All clients | SSH port clients connect to. The Proxy Service will forward this connection to port `3022` on the destination service. 
| | 3024 | Auth Service | SSH port used to create reverse SSH tunnels from behind-firewall environments into a trusted Proxy Service instance. | | 3080 or 443 | Proxy Service | HTTPS connection to authenticate `tsh` users into the cluster. The same connection is used to serve a Web UI. | +| 3036 | Database Service | Traffic to MySQL databases.| +| 5432 | Database Service | Traffic to Postgres databases.| +| 27017 | Database Service | Traffic to MongoDB instances.| +| 6379 | Database Service | Traffic to Redis instances.| ### Auth Service ports @@ -225,19 +229,24 @@ meaning that you can expose ports on that service's host directly to clients. This is useful when you need to connect to resources directly if the Proxy Service becomes unavailable. + + In Teleport Cloud, the Auth and Proxy Services run in Teleport-owned infrastructure. +For this reason, Teleport Cloud customers must connect their resources via reverse tunnels. +Exposing ports for direct dial is only supported in self-hosted deployments. + + The table below describes the ports that each Teleport Service opens for proxied traffic: | Port | Service | Traffic Type | | - | - | - | | 3022 | SSH Service | Incoming SSH connections.| -| 3026 | Kubernetes Service | HTTPS traffic to a Kubernetes API server.| -| 3036 | Database Service | Traffic to MySQL databases.| -| 5432 | Database Service | Traffic to Postgres databases.| -| 27017 | Database Service | Traffic to MongoDB instances.| -| 6379 | Database Service | Traffic to Redis instances.| +| 3026 | Kubernetes Service | HTTPS traffic to a Kubernetes API server.| | 3028 | Windows Desktop Service | Teleport Desktop Protocol traffic from Teleport clients.| -Applications registered with the Teleport Application Service can only be -accessed via the Teleport Proxy Service, not directly via the Application -Service. +You can only access enrolled applications and databases through the Teleport Proxy Service. 
+The Teleport Application Service and Teleport Database Service use reverse tunnel +connections through the Teleport Proxy Service and cannot expose ports directly. \ No newline at end of file From 1a09c53d205985c85f5782f3c709544194c4be01 Mon Sep 17 00:00:00 2001 From: Forrest <30576607+fspmarshall@users.noreply.github.com> Date: Tue, 10 Oct 2023 14:00:29 -0700 Subject: [PATCH 09/22] fix watcher setup in oidc test (#33258) --- tool/tsh/common/tsh_test.go | 60 ++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/tool/tsh/common/tsh_test.go b/tool/tsh/common/tsh_test.go index b7a5ac2cac617..c3d440d3404a3 100644 --- a/tool/tsh/common/tsh_test.go +++ b/tool/tsh/common/tsh_test.go @@ -422,39 +422,40 @@ func TestOIDCLogin(t *testing.T) { proxyAddr, err := proxyProcess.ProxyWebAddr() require.NoError(t, err) + // set up watcher to approve the automatic request in background var didAutoRequest atomic.Bool + watcher, err := authServer.NewWatcher(ctx, types.Watch{ + Kinds: []types.WatchKind{ + {Kind: types.KindAccessRequest}, + }, + }) + require.NoError(t, err) + + // ensure that we observe init event prior to moving watcher to background + // goroutine (ensures watcher init does not race with request creation). 
+ select { + case event := <-watcher.Events(): + require.Equal(t, event.Type, types.OpInit) + case <-watcher.Done(): + require.FailNow(t, "watcher closed unexpected", "err: %v", watcher.Error()) + } - errCh := make(chan error) go func() { - watcher, err := authServer.NewWatcher(ctx, types.Watch{ - Kinds: []types.WatchKind{ - {Kind: types.KindAccessRequest}, - }, - }) - if err != nil { - errCh <- err - return - } - for { - select { - case event := <-watcher.Events(): - if event.Type != types.OpPut { - continue - } - err = authServer.SetAccessRequestState(ctx, types.AccessRequestUpdate{ - RequestID: event.Resource.(types.AccessRequest).GetName(), - State: types.RequestState_APPROVED, - }) - didAutoRequest.Store(true) - errCh <- err - return - case <-watcher.Done(): - errCh <- nil - return - case <-ctx.Done(): - errCh <- nil - return + select { + case event := <-watcher.Events(): + if event.Type != types.OpPut { + panic(fmt.Sprintf("unexpected event type: %v\n", event)) } + err = authServer.SetAccessRequestState(ctx, types.AccessRequestUpdate{ + RequestID: event.Resource.(types.AccessRequest).GetName(), + State: types.RequestState_APPROVED, + }) + if err != nil { + panic(fmt.Sprintf("failed to approve request: %v", err)) + } + didAutoRequest.Store(true) + case <-watcher.Done(): + panic(fmt.Sprintf("watcher exited unexpectedly: %v", watcher.Error())) } }() @@ -475,7 +476,6 @@ func TestOIDCLogin(t *testing.T) { }) require.NoError(t, err) - require.NoError(t, <-errCh) // verify that auto-request happened require.True(t, didAutoRequest.Load()) From ab79833baa24a9173d7d13830c703e3ce83c100e Mon Sep 17 00:00:00 2001 From: lsgunn-teleport <136391445+lsgunn-teleport@users.noreply.github.com> Date: Tue, 10 Oct 2023 14:08:28 -0700 Subject: [PATCH 10/22] Add server troubleshooting to left nav (#33224) --- docs/config.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/config.json b/docs/config.json index 8088b854aa3a0..6c14c3e1a828d 100644 --- a/docs/config.json +++ 
b/docs/config.json @@ -1232,6 +1232,10 @@ { "title": "Access Controls", "slug": "/server-access/rbac/" + }, + { + "title": "Troubleshooting Server Access", + "slug": "/server-access/troubleshooting-server/" } ] }, From 42e72340ff69c743d146e6f3dbb4b84c7db7bbec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Cie=C5=9Blak?= Date: Tue, 10 Oct 2023 23:12:54 +0200 Subject: [PATCH 11/22] Reword Troubleshooting section in Connect docs (#33201) --- .../connect-your-client/teleport-connect.mdx | 58 ++++++++++++------- 1 file changed, 38 insertions(+), 20 deletions(-) diff --git a/docs/pages/connect-your-client/teleport-connect.mdx b/docs/pages/connect-your-client/teleport-connect.mdx index 25ba465ebdd02..1e5eb602f1f1d 100644 --- a/docs/pages/connect-your-client/teleport-connect.mdx +++ b/docs/pages/connect-your-client/teleport-connect.mdx @@ -263,27 +263,39 @@ The changes will take effect at the next launch. ## Troubleshooting -Logging out of a cluster, closing the app and logging in again resets all app state related to that -cluster. This can help if you encounter a bug which renders the user interface partially unusable. -It might also help if you have issues with connecting to an active cluster that don't happen in the -Web UI. +Resetting the app state might help with UI crashes caused by Teleport Connect getting into an +abnormal state. This might happen after downgrading Teleport Connect or performing substantial +server-side changes. Those UI crashes typically manifest as a blank window or an Internal Error +alert. + +To reset the state related to a particular cluster: + +1. Log out of the cluster. +1. Close Teleport Connect. +1. Open Teleport Connect, then log back in to the cluster. + +To completely wipe all app state: + +1. Close Teleport Connect. +1. Remove the internal `tsh` folder and the `app_state.json` file to log out of all clusters and + clear all remembered tabs and connections. 
-To force the app to log you out of all clusters, close the app and remove the `~/Library/Application -Support/Teleport Connect/tsh` folder. Removing the file `~/Library/Application -Support/Teleport Connect/app_state.json` will clear all remembered tabs and connections. +```code +$ rm -rf ~/Library/Application\ Support/Teleport\ Connect/{tsh,app_state.json} +``` -To force the app to log you out of all clusters, close the app and remove the `~/.config/Teleport -Connect/tsh` folder. Removing the file `/.config/Teleport Connect/app_state.json` will clear -all remembered tabs and connections. +```code +$ rm -rf ~/.config/Teleport\ Connect/{tsh,app_state.json} +``` -To force the app to log you out of all clusters, close the app and remove the -`C:\Users\%UserName%\AppData\Roaming\Teleport Connect\tsh` folder. Removing the file -`C:\Users\%UserName%\AppData\Roaming\Teleport Connect\app_state.json` will clear all remembered tabs -and connections. +```code +$ rmdir /s /q C:\Users\%UserName%\AppData\Roaming\"Teleport Connect"\tsh +$ del C:\Users\%UserName%\AppData\Roaming\"Teleport Connect"\app_state.json +``` @@ -294,16 +306,20 @@ and follow the *Submit a Bug* link. -Be sure to attach logs, which can be found under `~/Library/Application Support/Teleport Connect/logs`. -The version of the app can be found in the app menu under the About Teleport Connect menu item. +Be sure to attach logs, which can be found in the app menu under Help -> Open Logs Directory. The +logs are stored in `~/Library/Application Support/Teleport Connect/logs`. + +The app version can be found in the app menu under Teleport Connect -> About Teleport Connect. To get more detailed logs, run Teleport Connect with the `--connect-debug` flag: (!docs/pages/connect-your-client/includes/launch-connect-with-flags-macos.mdx flags="--connect-debug"!) -Be sure to attach logs, which can be found under `~/.config/Teleport Connect/logs`. 
The app version -can be found by pressing `Alt` to access the app menu, then -> Help -> About Teleport Connect. +Be sure to attach logs, which can be found by pressing `Alt` to access the app menu, then Help -> +Open Logs Directory. The logs are stored in `~/.config/Teleport Connect/logs`. + +The app version can be found under Help -> About Teleport Connect. To get more detailed logs, run Teleport Connect with the `--connect-debug` flag: @@ -312,10 +328,12 @@ $ teleport-connect --connect-debug ```` -Be sure to attach logs, which can be found under `C:\Users\%UserName%\AppData\Roaming\Teleport Connect\logs`. +Be sure to attach logs, which can be found by pressing `Alt` to access the app menu, then Help -> +Open Logs Directory. The logs are stored in `C:\Users\%UserName%\AppData\Roaming\Teleport +Connect\logs`. You may need to adjust File Explorer to [view hidden files and folders](https://support.microsoft.com/en-us/search?query=how%20to%20view%20hidden%20files%20in%20windows%2010). -The app version can be found by pressing `Alt` to access the app menu -> Help -> About Teleport Connect. +The app version can be found under Help -> About Teleport Connect. To get more detailed logs, open Teleport Connect from the Command Prompt with the `--connect-debug` flag: From 953237fca3ba66d15079fa0e966deef09a72a25c Mon Sep 17 00:00:00 2001 From: Mike Jensen Date: Tue, 10 Oct 2023 15:33:45 -0600 Subject: [PATCH 12/22] [v14] utils.RecursiveChown: Fix for Privilege Escalation due to following symlinks (#33248) * utils.RecursiveChown: Harden against user access race conditions Prior to this change a user could exploit Teleports privileged access to `chown` arbitrary files on the system. This is due to the directory being changed first, allowing a small time window where a user can remove or rename the still `root` owned files with a symlink. The added tests help show this issue in a more controlled way. A switch to `os.Lchown` avoids the risk in following symlinks to files. 
In addition, in order to remove the risk for hardlinks (notably on OSX with reduced hardlink protections), as well as risks with directory symlinks, the folder structure is inspected before any `chown` operation. And then the files are updated before their parent directories. * Update other instances of `os.Chown` to `os.Lchown` None of these cases should expect a symlink that would need to be followed. --- lib/events/auditlog.go | 6 +- lib/service/service.go | 2 +- lib/srv/reexec.go | 2 +- lib/srv/term.go | 2 +- lib/teleagent/agent.go | 4 +- lib/utils/fs.go | 31 +++++++--- lib/utils/fs_unix_test.go | 121 ++++++++++++++++++++++++++++++++++++++ 7 files changed, 151 insertions(+), 17 deletions(-) create mode 100644 lib/utils/fs_unix_test.go diff --git a/lib/events/auditlog.go b/lib/events/auditlog.go index ff2db8b0d27f7..3856fb0133533 100644 --- a/lib/events/auditlog.go +++ b/lib/events/auditlog.go @@ -296,15 +296,15 @@ func NewAuditLog(cfg AuditLogConfig) (*AuditLog, error) { return nil, trace.ConvertSystemError(err) } if cfg.UID != nil && cfg.GID != nil { - err := os.Chown(cfg.DataDir, *cfg.UID, *cfg.GID) + err := os.Lchown(cfg.DataDir, *cfg.UID, *cfg.GID) if err != nil { return nil, trace.ConvertSystemError(err) } - err = os.Chown(sessionDir, *cfg.UID, *cfg.GID) + err = os.Lchown(sessionDir, *cfg.UID, *cfg.GID) if err != nil { return nil, trace.ConvertSystemError(err) } - err = os.Chown(al.playbackDir, *cfg.UID, *cfg.GID) + err = os.Lchown(al.playbackDir, *cfg.UID, *cfg.GID) if err != nil { return nil, trace.ConvertSystemError(err) } diff --git a/lib/service/service.go b/lib/service/service.go index 7a86a96272269..1c370bb4b8e5f 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -2874,7 +2874,7 @@ func (process *TeleportProcess) initUploaderService() error { } if uid != nil && gid != nil { log.Infof("Setting directory %v owner to %v:%v.", dir, *uid, *gid) - err := os.Chown(dir, *uid, *gid) + err := os.Lchown(dir, *uid, *gid) if err != nil { return 
trace.ConvertSystemError(err) } diff --git a/lib/srv/reexec.go b/lib/srv/reexec.go index 10aaff2682b99..51042fadaa7a6 100644 --- a/lib/srv/reexec.go +++ b/lib/srv/reexec.go @@ -403,7 +403,7 @@ func RunCommand() (errw io.Writer, code int, err error) { if err != nil { return errorWriter, teleport.RemoteCommandFailure, trace.Wrap(err) } - if err := os.Chown(c.X11Config.XServerUnixSocket, uid, gid); err != nil { + if err := os.Lchown(c.X11Config.XServerUnixSocket, uid, gid); err != nil { return errorWriter, teleport.RemoteCommandFailure, trace.Wrap(err) } diff --git a/lib/srv/term.go b/lib/srv/term.go index 7b7b1c77357b2..66ab8f4d2c856 100644 --- a/lib/srv/term.go +++ b/lib/srv/term.go @@ -472,7 +472,7 @@ func (t *terminal) setOwner() error { return trace.Wrap(err) } - err = os.Chown(t.tty.Name(), uid, gid) + err = os.Lchown(t.tty.Name(), uid, gid) if err != nil { return trace.Wrap(err) } diff --git a/lib/teleagent/agent.go b/lib/teleagent/agent.go index 31cc144985ace..cac5ff857c80d 100644 --- a/lib/teleagent/agent.go +++ b/lib/teleagent/agent.go @@ -131,7 +131,7 @@ func (a *AgentServer) updatePermissions(user *user.User) error { testPermissions() - if err := os.Chown(a.Path, uid, gid); err != nil { + if err := os.Lchown(a.Path, uid, gid); err != nil { return trace.ConvertSystemError(err) } @@ -139,7 +139,7 @@ func (a *AgentServer) updatePermissions(user *user.User) error { // To prevent a privilege escalation attack, this must occur // after the socket permissions are updated. 
- if err := os.Chown(a.Dir, uid, gid); err != nil { + if err := os.Lchown(a.Dir, uid, gid); err != nil { return trace.ConvertSystemError(err) } diff --git a/lib/utils/fs.go b/lib/utils/fs.go index 5cb56a8f55c05..36322877f83b3 100644 --- a/lib/utils/fs.go +++ b/lib/utils/fs.go @@ -372,19 +372,32 @@ func RemoveFileIfExist(filePath string) error { } func RecursiveChown(dir string, uid, gid int) error { - if err := os.Chown(dir, uid, gid); err != nil { - return trace.Wrap(err) - } - return trace.Wrap(filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + // First, walk the directory to gather a list of files and directories to update before we open up to modifications + var pathsToUpdate []string + err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err != nil { return trace.Wrap(err) } - err = os.Chown(path, uid, gid) - if os.IsNotExist(err) { // empty symlinks cause an error here - return nil - } + pathsToUpdate = append(pathsToUpdate, path) + return nil + }) + if err != nil { return trace.Wrap(err) - })) + } + + // filepath.WalkDir is documented to walk the paths in lexical order, iterating + // in the reverse order ensures that files are always Lchowned before their parent directory + for i := len(pathsToUpdate) - 1; i >= 0; i-- { + path := pathsToUpdate[i] + if err := os.Lchown(path, uid, gid); err != nil { + if errors.Is(err, os.ErrNotExist) { + // Unexpected condition where file was removed after discovery. + continue + } + return trace.Wrap(err) + } + } + return nil } func CopyFile(src, dest string, perm os.FileMode) error { diff --git a/lib/utils/fs_unix_test.go b/lib/utils/fs_unix_test.go new file mode 100644 index 0000000000000..8d430694262f4 --- /dev/null +++ b/lib/utils/fs_unix_test.go @@ -0,0 +1,121 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2023 Gravitational, Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "os" + "os/user" + "path/filepath" + "strconv" + "syscall" + "testing" + + "github.com/stretchr/testify/require" +) + +// The tests contained here only function on unix systems. + +func setupRecursiveChownFiles(t *testing.T) (string, string, string, string) { + // Setup will produce the following structure under the temp directory created below: + // dir1/ + // dir1/file + // dir2/ + // dir2/file-s -> dir1/file + rootDir := t.TempDir() + + dir1Path := filepath.Join(rootDir, "dir1") + require.NoError(t, os.Mkdir(dir1Path, 0755)) + + dir1FilePath := filepath.Join(dir1Path, "file") + f, err := os.Create(dir1FilePath) + require.NoError(t, err) + f.Close() + + dir2Path := filepath.Join(rootDir, "dir2") + require.NoError(t, os.Mkdir(dir2Path, 0755)) + + dir2SymlinkToFile := filepath.Join(dir2Path, "file-s") + err = os.Symlink(dir1FilePath, dir2SymlinkToFile) + require.NoError(t, err) + + return dir1Path, dir1FilePath, dir2Path, dir2SymlinkToFile +} + +func setupRecursiveChownUser(t *testing.T) (int, int, int, int, bool) { + currentUser, err := user.Current() + require.NoError(t, err) + + currentUID, err := strconv.Atoi(currentUser.Uid) + require.NoError(t, err) + currentGID, err := strconv.Atoi(currentUser.Gid) + require.NoError(t, err) + + root := os.Geteuid() == 0 + newUid := currentUID + 1 + newGid := currentGID + 1 + if !root { + // `root` is required to actually change ownership, if running under a normal 
user we will reduce the validation + newUid = currentUID + newGid = currentGID + } + + return currentUID, currentGID, newUid, newGid, root +} + +func verifyOwnership(t *testing.T, path string, uid, gid int) { + fi, err := os.Lstat(path) + require.NoError(t, err) + fiCast := fi.Sys().(*syscall.Stat_t) + require.Equal(t, uint32(uid), fiCast.Uid) + require.Equal(t, uint32(gid), fiCast.Gid) +} + +func TestRecursiveChown(t *testing.T) { + t.Run("notFoundError", func(t *testing.T) { + t.Parallel() + + require.Error(t, RecursiveChown("/invalid/path/to/nowhere", 1000, 1000)) + }) + t.Run("simpleChown", func(t *testing.T) { + t.Parallel() + _, _, newUid, newGid, _ := setupRecursiveChownUser(t) + dir1Path, dir1FilePath, _, _ := setupRecursiveChownFiles(t) + + require.NoError(t, RecursiveChown(dir1Path, newUid, newGid)) + // validate ownership matches expected ids + verifyOwnership(t, dir1Path, newUid, newGid) + verifyOwnership(t, dir1FilePath, newUid, newGid) + }) + t.Run("symlinkChown", func(t *testing.T) { + t.Parallel() + origUid, origGid, newUid, newGid, root := setupRecursiveChownUser(t) + if !root { + t.Skip("Skipping test, root is required") + return + } + _, dir1FilePath, dir2Path, dir2SymlinkToFile := setupRecursiveChownFiles(t) + + require.NoError(t, RecursiveChown(dir2Path, newUid, newGid)) + // Validate symlink has changed + verifyOwnership(t, dir2SymlinkToFile, newUid, newGid) + // Validate pointed file has not changed + verifyOwnership(t, dir1FilePath, origUid, origGid) + }) +} From 9e0fe96878baa3615cbec6bf22d69633bbe039f2 Mon Sep 17 00:00:00 2001 From: lsgunn-teleport <136391445+lsgunn-teleport@users.noreply.github.com> Date: Tue, 10 Oct 2023 14:40:34 -0700 Subject: [PATCH 13/22] [v14] [buddy] docs: minor typos and improvements in the description of the Teleport Proxy Service (#33184) * Minor typos * Update docs/pages/architecture/proxy.mdx * fix capitalization and hyphenation and make features more parallel * fix identity typo --------- Co-authored-by: 
Gabriel Petrovay --- docs/pages/architecture/proxy.mdx | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/docs/pages/architecture/proxy.mdx b/docs/pages/architecture/proxy.mdx index 77d1bf7e5b4a9..6efa616806871 100644 --- a/docs/pages/architecture/proxy.mdx +++ b/docs/pages/architecture/proxy.mdx @@ -4,14 +4,18 @@ description: Architecture of Teleport's identity-aware proxy service h1: Teleport Identity-Aware Proxy Service --- -Teleport Proxy is a identity aware proxy, with a web UI. Here are Proxy's key features: - -- Users can authenticate with a Single-Sign-On or local credentials to access SSH and Windows Desktops via Proxy's web UI. -- Proxy is identity aware - it makes sure that only authenticated clients can connect to target resources. -It intercepts traffic for multiple protocols - SSH, Kubernetes, HTTPS, databases. -It records commands, API calls and queries and streams them to the audit log. -- Proxy provides networking and connectivity features. Nodes and proxies behind firewalls can connect -to proxies using reverse tunnels. System administrators can use TLS routing feature to compress all ports for all protocols to one TLS port using TLS routing feature. +The Teleport Proxy Service is an identity-aware proxy with a web UI. The Teleport Proxy Service +provides the following key features: + +- Enables users to authenticate with a single sign-on identity provider or local credentials to access + SSH and Windows desktops using the Teleport web UI. +- Intercepts traffic for multiple protocols, including SSH, Kubernetes, HTTPS, + and databases, and ensures that only authenticated clients can connect to target resources. +- Records commands, API calls, and queries and streams them to the audit log. +- Provides networking and connectivity so that servers and proxies behind firewalls can connect + using reverse tunnels. 
S +- Enables system administrators to use TLS routing feature to compress all ports for all protocols + to one TLS port using TLS routing feature. ![Proxy service](../../img/architecture/proxy.png) From 8049c4626ef56a00c12dd5ff01b1826f14d072fe Mon Sep 17 00:00:00 2001 From: Alan Parra Date: Tue, 10 Oct 2023 19:10:11 -0300 Subject: [PATCH 14/22] chore: Bump google.golang.org/grpc to v1.57.1 (#33265) Update due to recent security patches. * https://github.com/grpc/grpc-go/releases/tag/v1.57.1 --- api/go.mod | 2 +- api/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/api/go.mod b/api/go.mod index 28b31e2180095..646350fb0a6ec 100644 --- a/api/go.mod +++ b/api/go.mod @@ -26,7 +26,7 @@ require ( golang.org/x/net v0.14.0 golang.org/x/term v0.11.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 - google.golang.org/grpc v1.57.0 + google.golang.org/grpc v1.57.1 google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/api/go.sum b/api/go.sum index ebc6be17fadf4..2125d1c3ae683 100644 --- a/api/go.sum +++ b/api/go.sum @@ -249,8 +249,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.57.1 h1:upNTNqv0ES+2ZOOqACwVtS3Il8M12/+Hz41RCPzAjQg= +google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/go.mod b/go.mod index 5b789c306170b..1d0ca43b84825 100644 --- a/go.mod +++ b/go.mod @@ -176,7 +176,7 @@ require ( golang.org/x/time v0.3.0 google.golang.org/api v0.138.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 - google.golang.org/grpc v1.57.0 + google.golang.org/grpc v1.57.1 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/protobuf v1.31.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c diff --git a/go.sum b/go.sum index 1b644770d8528..0634fce4236e5 100644 --- a/go.sum +++ b/go.sum @@ -2220,8 +2220,8 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.57.1 h1:upNTNqv0ES+2ZOOqACwVtS3Il8M12/+Hz41RCPzAjQg= +google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= From d3bf2fb9042fc6dd004ad7776478ce13987fd4cc Mon Sep 17 00:00:00 2001 From: Andrew LeFevre Date: Tue, 10 Oct 2023 18:23:58 -0400 Subject: [PATCH 15/22] [v14] re-add agentless node manual installation docs (#32811) * 
re-add agentless node manual installation docs * fix linter issues * Add missing closing paren Co-authored-by: Paul Gottschling * use tabs in manual install guide * removed more ScopedBlocks I missed * add note about upgrading to v14 * add redirect and notes linking the two guides to one another * addressed feedback * Apply suggestions from code review Co-authored-by: Paul Gottschling * fix links --------- Co-authored-by: Paul Gottschling --- docs/config.json | 11 ++++++++++- docs/pages/server-access/guides.mdx | 4 +++- docs/pages/server-access/guides/openssh.mdx | 9 +++++++++ .../guides/openssh/openssh-manual-install.mdx | 3 +-- docs/pages/server-access/guides/openssh/openssh.mdx | 2 +- 5 files changed, 24 insertions(+), 5 deletions(-) create mode 100644 docs/pages/server-access/guides/openssh.mdx diff --git a/docs/config.json b/docs/config.json index 6c14c3e1a828d..92ea9f8a31163 100644 --- a/docs/config.json +++ b/docs/config.json @@ -1177,7 +1177,11 @@ }, { "title": "Agentless OpenSSH Integration", - "slug": "/server-access/guides/openssh/" + "slug": "/server-access/guides/openssh/openssh/" + }, + { + "title": "Agentless OpenSSH Integration (Manual Install)", + "slug": "/server-access/guides/openssh/openssh-manual-install/" }, { "title": "Recording Proxy Mode", @@ -3100,6 +3104,11 @@ "source": "/management/guides/terraform-provider/", "destination": "/management/dynamic-resources/terraform-provider/", "permanent": true + }, + { + "source": "/server-access/guides/openssh/", + "destination": "/server-access/guides/openssh/openssh/", + "permanent": true } ] } diff --git a/docs/pages/server-access/guides.mdx b/docs/pages/server-access/guides.mdx index 9d855dab8a81a..e760e047dc2c9 100644 --- a/docs/pages/server-access/guides.mdx +++ b/docs/pages/server-access/guides.mdx @@ -5,7 +5,9 @@ layout: tocless-doc --- - [Using Teleport with PAM](./guides/ssh-pam.mdx): How to configure Teleport SSH with PAM (Pluggable Authentication Modules). 
-- [Agentless OpenSSH Integration](./guides/openssh.mdx): How to use Teleport in agentless mode on systems with OpenSSH and `sshd`. +- [Agentless OpenSSH Integration](./guides/openssh/openssh.mdx): How to use Teleport in agentless mode on systems with OpenSSH and `sshd`. +- [Agentless OpenSSH Integration (Manual Installation)](./guides/openssh/openssh-manual-install.mdx): How to use Teleport in agentless mode + on systems with OpenSSH and `sshd` that can't run `teleport`. - [Recording Proxy Mode](./guides/recording-proxy-mode.mdx): How to use Teleport Recording Proxy Mode to capture activity on OpenSSH servers. - [BPF Session Recording](./guides/bpf-session-recording.mdx): How to use BPF to record SSH session commands, modified files and network connections. - [Restricted Session](./guides/restricted-session.mdx): How to configure and use Restricted Session to apply security policies to SSH sessions. diff --git a/docs/pages/server-access/guides/openssh.mdx b/docs/pages/server-access/guides/openssh.mdx new file mode 100644 index 0000000000000..b527979d66d6c --- /dev/null +++ b/docs/pages/server-access/guides/openssh.mdx @@ -0,0 +1,9 @@ +--- +title: OpenSSH Guides +description: Teleport Agentless OpenSSH integration guides. +layout: tocless-doc +--- + +- [Agentless OpenSSH Integration](./openssh/openssh.mdx): How to use Teleport in agentless mode on systems with OpenSSH and `sshd`. +- [Agentless OpenSSH Integration (Manual Installation)](./openssh/openssh-manual-install.mdx): How to use Teleport in agentless mode + on systems with OpenSSH and `sshd` that can't run `teleport`. 
diff --git a/docs/pages/server-access/guides/openssh/openssh-manual-install.mdx b/docs/pages/server-access/guides/openssh/openssh-manual-install.mdx index 3c38168460143..fc5cbed0815fd 100644 --- a/docs/pages/server-access/guides/openssh/openssh-manual-install.mdx +++ b/docs/pages/server-access/guides/openssh/openssh-manual-install.mdx @@ -73,7 +73,6 @@ In this setup, the Teleport SSH Service performs RBAC checks as well as audits a Proxy Service and set it to `yes`. This will allow connections to unregistered OpenSSH nodes but will be removed in Teleport v15. - ## Step 1/5. Add a node resource to your Teleport cluster When you request an SSH connection to a OpenSSH node, Teleport needs to be able @@ -533,4 +532,4 @@ $ ssh -F ssh_config_teleport ${USER?}@node2.leafcluster.${CLUSTER} `Principals` section of the OpenSSH certificate. Usually, this is a fully qualified domain name, rather than an IP address. - \ No newline at end of file + diff --git a/docs/pages/server-access/guides/openssh/openssh.mdx b/docs/pages/server-access/guides/openssh/openssh.mdx index 19089fe051907..2992492c2a2bb 100644 --- a/docs/pages/server-access/guides/openssh/openssh.mdx +++ b/docs/pages/server-access/guides/openssh/openssh.mdx @@ -347,4 +347,4 @@ $ ssh -F ssh_config_teleport ${USER?}@node2.leafcluster.${CLUSTER} `Principals` section of the OpenSSH certificate. Usually, this is a fully qualified domain name, rather than an IP address. 
- \ No newline at end of file + From 09ce81cfde212faae12a8fa921ecaecee1f8dd43 Mon Sep 17 00:00:00 2001 From: Lisa Kim Date: Tue, 10 Oct 2023 19:29:23 -0700 Subject: [PATCH 16/22] Update e (#33280) --- e | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e b/e index 44bb30b1470b9..300df82e896be 160000 --- a/e +++ b/e @@ -1 +1 @@ -Subproject commit 44bb30b1470b9bc82e02fc4a4646efcc77d1fca0 +Subproject commit 300df82e896bec915a317df367958c13fb5569a6 From a390698ebc71dd16daae83ba811b2013c2bf4a82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marco=20Andr=C3=A9=20Dinis?= Date: Wed, 11 Oct 2023 15:05:25 +0100 Subject: [PATCH 17/22] DiscoveryConfig: init service and add resource to `tctl` (#32399) (#33289) * DiscoveryConfig: init service and add it to `tctl` This PR starts the DiscoveryConfig service in gRPC server and allows `tctl` to interact with those records. It also adds access to the `editor` role. Users should be able to RW any DiscoveryConfig. DiscoveryService should be able to watch those resources, so that it can act upon any changes. 
* add revision * add upsert method * improve tctl -f command --- api/client/discoveryconfig/discoveryconfig.go | 12 + api/client/events.go | 12 + .../v1/discoveryconfig_service.pb.go | 265 ++++++++++++------ .../v1/discoveryconfig_service_grpc.pb.go | 39 +++ .../v1/discoveryconfig_service.proto | 9 + lib/auth/auth.go | 13 + lib/auth/auth_with_roles.go | 10 + lib/auth/clt.go | 11 + .../discoveryconfigv1/service.go | 20 ++ .../discoveryconfigv1/service_test.go | 37 +++ lib/auth/grpcserver.go | 12 + lib/auth/init.go | 3 + lib/authz/permissions.go | 1 + lib/cache/cache.go | 39 +++ lib/cache/cache_test.go | 72 +++++ lib/cache/collections.go | 55 ++++ lib/service/service.go | 1 + lib/services/discoveryconfig.go | 2 + lib/services/local/discoveryconfig.go | 13 + lib/services/local/discoveryconfig_test.go | 17 +- lib/services/local/events.go | 26 ++ lib/services/presets.go | 1 + lib/services/resource.go | 2 + lib/services/services.go | 1 + lib/services/useracl.go | 4 + lib/services/useracl_test.go | 1 + tool/tctl/common/collection.go | 25 ++ tool/tctl/common/resource_command.go | 61 ++++ tool/tctl/common/resource_command_test.go | 126 +++++++++ 29 files changed, 797 insertions(+), 93 deletions(-) diff --git a/api/client/discoveryconfig/discoveryconfig.go b/api/client/discoveryconfig/discoveryconfig.go index ca531609e5096..60bacb2d42294 100644 --- a/api/client/discoveryconfig/discoveryconfig.go +++ b/api/client/discoveryconfig/discoveryconfig.go @@ -96,6 +96,18 @@ func (c *Client) UpdateDiscoveryConfig(ctx context.Context, discoveryConfig *dis return dc, trace.Wrap(err) } +// UpsertDiscoveryConfig creates or updates a DiscoveryConfig. 
+func (c *Client) UpsertDiscoveryConfig(ctx context.Context, discoveryConfig *discoveryconfig.DiscoveryConfig) (*discoveryconfig.DiscoveryConfig, error) { + resp, err := c.grpcClient.UpsertDiscoveryConfig(ctx, &discoveryconfigv1.UpsertDiscoveryConfigRequest{ + DiscoveryConfig: conv.ToProto(discoveryConfig), + }) + if err != nil { + return nil, trace.Wrap(err) + } + dc, err := conv.FromProto(resp) + return dc, trace.Wrap(err) +} + // DeleteDiscoveryConfig removes the specified DiscoveryConfig resource. func (c *Client) DeleteDiscoveryConfig(ctx context.Context, name string) error { _, err := c.grpcClient.DeleteDiscoveryConfig(ctx, &discoveryconfigv1.DeleteDiscoveryConfigRequest{ diff --git a/api/client/events.go b/api/client/events.go index d8d660ea37c29..a881ca9d8d74f 100644 --- a/api/client/events.go +++ b/api/client/events.go @@ -21,6 +21,8 @@ import ( "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/accesslist" accesslistv1conv "github.com/gravitational/teleport/api/types/accesslist/convert/v1" + "github.com/gravitational/teleport/api/types/discoveryconfig" + discoveryconfigv1conv "github.com/gravitational/teleport/api/types/discoveryconfig/convert/v1" "github.com/gravitational/teleport/api/types/userloginstate" userloginstatev1conv "github.com/gravitational/teleport/api/types/userloginstate/convert/v1" ) @@ -226,6 +228,10 @@ func EventToGRPC(in types.Event) (*proto.Event, error) { out.Resource = &proto.Event_AccessListMember{ AccessListMember: accesslistv1conv.ToMemberProto(r), } + case *discoveryconfig.DiscoveryConfig: + out.Resource = &proto.Event_DiscoveryConfig{ + DiscoveryConfig: discoveryconfigv1conv.ToProto(r), + } default: return nil, trace.BadParameter("resource type %T is not supported", in.Resource) } @@ -399,6 +405,12 @@ func EventFromGRPC(in *proto.Event) (*types.Event, error) { return nil, trace.Wrap(err) } return &out, nil + } else if r := in.GetDiscoveryConfig(); r != nil { + out.Resource, err = 
discoveryconfigv1conv.FromProto(r) + if err != nil { + return nil, trace.Wrap(err) + } + return &out, nil } else { return nil, trace.BadParameter("received unsupported resource %T", in.Resource) } diff --git a/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service.pb.go b/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service.pb.go index 8419d38c3c510..47d87275d24d0 100644 --- a/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service.pb.go +++ b/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service.pb.go @@ -307,6 +307,55 @@ func (x *UpdateDiscoveryConfigRequest) GetDiscoveryConfig() *DiscoveryConfig { return nil } +// UpsertDiscoveryConfigRequest is the request to upsert the provided DiscoveryConfig. +type UpsertDiscoveryConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DiscoveryConfig is the DiscoveryConfig to be upserted. + DiscoveryConfig *DiscoveryConfig `protobuf:"bytes,1,opt,name=discovery_config,json=discoveryConfig,proto3" json:"discovery_config,omitempty"` +} + +func (x *UpsertDiscoveryConfigRequest) Reset() { + *x = UpsertDiscoveryConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpsertDiscoveryConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpsertDiscoveryConfigRequest) ProtoMessage() {} + +func (x *UpsertDiscoveryConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
UpsertDiscoveryConfigRequest.ProtoReflect.Descriptor instead. +func (*UpsertDiscoveryConfigRequest) Descriptor() ([]byte, []int) { + return file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_rawDescGZIP(), []int{5} +} + +func (x *UpsertDiscoveryConfigRequest) GetDiscoveryConfig() *DiscoveryConfig { + if x != nil { + return x.DiscoveryConfig + } + return nil +} + // DeleteDiscoveryConfigRequest is a request for deleting a specific DiscoveryConfig resource. type DeleteDiscoveryConfigRequest struct { state protoimpl.MessageState @@ -320,7 +369,7 @@ type DeleteDiscoveryConfigRequest struct { func (x *DeleteDiscoveryConfigRequest) Reset() { *x = DeleteDiscoveryConfigRequest{} if protoimpl.UnsafeEnabled { - mi := &file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[5] + mi := &file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -333,7 +382,7 @@ func (x *DeleteDiscoveryConfigRequest) String() string { func (*DeleteDiscoveryConfigRequest) ProtoMessage() {} func (x *DeleteDiscoveryConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[5] + mi := &file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -346,7 +395,7 @@ func (x *DeleteDiscoveryConfigRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteDiscoveryConfigRequest.ProtoReflect.Descriptor instead. 
func (*DeleteDiscoveryConfigRequest) Descriptor() ([]byte, []int) { - return file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_rawDescGZIP(), []int{5} + return file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_rawDescGZIP(), []int{6} } func (x *DeleteDiscoveryConfigRequest) GetName() string { @@ -366,7 +415,7 @@ type DeleteAllDiscoveryConfigsRequest struct { func (x *DeleteAllDiscoveryConfigsRequest) Reset() { *x = DeleteAllDiscoveryConfigsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[6] + mi := &file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -379,7 +428,7 @@ func (x *DeleteAllDiscoveryConfigsRequest) String() string { func (*DeleteAllDiscoveryConfigsRequest) ProtoMessage() {} func (x *DeleteAllDiscoveryConfigsRequest) ProtoReflect() protoreflect.Message { - mi := &file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[6] + mi := &file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -392,7 +441,7 @@ func (x *DeleteAllDiscoveryConfigsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteAllDiscoveryConfigsRequest.ProtoReflect.Descriptor instead. 
func (*DeleteAllDiscoveryConfigsRequest) Descriptor() ([]byte, []int) { - return file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_rawDescGZIP(), []int{6} + return file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_rawDescGZIP(), []int{7} } var File_teleport_discoveryconfig_v1_discoveryconfig_service_proto protoreflect.FileDescriptor @@ -443,68 +492,84 @@ var file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_rawDesc = []b 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x64, - 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x32, - 0x0a, 0x1c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x22, 0x22, 0x0a, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x44, - 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x32, 0x88, 0x06, 0x0a, 0x16, 0x44, 0x69, 0x73, 0x63, 0x6f, - 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x8b, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, - 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x38, 0x2e, 0x74, 0x65, 0x6c, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x73, - 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, - 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x7a, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, - 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, - 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x63, - 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x80, 0x01, 0x0a, 0x15, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, - 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, - 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, - 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x80, - 0x01, 
0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, - 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x77, + 0x0a, 0x1c, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x57, + 0x0a, 0x10, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, - 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, + 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x32, 0x0a, 0x1c, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x22, 0x0a, 0x20, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x32, + 0x8b, 0x07, 0x0a, 0x16, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x12, 0x8b, 0x01, 0x0a, 0x14, 0x4c, + 0x69, 0x73, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x12, 0x38, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x6a, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, - 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, 0x2e, 0x74, 0x65, 0x6c, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, - 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x44, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, + 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 
0x72, + 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x80, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, + 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, + 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x80, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x39, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, + 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, + 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x80, 0x01, 0x0a, 0x15, 0x55, + 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 
0x39, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, + 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6a, 0x0a, + 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x69, 0x73, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x72, 0x0a, 0x19, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x3d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x44, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 
0x79, 0x12, 0x72, 0x0a, - 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, - 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x3d, 0x2e, 0x74, 0x65, 0x6c, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, - 0x6c, 0x6c, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x42, 0x62, 0x5a, 0x60, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x67, 0x72, 0x61, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x65, - 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2f, 0x76, 0x31, 0x3b, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x62, 0x5a, + 0x60, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x76, + 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x67, 0x6f, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x64, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x76, 0x31, 0x3b, + 0x64, 0x69, 0x73, 0x63, 0x6f, 
0x76, 0x65, 0x72, 0x79, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x76, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -519,39 +584,43 @@ func file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_rawDescGZIP( return file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_rawDescData } -var file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_goTypes = []interface{}{ (*ListDiscoveryConfigsRequest)(nil), // 0: teleport.discoveryconfig.v1.ListDiscoveryConfigsRequest (*ListDiscoveryConfigsResponse)(nil), // 1: teleport.discoveryconfig.v1.ListDiscoveryConfigsResponse (*GetDiscoveryConfigRequest)(nil), // 2: teleport.discoveryconfig.v1.GetDiscoveryConfigRequest (*CreateDiscoveryConfigRequest)(nil), // 3: teleport.discoveryconfig.v1.CreateDiscoveryConfigRequest (*UpdateDiscoveryConfigRequest)(nil), // 4: teleport.discoveryconfig.v1.UpdateDiscoveryConfigRequest - (*DeleteDiscoveryConfigRequest)(nil), // 5: teleport.discoveryconfig.v1.DeleteDiscoveryConfigRequest - (*DeleteAllDiscoveryConfigsRequest)(nil), // 6: teleport.discoveryconfig.v1.DeleteAllDiscoveryConfigsRequest - (*DiscoveryConfig)(nil), // 7: teleport.discoveryconfig.v1.DiscoveryConfig - (*emptypb.Empty)(nil), // 8: google.protobuf.Empty + (*UpsertDiscoveryConfigRequest)(nil), // 5: teleport.discoveryconfig.v1.UpsertDiscoveryConfigRequest + (*DeleteDiscoveryConfigRequest)(nil), // 6: teleport.discoveryconfig.v1.DeleteDiscoveryConfigRequest + (*DeleteAllDiscoveryConfigsRequest)(nil), // 7: teleport.discoveryconfig.v1.DeleteAllDiscoveryConfigsRequest + (*DiscoveryConfig)(nil), // 8: teleport.discoveryconfig.v1.DiscoveryConfig + (*emptypb.Empty)(nil), // 9: google.protobuf.Empty } var file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_depIdxs = 
[]int32{ - 7, // 0: teleport.discoveryconfig.v1.ListDiscoveryConfigsResponse.discovery_configs:type_name -> teleport.discoveryconfig.v1.DiscoveryConfig - 7, // 1: teleport.discoveryconfig.v1.CreateDiscoveryConfigRequest.discovery_config:type_name -> teleport.discoveryconfig.v1.DiscoveryConfig - 7, // 2: teleport.discoveryconfig.v1.UpdateDiscoveryConfigRequest.discovery_config:type_name -> teleport.discoveryconfig.v1.DiscoveryConfig - 0, // 3: teleport.discoveryconfig.v1.DiscoveryConfigService.ListDiscoveryConfigs:input_type -> teleport.discoveryconfig.v1.ListDiscoveryConfigsRequest - 2, // 4: teleport.discoveryconfig.v1.DiscoveryConfigService.GetDiscoveryConfig:input_type -> teleport.discoveryconfig.v1.GetDiscoveryConfigRequest - 3, // 5: teleport.discoveryconfig.v1.DiscoveryConfigService.CreateDiscoveryConfig:input_type -> teleport.discoveryconfig.v1.CreateDiscoveryConfigRequest - 4, // 6: teleport.discoveryconfig.v1.DiscoveryConfigService.UpdateDiscoveryConfig:input_type -> teleport.discoveryconfig.v1.UpdateDiscoveryConfigRequest - 5, // 7: teleport.discoveryconfig.v1.DiscoveryConfigService.DeleteDiscoveryConfig:input_type -> teleport.discoveryconfig.v1.DeleteDiscoveryConfigRequest - 6, // 8: teleport.discoveryconfig.v1.DiscoveryConfigService.DeleteAllDiscoveryConfigs:input_type -> teleport.discoveryconfig.v1.DeleteAllDiscoveryConfigsRequest - 1, // 9: teleport.discoveryconfig.v1.DiscoveryConfigService.ListDiscoveryConfigs:output_type -> teleport.discoveryconfig.v1.ListDiscoveryConfigsResponse - 7, // 10: teleport.discoveryconfig.v1.DiscoveryConfigService.GetDiscoveryConfig:output_type -> teleport.discoveryconfig.v1.DiscoveryConfig - 7, // 11: teleport.discoveryconfig.v1.DiscoveryConfigService.CreateDiscoveryConfig:output_type -> teleport.discoveryconfig.v1.DiscoveryConfig - 7, // 12: teleport.discoveryconfig.v1.DiscoveryConfigService.UpdateDiscoveryConfig:output_type -> teleport.discoveryconfig.v1.DiscoveryConfig - 8, // 13: 
teleport.discoveryconfig.v1.DiscoveryConfigService.DeleteDiscoveryConfig:output_type -> google.protobuf.Empty - 8, // 14: teleport.discoveryconfig.v1.DiscoveryConfigService.DeleteAllDiscoveryConfigs:output_type -> google.protobuf.Empty - 9, // [9:15] is the sub-list for method output_type - 3, // [3:9] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 8, // 0: teleport.discoveryconfig.v1.ListDiscoveryConfigsResponse.discovery_configs:type_name -> teleport.discoveryconfig.v1.DiscoveryConfig + 8, // 1: teleport.discoveryconfig.v1.CreateDiscoveryConfigRequest.discovery_config:type_name -> teleport.discoveryconfig.v1.DiscoveryConfig + 8, // 2: teleport.discoveryconfig.v1.UpdateDiscoveryConfigRequest.discovery_config:type_name -> teleport.discoveryconfig.v1.DiscoveryConfig + 8, // 3: teleport.discoveryconfig.v1.UpsertDiscoveryConfigRequest.discovery_config:type_name -> teleport.discoveryconfig.v1.DiscoveryConfig + 0, // 4: teleport.discoveryconfig.v1.DiscoveryConfigService.ListDiscoveryConfigs:input_type -> teleport.discoveryconfig.v1.ListDiscoveryConfigsRequest + 2, // 5: teleport.discoveryconfig.v1.DiscoveryConfigService.GetDiscoveryConfig:input_type -> teleport.discoveryconfig.v1.GetDiscoveryConfigRequest + 3, // 6: teleport.discoveryconfig.v1.DiscoveryConfigService.CreateDiscoveryConfig:input_type -> teleport.discoveryconfig.v1.CreateDiscoveryConfigRequest + 4, // 7: teleport.discoveryconfig.v1.DiscoveryConfigService.UpdateDiscoveryConfig:input_type -> teleport.discoveryconfig.v1.UpdateDiscoveryConfigRequest + 5, // 8: teleport.discoveryconfig.v1.DiscoveryConfigService.UpsertDiscoveryConfig:input_type -> teleport.discoveryconfig.v1.UpsertDiscoveryConfigRequest + 6, // 9: teleport.discoveryconfig.v1.DiscoveryConfigService.DeleteDiscoveryConfig:input_type -> teleport.discoveryconfig.v1.DeleteDiscoveryConfigRequest + 7, // 
10: teleport.discoveryconfig.v1.DiscoveryConfigService.DeleteAllDiscoveryConfigs:input_type -> teleport.discoveryconfig.v1.DeleteAllDiscoveryConfigsRequest + 1, // 11: teleport.discoveryconfig.v1.DiscoveryConfigService.ListDiscoveryConfigs:output_type -> teleport.discoveryconfig.v1.ListDiscoveryConfigsResponse + 8, // 12: teleport.discoveryconfig.v1.DiscoveryConfigService.GetDiscoveryConfig:output_type -> teleport.discoveryconfig.v1.DiscoveryConfig + 8, // 13: teleport.discoveryconfig.v1.DiscoveryConfigService.CreateDiscoveryConfig:output_type -> teleport.discoveryconfig.v1.DiscoveryConfig + 8, // 14: teleport.discoveryconfig.v1.DiscoveryConfigService.UpdateDiscoveryConfig:output_type -> teleport.discoveryconfig.v1.DiscoveryConfig + 8, // 15: teleport.discoveryconfig.v1.DiscoveryConfigService.UpsertDiscoveryConfig:output_type -> teleport.discoveryconfig.v1.DiscoveryConfig + 9, // 16: teleport.discoveryconfig.v1.DiscoveryConfigService.DeleteDiscoveryConfig:output_type -> google.protobuf.Empty + 9, // 17: teleport.discoveryconfig.v1.DiscoveryConfigService.DeleteAllDiscoveryConfigs:output_type -> google.protobuf.Empty + 11, // [11:18] is the sub-list for method output_type + 4, // [4:11] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_init() } @@ -622,7 +691,7 @@ func file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_init() { } } file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteDiscoveryConfigRequest); i { + switch v := v.(*UpsertDiscoveryConfigRequest); i { case 0: return &v.state case 1: @@ -634,6 +703,18 @@ func file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_init() { } } 
file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteDiscoveryConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeleteAllDiscoveryConfigsRequest); i { case 0: return &v.state @@ -652,7 +733,7 @@ func file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_teleport_discoveryconfig_v1_discoveryconfig_service_proto_rawDesc, NumEnums: 0, - NumMessages: 7, + NumMessages: 8, NumExtensions: 0, NumServices: 1, }, diff --git a/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service_grpc.pb.go b/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service_grpc.pb.go index 8f7bb7f5e9d01..b5aebac7b8294 100644 --- a/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service_grpc.pb.go +++ b/api/gen/proto/go/teleport/discoveryconfig/v1/discoveryconfig_service_grpc.pb.go @@ -38,6 +38,7 @@ const ( DiscoveryConfigService_GetDiscoveryConfig_FullMethodName = "/teleport.discoveryconfig.v1.DiscoveryConfigService/GetDiscoveryConfig" DiscoveryConfigService_CreateDiscoveryConfig_FullMethodName = "/teleport.discoveryconfig.v1.DiscoveryConfigService/CreateDiscoveryConfig" DiscoveryConfigService_UpdateDiscoveryConfig_FullMethodName = "/teleport.discoveryconfig.v1.DiscoveryConfigService/UpdateDiscoveryConfig" + DiscoveryConfigService_UpsertDiscoveryConfig_FullMethodName = "/teleport.discoveryconfig.v1.DiscoveryConfigService/UpsertDiscoveryConfig" DiscoveryConfigService_DeleteDiscoveryConfig_FullMethodName = "/teleport.discoveryconfig.v1.DiscoveryConfigService/DeleteDiscoveryConfig" DiscoveryConfigService_DeleteAllDiscoveryConfigs_FullMethodName = 
"/teleport.discoveryconfig.v1.DiscoveryConfigService/DeleteAllDiscoveryConfigs" ) @@ -54,6 +55,8 @@ type DiscoveryConfigServiceClient interface { CreateDiscoveryConfig(ctx context.Context, in *CreateDiscoveryConfigRequest, opts ...grpc.CallOption) (*DiscoveryConfig, error) // UpdateDiscoveryConfig updates an existing DiscoveryConfig resource. UpdateDiscoveryConfig(ctx context.Context, in *UpdateDiscoveryConfigRequest, opts ...grpc.CallOption) (*DiscoveryConfig, error) + // UpsertDiscoveryConfig creates or updates a DiscoveryConfig resource. + UpsertDiscoveryConfig(ctx context.Context, in *UpsertDiscoveryConfigRequest, opts ...grpc.CallOption) (*DiscoveryConfig, error) // DeleteDiscoveryConfig removes the specified DiscoveryConfig resource. DeleteDiscoveryConfig(ctx context.Context, in *DeleteDiscoveryConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // DeleteAllDiscoveryConfigs removes all DiscoveryConfigs. @@ -104,6 +107,15 @@ func (c *discoveryConfigServiceClient) UpdateDiscoveryConfig(ctx context.Context return out, nil } +func (c *discoveryConfigServiceClient) UpsertDiscoveryConfig(ctx context.Context, in *UpsertDiscoveryConfigRequest, opts ...grpc.CallOption) (*DiscoveryConfig, error) { + out := new(DiscoveryConfig) + err := c.cc.Invoke(ctx, DiscoveryConfigService_UpsertDiscoveryConfig_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *discoveryConfigServiceClient) DeleteDiscoveryConfig(ctx context.Context, in *DeleteDiscoveryConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) err := c.cc.Invoke(ctx, DiscoveryConfigService_DeleteDiscoveryConfig_FullMethodName, in, out, opts...) @@ -134,6 +146,8 @@ type DiscoveryConfigServiceServer interface { CreateDiscoveryConfig(context.Context, *CreateDiscoveryConfigRequest) (*DiscoveryConfig, error) // UpdateDiscoveryConfig updates an existing DiscoveryConfig resource. 
UpdateDiscoveryConfig(context.Context, *UpdateDiscoveryConfigRequest) (*DiscoveryConfig, error) + // UpsertDiscoveryConfig creates or updates a DiscoveryConfig resource. + UpsertDiscoveryConfig(context.Context, *UpsertDiscoveryConfigRequest) (*DiscoveryConfig, error) // DeleteDiscoveryConfig removes the specified DiscoveryConfig resource. DeleteDiscoveryConfig(context.Context, *DeleteDiscoveryConfigRequest) (*emptypb.Empty, error) // DeleteAllDiscoveryConfigs removes all DiscoveryConfigs. @@ -157,6 +171,9 @@ func (UnimplementedDiscoveryConfigServiceServer) CreateDiscoveryConfig(context.C func (UnimplementedDiscoveryConfigServiceServer) UpdateDiscoveryConfig(context.Context, *UpdateDiscoveryConfigRequest) (*DiscoveryConfig, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateDiscoveryConfig not implemented") } +func (UnimplementedDiscoveryConfigServiceServer) UpsertDiscoveryConfig(context.Context, *UpsertDiscoveryConfigRequest) (*DiscoveryConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpsertDiscoveryConfig not implemented") +} func (UnimplementedDiscoveryConfigServiceServer) DeleteDiscoveryConfig(context.Context, *DeleteDiscoveryConfigRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteDiscoveryConfig not implemented") } @@ -249,6 +266,24 @@ func _DiscoveryConfigService_UpdateDiscoveryConfig_Handler(srv interface{}, ctx return interceptor(ctx, in, info, handler) } +func _DiscoveryConfigService_UpsertDiscoveryConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpsertDiscoveryConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiscoveryConfigServiceServer).UpsertDiscoveryConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DiscoveryConfigService_UpsertDiscoveryConfig_FullMethodName, + } + 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiscoveryConfigServiceServer).UpsertDiscoveryConfig(ctx, req.(*UpsertDiscoveryConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _DiscoveryConfigService_DeleteDiscoveryConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteDiscoveryConfigRequest) if err := dec(in); err != nil { @@ -308,6 +343,10 @@ var DiscoveryConfigService_ServiceDesc = grpc.ServiceDesc{ MethodName: "UpdateDiscoveryConfig", Handler: _DiscoveryConfigService_UpdateDiscoveryConfig_Handler, }, + { + MethodName: "UpsertDiscoveryConfig", + Handler: _DiscoveryConfigService_UpsertDiscoveryConfig_Handler, + }, { MethodName: "DeleteDiscoveryConfig", Handler: _DiscoveryConfigService_DeleteDiscoveryConfig_Handler, diff --git a/api/proto/teleport/discoveryconfig/v1/discoveryconfig_service.proto b/api/proto/teleport/discoveryconfig/v1/discoveryconfig_service.proto index 22e4e6b7032e6..d4c285c2eb4f4 100644 --- a/api/proto/teleport/discoveryconfig/v1/discoveryconfig_service.proto +++ b/api/proto/teleport/discoveryconfig/v1/discoveryconfig_service.proto @@ -39,6 +39,9 @@ service DiscoveryConfigService { // UpdateDiscoveryConfig updates an existing DiscoveryConfig resource. rpc UpdateDiscoveryConfig(UpdateDiscoveryConfigRequest) returns (DiscoveryConfig); + // UpsertDiscoveryConfig creates or updates a DiscoveryConfig resource. + rpc UpsertDiscoveryConfig(UpsertDiscoveryConfigRequest) returns (DiscoveryConfig); + // DeleteDiscoveryConfig removes the specified DiscoveryConfig resource. rpc DeleteDiscoveryConfig(DeleteDiscoveryConfigRequest) returns (google.protobuf.Empty); @@ -83,6 +86,12 @@ message UpdateDiscoveryConfigRequest { DiscoveryConfig discovery_config = 1; } +// UpsertDiscoveryConfigRequest is the request to upsert the provided DiscoveryConfig. 
+message UpsertDiscoveryConfigRequest { + // DiscoveryConfig is the DiscoveryConfig to be upserted. + DiscoveryConfig discovery_config = 1; +} + // DeleteDiscoveryConfigRequest is a request for deleting a specific DiscoveryConfig resource. message DeleteDiscoveryConfigRequest { // Name is the name of the DiscoveryConfig to be deleted. diff --git a/lib/auth/auth.go b/lib/auth/auth.go index 1c6568d77cf65..6bcf5bac1f3bc 100644 --- a/lib/auth/auth.go +++ b/lib/auth/auth.go @@ -254,6 +254,12 @@ func NewServer(cfg *InitConfig, opts ...ServerOption) (*Server, error) { return nil, trace.Wrap(err) } } + if cfg.DiscoveryConfigs == nil { + cfg.DiscoveryConfigs, err = local.NewDiscoveryConfigService(cfg.Backend) + if err != nil { + return nil, trace.Wrap(err) + } + } if cfg.Embeddings == nil { cfg.Embeddings = local.NewEmbeddingsService(cfg.Backend) } @@ -315,6 +321,7 @@ func NewServer(cfg *InitConfig, opts ...ServerOption) (*Server, error) { SessionTrackerService: cfg.SessionTrackerService, ConnectionsDiagnostic: cfg.ConnectionsDiagnostic, Integrations: cfg.Integrations, + DiscoveryConfigs: cfg.DiscoveryConfigs, Embeddings: cfg.Embeddings, Okta: cfg.Okta, AccessLists: cfg.AccessLists, @@ -451,6 +458,7 @@ type Services struct { services.ConnectionsDiagnostic services.StatusInternal services.Integrations + services.DiscoveryConfigs services.Okta services.AccessLists services.UserLoginStates @@ -484,6 +492,11 @@ func (r *Services) AccessListClient() services.AccessLists { return r } +// DiscoveryConfigClient returns the DiscoveryConfig client. +func (r *Services) DiscoveryConfigClient() services.DiscoveryConfigs { + return r +} + // UserLoginStateClient returns the user login state client. 
func (r *Services) UserLoginStateClient() services.UserLoginStates { return r diff --git a/lib/auth/auth_with_roles.go b/lib/auth/auth_with_roles.go index e20c2cf262401..e3d4cfb060403 100644 --- a/lib/auth/auth_with_roles.go +++ b/lib/auth/auth_with_roles.go @@ -36,6 +36,7 @@ import ( "github.com/gravitational/teleport/api" "github.com/gravitational/teleport/api/client" "github.com/gravitational/teleport/api/client/accesslist" + "github.com/gravitational/teleport/api/client/discoveryconfig" "github.com/gravitational/teleport/api/client/okta" "github.com/gravitational/teleport/api/client/proto" "github.com/gravitational/teleport/api/client/userloginstate" @@ -44,6 +45,7 @@ import ( "github.com/gravitational/teleport/api/gen/proto/go/assist/v1" accesslistv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/accesslist/v1" devicepb "github.com/gravitational/teleport/api/gen/proto/go/teleport/devicetrust/v1" + discoveryconfigv1 "github.com/gravitational/teleport/api/gen/proto/go/teleport/discoveryconfig/v1" integrationpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/integration/v1" loginrulepb "github.com/gravitational/teleport/api/gen/proto/go/teleport/loginrule/v1" oktapb "github.com/gravitational/teleport/api/gen/proto/go/teleport/okta/v1" @@ -347,6 +349,14 @@ func (a *ServerWithRoles) AccessListClient() services.AccessLists { utils.NewGRPCDummyClientConnection("AccessListClient() should not be called on ServerWithRoles"))) } +// DiscoveryConfigClient allows ServerWithRoles to implement ClientI. +// It should not be called through ServerWithRoles, +// as it returns a dummy client that will always respond with "not implemented". 
+func (a *ServerWithRoles) DiscoveryConfigClient() services.DiscoveryConfigs { + return discoveryconfig.NewClient(discoveryconfigv1.NewDiscoveryConfigServiceClient( + utils.NewGRPCDummyClientConnection("DiscoveryConfigClient() should not be called on ServerWithRoles"))) +} + // ResourceUsageClient allows ServerWithRoles to implement ClientI. // It should not be called through ServerWithRoles, // as it returns a dummy client that will always respond with "not implemented". diff --git a/lib/auth/clt.go b/lib/auth/clt.go index 82b58a7477ed6..478dbd6c02bb4 100644 --- a/lib/auth/clt.go +++ b/lib/auth/clt.go @@ -447,6 +447,11 @@ func (c *Client) UserLoginStateClient() services.UserLoginStates { return c.APIClient.UserLoginStateClient() } +// DiscoveryConfigClient returns a client for managing the DiscoveryConfig resource. +func (c *Client) DiscoveryConfigClient() services.DiscoveryConfigs { + return c.APIClient.DiscoveryConfigClient() +} + // WebService implements features used by Web UI clients type WebService interface { // GetWebSessionInfo checks if a web session is valid, returns session id in case if @@ -847,6 +852,12 @@ type ClientI interface { // (as per the default gRPC behavior). UserLoginStateClient() services.UserLoginStates + // DiscoveryConfigClient returns a DiscoveryConfig client. + // Clients connecting to older Teleport versions, still get an DiscoveryConfig client + // when calling this method, but all RPCs will return "not implemented" errors + // (as per the default gRPC behavior). + DiscoveryConfigClient() services.DiscoveryConfigs + // ResourceUsageClient returns a resource usage service client. 
// Clients connecting to non-Enterprise clusters, or older Teleport versions, // still get a client when calling this method, but all RPCs will return diff --git a/lib/auth/discoveryconfig/discoveryconfigv1/service.go b/lib/auth/discoveryconfig/discoveryconfigv1/service.go index 00092cd0c2152..860db0a43fe15 100644 --- a/lib/auth/discoveryconfig/discoveryconfigv1/service.go +++ b/lib/auth/discoveryconfig/discoveryconfigv1/service.go @@ -170,6 +170,26 @@ func (s *Service) UpdateDiscoveryConfig(ctx context.Context, req *discoveryconfi return conv.ToProto(resp), nil } +// UpsertDiscoveryConfig creates or updates a DiscoveryConfig. +func (s *Service) UpsertDiscoveryConfig(ctx context.Context, req *discoveryconfigv1.UpsertDiscoveryConfigRequest) (*discoveryconfigv1.DiscoveryConfig, error) { + _, err := authz.AuthorizeWithVerbs(ctx, s.log, s.authorizer, true, types.KindDiscoveryConfig, types.VerbCreate, types.VerbUpdate) + if err != nil { + return nil, trace.Wrap(err) + } + + dc, err := conv.FromProto(req.GetDiscoveryConfig()) + if err != nil { + return nil, trace.Wrap(err) + } + + resp, err := s.backend.UpsertDiscoveryConfig(ctx, dc) + if err != nil { + return nil, trace.Wrap(err) + } + + return conv.ToProto(resp), nil +} + // DeleteDiscoveryConfig removes the specified DiscoveryConfig resource. 
func (s *Service) DeleteDiscoveryConfig(ctx context.Context, req *discoveryconfigv1.DeleteDiscoveryConfigRequest) (*emptypb.Empty, error) { _, err := authz.AuthorizeWithVerbs(ctx, s.log, s.authorizer, true, types.KindDiscoveryConfig, types.VerbDelete) diff --git a/lib/auth/discoveryconfig/discoveryconfigv1/service_test.go b/lib/auth/discoveryconfig/discoveryconfigv1/service_test.go index eff6a869ae9a9..ddef5bfa952e1 100644 --- a/lib/auth/discoveryconfig/discoveryconfigv1/service_test.go +++ b/lib/auth/discoveryconfig/discoveryconfigv1/service_test.go @@ -223,6 +223,43 @@ func TestDiscoveryConfigCRUD(t *testing.T) { ErrAssertion: require.NoError, }, + // Upsert + { + Name: "no access to upsert discovery config", + Role: types.RoleSpecV6{ + Allow: types.RoleConditions{Rules: []types.Rule{{ + Resources: []string{types.KindDiscoveryConfig}, + Verbs: []string{types.VerbUpdate}, // missing VerbCreate + }}}, + }, + Test: func(ctx context.Context, resourceSvc *Service, dcName string) error { + dc := sampleDiscoveryConfigFn(t, dcName) + _, err := resourceSvc.UpsertDiscoveryConfig(ctx, &discoveryconfigpb.UpsertDiscoveryConfigRequest{ + DiscoveryConfig: convert.ToProto(dc), + }) + return err + }, + ErrAssertion: requireTraceErrorFn(trace.IsAccessDenied), + }, + { + Name: "access to upsert discovery config", + Role: types.RoleSpecV6{ + Allow: types.RoleConditions{Rules: []types.Rule{{ + Resources: []string{types.KindDiscoveryConfig}, + Verbs: []string{types.VerbUpdate, types.VerbCreate}, + }}}, + }, + Setup: func(t *testing.T, dcName string) {}, + Test: func(ctx context.Context, resourceSvc *Service, dcName string) error { + dc := sampleDiscoveryConfigFn(t, dcName) + _, err := resourceSvc.UpsertDiscoveryConfig(ctx, &discoveryconfigpb.UpsertDiscoveryConfigRequest{ + DiscoveryConfig: convert.ToProto(dc), + }) + return err + }, + ErrAssertion: require.NoError, + }, + // Delete { Name: "no access to delete discovery config", diff --git a/lib/auth/grpcserver.go 
b/lib/auth/grpcserver.go index 4b647b33166e7..f9a67aee3d709 100644 --- a/lib/auth/grpcserver.go +++ b/lib/auth/grpcserver.go @@ -47,6 +47,7 @@ import ( "github.com/gravitational/teleport/api/constants" "github.com/gravitational/teleport/api/gen/proto/go/assist/v1" auditlogpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/auditlog/v1" + discoveryconfigpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/discoveryconfig/v1" integrationpb "github.com/gravitational/teleport/api/gen/proto/go/teleport/integration/v1" loginrulepb "github.com/gravitational/teleport/api/gen/proto/go/teleport/loginrule/v1" oktapb "github.com/gravitational/teleport/api/gen/proto/go/teleport/okta/v1" @@ -59,6 +60,7 @@ import ( "github.com/gravitational/teleport/api/types/installers" "github.com/gravitational/teleport/api/types/wrappers" "github.com/gravitational/teleport/lib/auth/assist/assistv1" + "github.com/gravitational/teleport/lib/auth/discoveryconfig/discoveryconfigv1" integrationService "github.com/gravitational/teleport/lib/auth/integration/integrationv1" "github.com/gravitational/teleport/lib/auth/loginrule" "github.com/gravitational/teleport/lib/auth/okta" @@ -5375,6 +5377,16 @@ func NewGRPCServer(cfg GRPCServerConfig) (*GRPCServer, error) { } integrationpb.RegisterIntegrationServiceServer(server, integrationServiceServer) + discoveryConfig, err := discoveryconfigv1.NewService(discoveryconfigv1.ServiceConfig{ + Authorizer: cfg.Authorizer, + Backend: cfg.AuthServer.Services, + Clock: cfg.AuthServer.clock, + }) + if err != nil { + return nil, trace.Wrap(err) + } + discoveryconfigpb.RegisterDiscoveryConfigServiceServer(server, discoveryConfig) + // Initialize and register the user preferences service. 
userPreferencesSrv, err := userpreferencesv1.NewService(&userpreferencesv1.ServiceConfig{ Backend: cfg.AuthServer.Services, diff --git a/lib/auth/init.go b/lib/auth/init.go index 394bdc399075b..c22502b890b8f 100644 --- a/lib/auth/init.go +++ b/lib/auth/init.go @@ -206,6 +206,9 @@ type InitConfig struct { // Integrations is a service that manages Integrations. Integrations services.Integrations + // DiscoveryConfigs is a service that manages DiscoveryConfigs. + DiscoveryConfigs services.DiscoveryConfigs + // Embeddings is a service that manages Embeddings Embeddings services.Embeddings diff --git a/lib/authz/permissions.go b/lib/authz/permissions.go index 41a11398152be..e6300d70086e0 100644 --- a/lib/authz/permissions.go +++ b/lib/authz/permissions.go @@ -889,6 +889,7 @@ func definitionForBuiltinRole(clusterName string, recConfig types.SessionRecordi types.NewRule(types.KindDatabase, services.RW()), types.NewRule(types.KindServerInfo, services.RW()), types.NewRule(types.KindApp, services.RW()), + types.NewRule(types.KindDiscoveryConfig, services.RO()), }, // Discovery service should only access kubes/apps/dbs that originated from discovery. 
KubernetesLabels: types.Labels{types.OriginLabel: []string{types.OriginCloud}}, diff --git a/lib/cache/cache.go b/lib/cache/cache.go index ce19b7016df57..85e30888eefdf 100644 --- a/lib/cache/cache.go +++ b/lib/cache/cache.go @@ -37,6 +37,7 @@ import ( apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/accesslist" + "github.com/gravitational/teleport/api/types/discoveryconfig" "github.com/gravitational/teleport/api/types/userloginstate" "github.com/gravitational/teleport/api/utils/retryutils" "github.com/gravitational/teleport/lib/backend" @@ -119,6 +120,7 @@ func ForAuth(cfg Config) Config { {Kind: types.KindAccessList}, {Kind: types.KindUserLoginState}, {Kind: types.KindAccessListMember}, + {Kind: types.KindDiscoveryConfig}, } cfg.QueueSize = defaults.AuthQueueSize // We don't want to enable partial health for auth cache because auth uses an event stream @@ -368,6 +370,7 @@ func ForDiscovery(cfg Config) Config { {Kind: types.KindKubernetesCluster}, {Kind: types.KindDatabase}, {Kind: types.KindApp}, + {Kind: types.KindDiscoveryConfig}, } cfg.QueueSize = defaults.DiscoveryQueueSize return cfg @@ -474,6 +477,7 @@ type Cache struct { userGroupsCache services.UserGroups oktaCache services.Okta integrationsCache services.Integrations + discoveryConfigsCache services.DiscoveryConfigs headlessAuthenticationsCache services.HeadlessAuthenticationService accessListsCache services.AccessLists userLoginStateCache services.UserLoginStates @@ -630,6 +634,8 @@ type Config struct { Okta services.Okta // Integrations is an Integrations service. Integrations services.Integrations + // DiscoveryConfigs is a DiscoveryConfigs service. + DiscoveryConfigs services.DiscoveryConfigs // AccessLists is the access list service. AccessLists services.AccessLists // UserLoginStates is the user login state service. 
@@ -803,6 +809,12 @@ func New(config Config) (*Cache, error) { return nil, trace.Wrap(err) } + discoveryConfigsCache, err := local.NewDiscoveryConfigService(config.Backend) + if err != nil { + cancel() + return nil, trace.Wrap(err) + } + accessListsCache, err := local.NewAccessListService(config.Backend, config.Clock) if err != nil { cancel() @@ -843,6 +855,7 @@ func New(config Config) (*Cache, error) { userGroupsCache: userGroupsCache, oktaCache: oktaCache, integrationsCache: integrationsCache, + discoveryConfigsCache: discoveryConfigsCache, headlessAuthenticationsCache: local.NewIdentityService(config.Backend), accessListsCache: accessListsCache, userLoginStateCache: userLoginStatesCache, @@ -2479,6 +2492,32 @@ func (c *Cache) GetIntegration(ctx context.Context, name string) (types.Integrat return rg.reader.GetIntegration(ctx, name) } +// ListDiscoveryConfigs returns a paginated list of all DiscoveryConfig resources. +func (c *Cache) ListDiscoveryConfigs(ctx context.Context, pageSize int, nextKey string) ([]*discoveryconfig.DiscoveryConfig, string, error) { + ctx, span := c.Tracer.Start(ctx, "cache/ListDiscoveryConfigs") + defer span.End() + + rg, err := readCollectionCache(c, c.collections.discoveryConfigs) + if err != nil { + return nil, "", trace.Wrap(err) + } + defer rg.Release() + return rg.reader.ListDiscoveryConfigs(ctx, pageSize, nextKey) +} + +// GetDiscoveryConfig returns the specified DiscoveryConfig resource. +func (c *Cache) GetDiscoveryConfig(ctx context.Context, name string) (*discoveryconfig.DiscoveryConfig, error) { + ctx, span := c.Tracer.Start(ctx, "cache/GetDiscoveryConfig") + defer span.End() + + rg, err := readCollectionCache(c, c.collections.discoveryConfigs) + if err != nil { + return nil, trace.Wrap(err) + } + defer rg.Release() + return rg.reader.GetDiscoveryConfig(ctx, name) +} + // ListAccessLists returns a paginated list of all access lists resources. 
func (c *Cache) ListAccessLists(ctx context.Context, pageSize int, nextKey string) ([]*accesslist.AccessList, string, error) { ctx, span := c.Tracer.Start(ctx, "cache/ListAccessLists") diff --git a/lib/cache/cache_test.go b/lib/cache/cache_test.go index 74bb6926f2b4e..38d82691e83da 100644 --- a/lib/cache/cache_test.go +++ b/lib/cache/cache_test.go @@ -39,6 +39,7 @@ import ( apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/accesslist" + "github.com/gravitational/teleport/api/types/discoveryconfig" "github.com/gravitational/teleport/api/types/header" "github.com/gravitational/teleport/api/types/trait" "github.com/gravitational/teleport/api/types/userloginstate" @@ -92,6 +93,7 @@ type testPack struct { userGroups services.UserGroups okta services.Okta integrations services.Integrations + discoveryConfigs services.DiscoveryConfigs accessLists services.AccessLists userLoginStates services.UserLoginStates accessListMembers services.AccessListMembers @@ -246,6 +248,12 @@ func newPackWithoutCache(dir string, opts ...packOption) (*testPack, error) { } p.integrations = igSvc + dcSvc, err := local.NewDiscoveryConfigService(p.backend) + if err != nil { + return nil, trace.Wrap(err) + } + p.discoveryConfigs = dcSvc + alSvc, err := local.NewAccessListService(p.backend, p.backend.Clock()) if err != nil { return nil, trace.Wrap(err) @@ -296,6 +304,7 @@ func newPack(dir string, setupConfig func(c Config) Config, opts ...packOption) UserGroups: p.userGroups, Okta: p.okta, Integrations: p.integrations, + DiscoveryConfigs: p.discoveryConfigs, AccessLists: p.accessLists, UserLoginStates: p.userLoginStates, MaxRetryPeriod: 200 * time.Millisecond, @@ -693,6 +702,7 @@ func TestCompletenessInit(t *testing.T) { UserGroups: p.userGroups, Okta: p.okta, Integrations: p.integrations, + DiscoveryConfigs: p.discoveryConfigs, AccessLists: p.accessLists, UserLoginStates: p.userLoginStates, 
MaxRetryPeriod: 200 * time.Millisecond, @@ -763,6 +773,7 @@ func TestCompletenessReset(t *testing.T) { UserGroups: p.userGroups, Okta: p.okta, Integrations: p.integrations, + DiscoveryConfigs: p.discoveryConfigs, AccessLists: p.accessLists, UserLoginStates: p.userLoginStates, MaxRetryPeriod: 200 * time.Millisecond, @@ -945,6 +956,7 @@ func TestListResources_NodesTTLVariant(t *testing.T) { UserGroups: p.userGroups, Okta: p.okta, Integrations: p.integrations, + DiscoveryConfigs: p.discoveryConfigs, AccessLists: p.accessLists, UserLoginStates: p.userLoginStates, MaxRetryPeriod: 200 * time.Millisecond, @@ -1026,6 +1038,7 @@ func initStrategy(t *testing.T) { UserGroups: p.userGroups, Okta: p.okta, Integrations: p.integrations, + DiscoveryConfigs: p.discoveryConfigs, AccessLists: p.accessLists, UserLoginStates: p.userLoginStates, MaxRetryPeriod: 200 * time.Millisecond, @@ -2096,6 +2109,45 @@ func TestIntegrations(t *testing.T) { }) } +// TestDiscoveryConfig tests that CRUD operations on DiscoveryConfig resources are +// replicated from the backend to the cache. 
+func TestDiscoveryConfig(t *testing.T) { + t.Parallel() + + p := newTestPack(t, ForAuth) + t.Cleanup(p.Close) + + testResources(t, p, testFuncs[*discoveryconfig.DiscoveryConfig]{ + newResource: func(name string) (*discoveryconfig.DiscoveryConfig, error) { + dc, err := discoveryconfig.NewDiscoveryConfig( + header.Metadata{Name: "mydc"}, + discoveryconfig.Spec{ + DiscoveryGroup: "group001", + }) + require.NoError(t, err) + return dc, nil + }, + create: func(ctx context.Context, discoveryConfig *discoveryconfig.DiscoveryConfig) error { + _, err := p.discoveryConfigs.CreateDiscoveryConfig(ctx, discoveryConfig) + return trace.Wrap(err) + }, + list: func(ctx context.Context) ([]*discoveryconfig.DiscoveryConfig, error) { + results, _, err := p.discoveryConfigs.ListDiscoveryConfigs(ctx, 0, "") + return results, err + }, + cacheGet: p.cache.GetDiscoveryConfig, + cacheList: func(ctx context.Context) ([]*discoveryconfig.DiscoveryConfig, error) { + results, _, err := p.cache.ListDiscoveryConfigs(ctx, 0, "") + return results, err + }, + update: func(ctx context.Context, discoveryConfig *discoveryconfig.DiscoveryConfig) error { + _, err := p.discoveryConfigs.UpdateDiscoveryConfig(ctx, discoveryConfig) + return trace.Wrap(err) + }, + deleteAll: p.discoveryConfigs.DeleteAllDiscoveryConfigs, + }) +} + // TestAccessLists tests that CRUD operations on access list resources are // replicated from the backend to the cache. 
func TestAccessLists(t *testing.T) { @@ -2645,6 +2697,7 @@ func TestCacheWatchKindExistsInEvents(t *testing.T) { types.KindOktaImportRule: &types.OktaImportRuleV1{}, types.KindOktaAssignment: &types.OktaAssignmentV1{}, types.KindIntegration: &types.IntegrationV1{}, + types.KindDiscoveryConfig: newDiscoveryConfig(t, "discovery-config"), types.KindHeadlessAuthentication: &types.HeadlessAuthentication{}, types.KindAccessList: newAccessList(t, "access-list", clock), types.KindUserLoginState: newUserLoginState(t, "user-login-state"), @@ -2899,6 +2952,25 @@ func newAccessList(t *testing.T, name string, clock clockwork.Clock) *accesslist return accessList } +func newDiscoveryConfig(t *testing.T, name string) *discoveryconfig.DiscoveryConfig { + t.Helper() + + discoveryConfig, err := discoveryconfig.NewDiscoveryConfig( + header.Metadata{ + Name: name, + }, + discoveryconfig.Spec{ + DiscoveryGroup: "mygroup", + AWS: []types.AWSMatcher{}, + Azure: []types.AzureMatcher{}, + GCP: []types.GCPMatcher{}, + Kube: []types.KubernetesMatcher{}, + }, + ) + require.NoError(t, err) + return discoveryConfig +} + func newUserLoginState(t *testing.T, name string) *userloginstate.UserLoginState { t.Helper() diff --git a/lib/cache/collections.go b/lib/cache/collections.go index dcc13971ef8f6..0dae1f913829a 100644 --- a/lib/cache/collections.go +++ b/lib/cache/collections.go @@ -28,6 +28,7 @@ import ( apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/accesslist" + "github.com/gravitational/teleport/api/types/discoveryconfig" "github.com/gravitational/teleport/api/types/userloginstate" "github.com/gravitational/teleport/lib/services" ) @@ -187,6 +188,7 @@ type cacheCollections struct { clusterNetworkingConfigs collectionReader[clusterNetworkingConfigGetter] databases collectionReader[services.DatabaseGetter] databaseServers collectionReader[databaseServerGetter] + discoveryConfigs 
collectionReader[services.DiscoveryConfigsGetter] installers collectionReader[installerGetter] integrations collectionReader[services.IntegrationsGetter] kubeClusters collectionReader[kubernetesClusterGetter] @@ -595,6 +597,12 @@ func setupCollections(c *Cache, watches []types.WatchKind) (*cacheCollections, e watch: watch, } collections.byKind[resourceKind] = collections.integrations + case types.KindDiscoveryConfig: + if c.DiscoveryConfigs == nil { + return nil, trace.BadParameter("missing parameter DiscoveryConfigs") + } + collections.discoveryConfigs = &genericCollection[*discoveryconfig.DiscoveryConfig, services.DiscoveryConfigsGetter, discoveryConfigExecutor]{cache: c, watch: watch} + collections.byKind[resourceKind] = collections.discoveryConfigs case types.KindHeadlessAuthentication: // For headless authentications, we need only process events. We don't need to keep the cache up to date. collections.byKind[resourceKind] = &genericCollection[*types.HeadlessAuthentication, noReader, noopExecutor]{cache: c, watch: watch} @@ -2373,6 +2381,53 @@ func (accessListsExecutor) getReader(cache *Cache, cacheOK bool) services.Access var _ executor[*accesslist.AccessList, services.AccessListsGetter] = accessListsExecutor{} +type discoveryConfigExecutor struct{} + +func (discoveryConfigExecutor) getAll(ctx context.Context, cache *Cache, loadSecrets bool) ([]*discoveryconfig.DiscoveryConfig, error) { + var discoveryConfigs []*discoveryconfig.DiscoveryConfig + var nextToken string + for { + var page []*discoveryconfig.DiscoveryConfig + var err error + + page, nextToken, err = cache.DiscoveryConfigs.ListDiscoveryConfigs(ctx, 0 /* default page size */, nextToken) + if err != nil { + return nil, trace.Wrap(err) + } + + discoveryConfigs = append(discoveryConfigs, page...) 
+ + if nextToken == "" { + break + } + } + return discoveryConfigs, nil +} + +func (discoveryConfigExecutor) upsert(ctx context.Context, cache *Cache, resource *discoveryconfig.DiscoveryConfig) error { + _, err := cache.discoveryConfigsCache.UpsertDiscoveryConfig(ctx, resource) + return trace.Wrap(err) +} + +func (discoveryConfigExecutor) deleteAll(ctx context.Context, cache *Cache) error { + return cache.discoveryConfigsCache.DeleteAllDiscoveryConfigs(ctx) +} + +func (discoveryConfigExecutor) delete(ctx context.Context, cache *Cache, resource types.Resource) error { + return cache.discoveryConfigsCache.DeleteDiscoveryConfig(ctx, resource.GetName()) +} + +func (discoveryConfigExecutor) isSingleton() bool { return false } + +func (discoveryConfigExecutor) getReader(cache *Cache, cacheOK bool) services.DiscoveryConfigsGetter { + if cacheOK { + return cache.discoveryConfigsCache + } + return cache.Config.DiscoveryConfigs +} + +var _ executor[*discoveryconfig.DiscoveryConfig, services.DiscoveryConfigsGetter] = discoveryConfigExecutor{} + // noopExecutor can be used when a resource's events do not need to processed by // the cache itself, only passed on to other watchers. 
type noopExecutor struct{} diff --git a/lib/service/service.go b/lib/service/service.go index 1c370bb4b8e5f..d5a2cf573d860 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -2222,6 +2222,7 @@ func (process *TeleportProcess) newAccessCache(cfg accessCacheConfig) (*cache.Ca AccessLists: cfg.services.AccessListClient(), UserLoginStates: cfg.services.UserLoginStateClient(), Integrations: cfg.services, + DiscoveryConfigs: cfg.services.DiscoveryConfigClient(), WebSession: cfg.services.WebSessions(), WebToken: cfg.services.WebTokens(), Component: teleport.Component(append(cfg.cacheName, process.id, teleport.ComponentCache)...), diff --git a/lib/services/discoveryconfig.go b/lib/services/discoveryconfig.go index 4d40e2d506497..028327445287d 100644 --- a/lib/services/discoveryconfig.go +++ b/lib/services/discoveryconfig.go @@ -35,6 +35,8 @@ type DiscoveryConfigs interface { CreateDiscoveryConfig(context.Context, *discoveryconfig.DiscoveryConfig) (*discoveryconfig.DiscoveryConfig, error) // UpdateDiscoveryConfig updates an existing DiscoveryConfig resource. UpdateDiscoveryConfig(context.Context, *discoveryconfig.DiscoveryConfig) (*discoveryconfig.DiscoveryConfig, error) + // UpsertDiscoveryConfig upserts a DiscoveryConfig resource. + UpsertDiscoveryConfig(context.Context, *discoveryconfig.DiscoveryConfig) (*discoveryconfig.DiscoveryConfig, error) // DeleteDiscoveryConfig removes the specified DiscoveryConfig resource. DeleteDiscoveryConfig(ctx context.Context, name string) error // DeleteAllDiscoveryConfigs removes all DiscoveryConfigs. diff --git a/lib/services/local/discoveryconfig.go b/lib/services/local/discoveryconfig.go index 218312693f75d..086a5f926f87b 100644 --- a/lib/services/local/discoveryconfig.go +++ b/lib/services/local/discoveryconfig.go @@ -103,6 +103,19 @@ func (s *DiscoveryConfigService) UpdateDiscoveryConfig(ctx context.Context, dc * return dc, nil } +// UpsertDiscoveryConfigs upserts a DiscoveryConfig resource. 
+func (s *DiscoveryConfigService) UpsertDiscoveryConfig(ctx context.Context, dc *discoveryconfig.DiscoveryConfig) (*discoveryconfig.DiscoveryConfig, error) { + if err := dc.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + + if err := s.svc.UpsertResource(ctx, dc); err != nil { + return nil, trace.Wrap(err) + } + + return dc, nil +} + // DeleteDiscoveryConfigs removes the specified DiscoveryConfig resource. func (s *DiscoveryConfigService) DeleteDiscoveryConfig(ctx context.Context, name string) error { return trace.Wrap(s.svc.DeleteResource(ctx, name)) diff --git a/lib/services/local/discoveryconfig_test.go b/lib/services/local/discoveryconfig_test.go index 3f1a9edb53d04..5e851cba66d91 100644 --- a/lib/services/local/discoveryconfig_test.go +++ b/lib/services/local/discoveryconfig_test.go @@ -50,6 +50,7 @@ func TestDiscoveryConfigCRUD(t *testing.T) { // Create a couple discovery configs. discoveryConfig1 := newDiscoveryConfig(t, "discovery-config-1") discoveryConfig2 := newDiscoveryConfig(t, "discovery-config-2") + discoveryConfig3 := newDiscoveryConfig(t, "discovery-config-3") // Initially we expect no discovery configs. out, nextToken, err := service.ListDiscoveryConfigs(ctx, 0, "") @@ -102,13 +103,27 @@ func TestDiscoveryConfigCRUD(t *testing.T) { require.NoError(t, err) require.Empty(t, cmp.Diff(discoveryConfig1, discoveryConfig, cmpOpts...)) + // Upsert a discovery config updates if it already exists. + discoveryConfig1.SetExpiry(clock.Now().Add(40 * time.Minute)) + discoveryConfig, err = service.UpsertDiscoveryConfig(ctx, discoveryConfig1) + require.NoError(t, err) + require.Empty(t, cmp.Diff(discoveryConfig1, discoveryConfig, cmpOpts...)) + discoveryConfig, err = service.GetDiscoveryConfig(ctx, discoveryConfig1.GetName()) + require.NoError(t, err) + require.Empty(t, cmp.Diff(discoveryConfig1, discoveryConfig, cmpOpts...)) + + // Upsert a discovery config creates if it doesn't exist. 
+ discoveryConfig, err = service.UpsertDiscoveryConfig(ctx, discoveryConfig3) + require.NoError(t, err) + require.Empty(t, cmp.Diff(discoveryConfig3, discoveryConfig, cmpOpts...)) + // Delete a discovery config. err = service.DeleteDiscoveryConfig(ctx, discoveryConfig1.GetName()) require.NoError(t, err) out, nextToken, err = service.ListDiscoveryConfigs(ctx, 0, "") require.NoError(t, err) require.Empty(t, nextToken) - require.Empty(t, cmp.Diff([]*discoveryconfig.DiscoveryConfig{discoveryConfig2}, out, cmpOpts...)) + require.Empty(t, cmp.Diff([]*discoveryconfig.DiscoveryConfig{discoveryConfig2, discoveryConfig3}, out, cmpOpts...)) // Try to delete a discovery config that doesn't exist. err = service.DeleteDiscoveryConfig(ctx, "doesnotexist") diff --git a/lib/services/local/events.go b/lib/services/local/events.go index a3e1e472e8220..9fef40a6e2e53 100644 --- a/lib/services/local/events.go +++ b/lib/services/local/events.go @@ -162,6 +162,8 @@ func (e *EventsService) NewWatcher(ctx context.Context, watch types.Watch) (type parser = newOktaAssignmentParser() case types.KindIntegration: parser = newIntegrationParser() + case types.KindDiscoveryConfig: + parser = newDiscoveryConfigParser() case types.KindHeadlessAuthentication: p, err := newHeadlessAuthenticationParser(kind.Filter) if err != nil { @@ -1566,6 +1568,30 @@ func (p *integrationParser) parse(event backend.Event) (types.Resource, error) { } } +func newDiscoveryConfigParser() *discoveryConfigParser { + return &discoveryConfigParser{ + baseParser: newBaseParser(backend.Key(discoveryConfigPrefix)), + } +} + +type discoveryConfigParser struct { + baseParser +} + +func (p *discoveryConfigParser) parse(event backend.Event) (types.Resource, error) { + switch event.Type { + case types.OpDelete: + return resourceHeader(event, types.KindDiscoveryConfig, types.V1, 0) + case types.OpPut: + return services.UnmarshalDiscoveryConfig(event.Item.Value, + services.WithResourceID(event.Item.ID), + 
services.WithExpires(event.Item.Expires), + ) + default: + return nil, trace.BadParameter("event %v is not supported", event.Type) + } +} + func newHeadlessAuthenticationParser(m map[string]string) (*headlessAuthenticationParser, error) { var filter types.HeadlessAuthenticationFilter if err := filter.FromMap(m); err != nil { diff --git a/lib/services/presets.go b/lib/services/presets.go index 0e0794a1c6246..83c8d0e56f041 100644 --- a/lib/services/presets.go +++ b/lib/services/presets.go @@ -162,6 +162,7 @@ func NewPresetEditorRole() types.Role { types.NewRule(types.KindClusterAlert, RW()), types.NewRule(types.KindAccessList, RW()), types.NewRule(types.KindNode, RW()), + types.NewRule(types.KindDiscoveryConfig, RW()), }, }, }, diff --git a/lib/services/resource.go b/lib/services/resource.go index 9782d42f1876d..8c1a1a2c00a11 100644 --- a/lib/services/resource.go +++ b/lib/services/resource.go @@ -199,6 +199,8 @@ func ParseShortcut(in string) (string, error) { return types.KindIntegration, nil case types.KindAccessList, types.KindAccessList + "s", "accesslist", "accesslists": return types.KindAccessList, nil + case types.KindDiscoveryConfig, types.KindDiscoveryConfig + "s", "discoveryconfig", "discoveryconfigs": + return types.KindDiscoveryConfig, nil } return "", trace.BadParameter("unsupported resource: %q - resources should be expressed as 'type/name', for example 'connector/github'", in) } diff --git a/lib/services/services.go b/lib/services/services.go index 2405f5618658b..2b9d99b9152b1 100644 --- a/lib/services/services.go +++ b/lib/services/services.go @@ -48,6 +48,7 @@ type Services interface { OktaClient() Okta AccessListClient() AccessLists UserLoginStateClient() UserLoginStates + DiscoveryConfigClient() DiscoveryConfigs } // RotationGetter returns the rotation state. 
diff --git a/lib/services/useracl.go b/lib/services/useracl.go index fc197b9456deb..5b6ce49d81610 100644 --- a/lib/services/useracl.go +++ b/lib/services/useracl.go @@ -92,6 +92,8 @@ type UserACL struct { SAMLIdpServiceProvider ResourceAccess `json:"samlIdpServiceProvider"` // AccessList defines access to access list management. AccessList ResourceAccess `json:"accessList"` + // DiscoveryConfig defines whether the user has access to manage DiscoveryConfigs. + DiscoveryConfig ResourceAccess `json:"discoverConfigs"` } func hasAccess(roleSet RoleSet, ctx *Context, kind string, verbs ...string) bool { @@ -159,6 +161,7 @@ func NewUserACL(user types.User, userRoles RoleSet, features proto.Features, des license := newAccess(userRoles, ctx, types.KindLicense) deviceTrust := newAccess(userRoles, ctx, types.KindDevice) integrationsAccess := newAccess(userRoles, ctx, types.KindIntegration) + discoveryConfigsAccess := newAccess(userRoles, ctx, types.KindDiscoveryConfig) lockAccess := newAccess(userRoles, ctx, types.KindLock) accessListAccess := newAccess(userRoles, ctx, types.KindAccessList) @@ -187,6 +190,7 @@ func NewUserACL(user types.User, userRoles RoleSet, features proto.Features, des License: license, Plugins: pluginsAccess, Integrations: integrationsAccess, + DiscoveryConfig: discoveryConfigsAccess, DeviceTrust: deviceTrust, Locks: lockAccess, Assist: assistAccess, diff --git a/lib/services/useracl_test.go b/lib/services/useracl_test.go index cafeeec12d0d9..3fa526d50a47a 100644 --- a/lib/services/useracl_test.go +++ b/lib/services/useracl_test.go @@ -147,6 +147,7 @@ func TestNewUserACLCloud(t *testing.T) { require.Empty(t, cmp.Diff(userContext.Tokens, allowedRW)) require.Empty(t, cmp.Diff(userContext.Nodes, allowedRW)) require.Empty(t, cmp.Diff(userContext.AccessRequests, allowedRW)) + require.Empty(t, cmp.Diff(userContext.DiscoveryConfig, allowedRW)) require.Equal(t, userContext.Clipboard, true) require.Equal(t, userContext.DesktopSessionRecording, true) diff --git 
a/tool/tctl/common/collection.go b/tool/tctl/common/collection.go index 3232d6f70e129..745064238e563 100644 --- a/tool/tctl/common/collection.go +++ b/tool/tctl/common/collection.go @@ -30,6 +30,7 @@ import ( devicepb "github.com/gravitational/teleport/api/gen/proto/go/teleport/devicetrust/v1" loginrulepb "github.com/gravitational/teleport/api/gen/proto/go/teleport/loginrule/v1" "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/api/types/discoveryconfig" apiutils "github.com/gravitational/teleport/api/utils" "github.com/gravitational/teleport/lib/asciitable" "github.com/gravitational/teleport/lib/devicetrust" @@ -1127,6 +1128,30 @@ func (c *deviceCollection) writeText(w io.Writer, verbose bool) error { return trace.Wrap(err) } +type discoveryConfigCollection struct { + discoveryConfigs []*discoveryconfig.DiscoveryConfig +} + +func (c *discoveryConfigCollection) resources() []types.Resource { + resources := make([]types.Resource, len(c.discoveryConfigs)) + for i, dc := range c.discoveryConfigs { + resources[i] = dc + } + return resources +} + +func (c *discoveryConfigCollection) writeText(w io.Writer, verbose bool) error { + t := asciitable.MakeTable([]string{"Name", "Discovery Group"}) + for _, dc := range c.discoveryConfigs { + t.AddRow([]string{ + dc.GetName(), + dc.GetDiscoveryGroup(), + }) + } + _, err := t.AsBuffer().WriteTo(w) + return trace.Wrap(err) +} + type oktaImportRuleCollection struct { importRules []types.OktaImportRule } diff --git a/tool/tctl/common/resource_command.go b/tool/tctl/common/resource_command.go index 72a20b3750b68..952cdfad79b85 100644 --- a/tool/tctl/common/resource_command.go +++ b/tool/tctl/common/resource_command.go @@ -41,6 +41,7 @@ import ( devicepb "github.com/gravitational/teleport/api/gen/proto/go/teleport/devicetrust/v1" loginrulepb "github.com/gravitational/teleport/api/gen/proto/go/teleport/loginrule/v1" "github.com/gravitational/teleport/api/types" + 
"github.com/gravitational/teleport/api/types/discoveryconfig" "github.com/gravitational/teleport/api/types/installers" "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/client" @@ -130,6 +131,7 @@ func (rc *ResourceCommand) Initialize(app *kingpin.Application, config *servicec types.KindIntegration: rc.createIntegration, types.KindWindowsDesktop: rc.createWindowsDesktop, types.KindAccessList: rc.createAccessList, + types.KindDiscoveryConfig: rc.createDiscoveryConfig, } rc.config = config @@ -960,6 +962,31 @@ func (rc *ResourceCommand) createIntegration(ctx context.Context, client auth.Cl return nil } +func (rc *ResourceCommand) createDiscoveryConfig(ctx context.Context, client auth.ClientI, raw services.UnknownResource) error { + discoveryConfig, err := services.UnmarshalDiscoveryConfig(raw.Raw) + if err != nil { + return trace.Wrap(err) + } + + remote := client.DiscoveryConfigClient() + + if rc.force { + if _, err := remote.UpsertDiscoveryConfig(ctx, discoveryConfig); err != nil { + return trace.Wrap(err) + } + + fmt.Printf("DiscoveryConfig %q has been written\n", discoveryConfig.GetName()) + return nil + } + + if _, err := remote.CreateDiscoveryConfig(ctx, discoveryConfig); err != nil { + return trace.Wrap(err) + } + fmt.Printf("DiscoveryConfig %q has been created\n", discoveryConfig.GetName()) + + return nil +} + func (rc *ResourceCommand) createAccessList(ctx context.Context, client auth.ClientI, raw services.UnknownResource) error { accessList, err := services.UnmarshalAccessList(raw.Raw) if err != nil { @@ -1270,6 +1297,13 @@ func (rc *ResourceCommand) Delete(ctx context.Context, client auth.ClientI) (err } fmt.Printf("Integration %q removed\n", rc.ref.Name) + case types.KindDiscoveryConfig: + remote := client.DiscoveryConfigClient() + if err := remote.DeleteDiscoveryConfig(ctx, rc.ref.Name); err != nil { + return trace.Wrap(err) + } + fmt.Printf("DiscoveryConfig %q removed\n", rc.ref.Name) + case types.KindAppServer: 
appServers, err := client.GetApplicationServers(ctx, rc.namespace) if err != nil { @@ -2050,6 +2084,33 @@ func (rc *ResourceCommand) getCollection(ctx context.Context, client auth.Client } return &integrationCollection{integrations: resources}, nil + + case types.KindDiscoveryConfig: + remote := client.DiscoveryConfigClient() + if rc.ref.Name != "" { + dc, err := remote.GetDiscoveryConfig(ctx, rc.ref.Name) + if err != nil { + return nil, trace.Wrap(err) + } + return &discoveryConfigCollection{discoveryConfigs: []*discoveryconfig.DiscoveryConfig{dc}}, nil + } + + var resources []*discoveryconfig.DiscoveryConfig + var dcs []*discoveryconfig.DiscoveryConfig + var err error + var nextKey string + for { + dcs, nextKey, err = remote.ListDiscoveryConfigs(ctx, 0, nextKey) + if err != nil { + return nil, trace.Wrap(err) + } + resources = append(resources, dcs...) + if nextKey == "" { + break + } + } + + return &discoveryConfigCollection{discoveryConfigs: resources}, nil } return nil, trace.BadParameter("getting %q is not supported", rc.ref.String()) } diff --git a/tool/tctl/common/resource_command_test.go b/tool/tctl/common/resource_command_test.go index 2412ada58eb6b..86f2f9e637423 100644 --- a/tool/tctl/common/resource_command_test.go +++ b/tool/tctl/common/resource_command_test.go @@ -37,6 +37,8 @@ import ( "github.com/gravitational/teleport/api/constants" apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/api/types/discoveryconfig" + "github.com/gravitational/teleport/api/types/header" "github.com/gravitational/teleport/integration/helpers" "github.com/gravitational/teleport/lib/config" "github.com/gravitational/teleport/lib/defaults" @@ -413,6 +415,119 @@ func TestIntegrationResource(t *testing.T) { }) } +// TestDiscoveryConfigResource tests tctl discoveryConfig commands. 
+func TestDiscoveryConfigResource(t *testing.T) { + dynAddr := helpers.NewDynamicServiceAddr(t) + + ctx := context.Background() + fileConfig := &config.FileConfig{ + Global: config.Global{ + DataDir: t.TempDir(), + }, + Proxy: config.Proxy{ + Service: config.Service{ + EnabledFlag: "true", + }, + WebAddr: dynAddr.WebAddr, + TunAddr: dynAddr.TunnelAddr, + }, + Auth: config.Auth{ + Service: config.Service{ + EnabledFlag: "true", + ListenAddress: dynAddr.AuthAddr, + }, + }, + } + + auth := makeAndRunTestAuthServer(t, withFileConfig(fileConfig), withFileDescriptors(dynAddr.Descriptors)) + + t.Run("get", func(t *testing.T) { + // Add a lot of DiscoveryConfigs to test pagination + dc, err := discoveryconfig.NewDiscoveryConfig( + header.Metadata{ + Name: "mydiscoveryconfig", + }, + discoveryconfig.Spec{ + DiscoveryGroup: "prod-resources", + }, + ) + require.NoError(t, err) + + randomDiscoveryConfigName := "" + totalDiscoveryConfigs := apidefaults.DefaultChunkSize*2 + 20 // testing partial pages + for i := 0; i < totalDiscoveryConfigs; i++ { + dc.SetName(uuid.NewString()) + if i == apidefaults.DefaultChunkSize { // A "random" discoveryConfig name + randomDiscoveryConfigName = dc.GetName() + } + _, err = auth.GetAuthServer().CreateDiscoveryConfig(ctx, dc) + require.NoError(t, err) + } + + t.Run("test pagination of discovery configs ", func(t *testing.T) { + buff, err := runResourceCommand(t, fileConfig, []string{"get", types.KindDiscoveryConfig, "--format=json"}) + require.NoError(t, err) + out := mustDecodeJSON[[]discoveryconfig.DiscoveryConfig](t, buff) + require.Len(t, out, totalDiscoveryConfigs) + }) + + dcName := fmt.Sprintf("%v/%v", types.KindDiscoveryConfig, randomDiscoveryConfigName) + + t.Run("get specific discovery config", func(t *testing.T) { + buff, err := runResourceCommand(t, fileConfig, []string{"get", dcName, "--format=json"}) + require.NoError(t, err) + out := mustDecodeJSON[[]discoveryconfig.DiscoveryConfig](t, buff) + require.Len(t, out, 1) + 
require.Equal(t, randomDiscoveryConfigName, out[0].GetName()) + }) + + t.Run("get unknown discovery config", func(t *testing.T) { + unknownDiscoveryConfig := fmt.Sprintf("%v/%v", types.KindDiscoveryConfig, "unknown") + _, err := runResourceCommand(t, fileConfig, []string{"get", unknownDiscoveryConfig, "--format=json"}) + require.True(t, trace.IsNotFound(err), "expected a NotFound error, got %v", err) + }) + + t.Run("get specific discovery config with human output", func(t *testing.T) { + buff, err := runResourceCommand(t, fileConfig, []string{"get", dcName, "--format=text"}) + require.NoError(t, err) + outputString := buff.String() + require.Contains(t, outputString, "prod-resources") + require.Contains(t, outputString, randomDiscoveryConfigName) + }) + }) + + t.Run("create", func(t *testing.T) { + discoveryConfigYAMLPath := filepath.Join(t.TempDir(), "discoveryConfig.yaml") + require.NoError(t, os.WriteFile(discoveryConfigYAMLPath, []byte(discoveryConfigYAML), 0644)) + _, err := runResourceCommand(t, fileConfig, []string{"create", discoveryConfigYAMLPath}) + require.NoError(t, err) + + buff, err := runResourceCommand(t, fileConfig, []string{"get", "discovery_config/my-discovery-config", "--format=text"}) + require.NoError(t, err) + outputString := buff.String() + require.Contains(t, outputString, "my-discovery-config") + require.Contains(t, outputString, "mydg1") + + // Update the discovery group to another group + discoveryConfigYAMLV2 := strings.ReplaceAll(discoveryConfigYAML, "mydg1", "mydg2") + require.NoError(t, os.WriteFile(discoveryConfigYAMLPath, []byte(discoveryConfigYAMLV2), 0644)) + + // Trying to create it again should return an error + _, err = runResourceCommand(t, fileConfig, []string{"create", discoveryConfigYAMLPath}) + require.True(t, trace.IsAlreadyExists(err), "expected already exists error, got %v", err) + + // Using the force should be ok and replace the current object + _, err = runResourceCommand(t, fileConfig, []string{"create", "--force", 
discoveryConfigYAMLPath}) + require.NoError(t, err) + + // The DiscoveryGroup must be updated + buff, err = runResourceCommand(t, fileConfig, []string{"get", "discovery_config/my-discovery-config", "--format=text"}) + require.NoError(t, err) + outputString = buff.String() + require.Contains(t, outputString, "mydg2") + }) +} + func TestCreateLock(t *testing.T) { dynAddr := helpers.NewDynamicServiceAddr(t) fileConfig := &config.FileConfig{ @@ -621,6 +736,17 @@ spec: aws_oidc: role_arn: "arn:aws:iam::123456789012:role/OpsTeam" ` + + discoveryConfigYAML = `kind: discovery_config +version: v1 +metadata: + name: my-discovery-config +spec: + discovery_group: mydg1 + aws: + - types: ["ec2"] + regions: ["eu-west-2"] +` ) func TestCreateClusterAuthPreference_WithSupportForSecondFactorWithoutQuotes(t *testing.T) { From aee26a67b3dc677353fdaa76b6c2a6858c6746bb Mon Sep 17 00:00:00 2001 From: Andrew LeFevre Date: Wed, 11 Oct 2023 10:57:11 -0400 Subject: [PATCH 18/22] [v14] Report exit code of rsync processes if they fail in TestWithRsync (#33262) * report exit code of rsync processes if they fail * use correct 't' when asserting * Update tool/tsh/common/proxy_test.go Co-authored-by: Zac Bergquist --------- Co-authored-by: Zac Bergquist --- tool/tsh/common/proxy_test.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/tool/tsh/common/proxy_test.go b/tool/tsh/common/proxy_test.go index 617d4f4d66985..75fceabd86b9d 100644 --- a/tool/tsh/common/proxy_test.go +++ b/tool/tsh/common/proxy_test.go @@ -21,6 +21,7 @@ import ( "context" "crypto/rand" "encoding/json" + "errors" "fmt" "net" "os" @@ -329,11 +330,14 @@ func TestWithRsync(t *testing.T) { }) require.NoError(t, err) - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(t *assert.CollectT) { pref, err := asrv.GetAuthPreference(ctx) - require.NoError(t, err) + if !assert.NoError(t, err) { + return + } w, err := pref.GetWebauthn() - return err == nil && w != nil + 
assert.NoError(t, err) + assert.NotNil(t, w) }, 5*time.Second, 100*time.Millisecond) token, err := asrv.CreateResetPasswordToken(ctx, auth.CreateUserTokenRequest{ @@ -477,7 +481,12 @@ func TestWithRsync(t *testing.T) { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err = cmd.Run() - require.NoError(t, err) + var msg string + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + msg = fmt.Sprintf("exit code: %d", exitErr.ExitCode()) + } + require.NoError(t, err, msg) // verify that dst exists and that its contents match src dstContents, err := os.ReadFile(dstPath) From 83dacd78c29e66d3c12880561cc80c2d307c6984 Mon Sep 17 00:00:00 2001 From: Edward Dowling Date: Wed, 11 Oct 2023 16:19:05 +0100 Subject: [PATCH 19/22] [v14] Remove check that enforces slack oauthProviders are set (#33141) * Remove check that enforces slack oauthProviders are set * Remove test that checks for an error when hosted plugins is true * Set hosted plugins to always be true * Update tests that check hosted plugins is disabled * Add comment explaining hosted being set to true at all times --- lib/config/configuration.go | 12 ++++++------ lib/config/configuration_test.go | 15 ++------------- lib/config/fileconf.go | 14 ++++++-------- 3 files changed, 14 insertions(+), 27 deletions(-) diff --git a/lib/config/configuration.go b/lib/config/configuration.go index 582689d96c7d3..c407da59bc868 100644 --- a/lib/config/configuration.go +++ b/lib/config/configuration.go @@ -850,12 +850,12 @@ func applyAuthConfig(fc *FileConfig, cfg *servicecfg.Config) error { cfg.Auth.LoadAllCAs = fc.Auth.LoadAllCAs - if fc.Auth.HostedPlugins.Enabled { - cfg.Auth.HostedPlugins.Enabled = true - cfg.Auth.HostedPlugins.OAuthProviders, err = fc.Auth.HostedPlugins.OAuthProviders.Parse() - if err != nil { - return trace.Wrap(err) - } + // Setting this to true at all times to allow self hosting + // of plugins that were previously cloud only. 
+ cfg.Auth.HostedPlugins.Enabled = true + cfg.Auth.HostedPlugins.OAuthProviders, err = fc.Auth.HostedPlugins.OAuthProviders.Parse() + if err != nil { + return trace.Wrap(err) } return nil diff --git a/lib/config/configuration_test.go b/lib/config/configuration_test.go index 33eab3f8edc55..c6f6aa1af55cb 100644 --- a/lib/config/configuration_test.go +++ b/lib/config/configuration_test.go @@ -3593,7 +3593,7 @@ func TestAuthHostedPlugins(t *testing.T) { assert func(t *testing.T, p servicecfg.HostedPluginsConfig) }{ { - desc: "Plugins disabled by default", + desc: "Plugins enabled by default", config: strings.Join([]string{ "auth_service:", " enabled: yes", @@ -3601,20 +3601,9 @@ func TestAuthHostedPlugins(t *testing.T) { readErr: require.NoError, applyErr: require.NoError, assert: func(t *testing.T, p servicecfg.HostedPluginsConfig) { - require.False(t, p.Enabled) + require.True(t, p.Enabled) }, }, - { - desc: "Plugins enabled but zero providers defined", - config: strings.Join([]string{ - "auth_service:", - " enabled: yes", - " hosted_plugins:", - " enabled: yes", - }, "\n"), - readErr: require.NoError, - applyErr: badParameter, - }, { desc: "Unknown OAuth provider specified", config: strings.Join([]string{ diff --git a/lib/config/fileconf.go b/lib/config/fileconf.go index 8a8293e9f5d9a..342a8403f0acb 100644 --- a/lib/config/fileconf.go +++ b/lib/config/fileconf.go @@ -1205,15 +1205,13 @@ type PluginOAuthProviders struct { func (p *PluginOAuthProviders) Parse() (servicecfg.PluginOAuthProviders, error) { out := servicecfg.PluginOAuthProviders{} - if p.Slack == nil { - return out, trace.BadParameter("when plugin runtime is enabled, at least one plugin provider must be specified") - } - - slack, err := p.Slack.Parse() - if err != nil { - return out, trace.Wrap(err) + if p.Slack != nil { + slack, err := p.Slack.Parse() + if err != nil { + return out, trace.Wrap(err) + } + out.Slack = slack } - out.Slack = slack return out, nil } From 
6929f8829f6d97ca15d8dffca1abc4d4cd18e88d Mon Sep 17 00:00:00 2001 From: Trent Clarke Date: Thu, 12 Oct 2023 02:39:28 +1100 Subject: [PATCH 20/22] Release 14.0.3 (#33290) * Release 14.0.3 * Cspell update * Update CHANGELOG.md --- CHANGELOG.md | 44 +++++++++++++ Makefile | 2 +- api/version.go | 2 +- .../macos/tsh/tsh.app/Contents/Info.plist | 4 +- .../macos/tshdev/tsh.app/Contents/Info.plist | 4 +- docs/cspell.json | 1 + examples/chart/teleport-cluster/Chart.yaml | 2 +- .../charts/teleport-operator/Chart.yaml | 2 +- .../auth_deployment_test.yaml.snap | 10 +-- .../proxy_deployment_test.yaml.snap | 18 +++--- examples/chart/teleport-kube-agent/Chart.yaml | 2 +- .../__snapshot__/deployment_test.yaml.snap | 58 ++++++++--------- .../tests/__snapshot__/job_test.yaml.snap | 10 +-- .../__snapshot__/statefulset_test.yaml.snap | 64 +++++++++---------- .../updater_deployment_test.yaml.snap | 4 +- integrations/kube-agent-updater/version.go | 2 +- version.go | 2 +- 17 files changed, 138 insertions(+), 93 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 94df8d226481a..793d410f895d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,49 @@ # Changelog +## 14.0.3 (10/11/23) + +### Security Fixes + +#### [Critical] Privilege escalation through `RecursiveChown` + +When using automatic Linux user creation, an attacker could exploit a race +condition in the user creation functionality to `chown` arbitrary files on the +system. + +Users who aren't using automatic Linux host user creation aren’t affected by +this vulnerability. 
+ +[#33248](https://github.com/gravitational/teleport/pull/33248) + +### Other Fixes + + * Fixed spurious timeouts in Database Access Sessions [#32720](https://github.com/gravitational/teleport/pull/32720) + * Azure VM auto-discovery can now find VMs with multiple managed identities [#32800](https://github.com/gravitational/teleport/pull/32800) + * Fixed improperly set Kubernetes impersonation headers [#32848](https://github.com/gravitational/teleport/pull/32848) + * `tsh puttyconfig` now uses `Validity` format for WinSCP compatibility [#32856](https://github.com/gravitational/teleport/pull/32856) + * Teleport client now uses gRPC when connecting to the root cluster [#32662](https://github.com/gravitational/teleport/pull/32662) + * Teleport client now uses gRPC when creating tracing client [#32663](https://github.com/gravitational/teleport/pull/32663) + * Fixed panic on `tsh device enroll --current-device` [#32756](https://github.com/gravitational/teleport/pull/32756) + * The Teleport `etcd` backend will now start if some nodes are unreachable [#32779](https://github.com/gravitational/teleport/pull/32779) + * Fixed certificate verification issues when using `kubectl exec` [#32768](https://github.com/gravitational/teleport/pull/32768) + * Added Discover flow for enrolling EC2 Instances with EICE [#32760](https://github.com/gravitational/teleport/pull/32760) + * Added connection information to multiplexer logs [#32738](https://github.com/gravitational/teleport/pull/32738) + * Fixed issue causing keys to be incorrectly removed in tsh and Teleport Connect on Windows [#32963](https://github.com/gravitational/teleport/pull/32963) + * Improved Unified Resource Cache performance [#33027](https://github.com/gravitational/teleport/pull/33027) + * Adds Audit Review recurrence presets [#32960](https://github.com/gravitational/teleport/pull/32960) + * Fixed multiple discovery install attempts on Azure & GCP VMs [#32569](https://github.com/gravitational/teleport/pull/32569) + * 
Fixed a corner case of privilege tokens where MFA devices disabled by cluster settings were still counted against the user [#32430](https://github.com/gravitational/teleport/pull/32430) + * Fixed Access List caching & eventing issues [#32649](https://github.com/gravitational/teleport/pull/32649) + * Fixed user session tracking across trusted clusters [#32967](https://github.com/gravitational/teleport/pull/32967) + * Added cost optimized pagination search for athena [#33007](https://github.com/gravitational/teleport/pull/33007) + * Teleport now reports initial command to session moderators [#33112](https://github.com/gravitational/teleport/pull/33112) + * OneOff install script now installs enterprise Teleport when generated by an enterprise cluster [#33148](https://github.com/gravitational/teleport/pull/33148) + * Fixed issue when playing back a session recorded on a leaf cluster [#33102](https://github.com/gravitational/teleport/pull/33102) + * Fixed self-signed certificate issue on macOS [#33156](https://github.com/gravitational/teleport/pull/33156) + * Discovery EC2 instance listing now shows instance name [#33179](https://github.com/gravitational/teleport/pull/33179) + * Fixed HTTP connection hijack issue when using `tsh proxy kube` [#33172](https://github.com/gravitational/teleport/pull/33172) + * Improved error messaging in `tsh kube credentials` when root cluster roles don't allow Kube access [#33210](https://github.com/gravitational/teleport/pull/33210) + ## 14.0.1 (09/26/23) * Fixed issue where Teleport Connect Kube terminal throws an internal server error [#32612](https://github.com/gravitational/teleport/pull/32612) diff --git a/Makefile b/Makefile index d19a0548911b8..9cdbe6d88f5cf 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ # Stable releases: "1.0.0" # Pre-releases: "1.0.0-alpha.1", "1.0.0-beta.2", "1.0.0-rc.3" # Master/dev branch: "1.0.0-dev" -VERSION=14.0.1 +VERSION=14.0.3 DOCKER_IMAGE ?= teleport diff --git a/api/version.go 
b/api/version.go index 067a032265ba5..458bfdce665cf 100644 --- a/api/version.go +++ b/api/version.go @@ -1,7 +1,7 @@ // Code generated by "make version". DO NOT EDIT. package api -const Version = "14.0.1" +const Version = "14.0.3" // Gitref is set to the output of "git describe" during the build process. var Gitref string diff --git a/build.assets/macos/tsh/tsh.app/Contents/Info.plist b/build.assets/macos/tsh/tsh.app/Contents/Info.plist index 33f22917998a6..68097273ef018 100644 --- a/build.assets/macos/tsh/tsh.app/Contents/Info.plist +++ b/build.assets/macos/tsh/tsh.app/Contents/Info.plist @@ -19,13 +19,13 @@ CFBundlePackageType APPL CFBundleShortVersionString - 14.0.1 + 14.0.3 CFBundleSupportedPlatforms MacOSX CFBundleVersion - 14.0.1 + 14.0.3 DTCompiler com.apple.compilers.llvm.clang.1_0 DTPlatformBuild diff --git a/build.assets/macos/tshdev/tsh.app/Contents/Info.plist b/build.assets/macos/tshdev/tsh.app/Contents/Info.plist index 7cf705474b72c..3371fccac2469 100644 --- a/build.assets/macos/tshdev/tsh.app/Contents/Info.plist +++ b/build.assets/macos/tshdev/tsh.app/Contents/Info.plist @@ -17,13 +17,13 @@ CFBundlePackageType APPL CFBundleShortVersionString - 14.0.1 + 14.0.3 CFBundleSupportedPlatforms MacOSX CFBundleVersion - 14.0.1 + 14.0.3 DTCompiler com.apple.compilers.llvm.clang.1_0 DTPlatformBuild diff --git a/docs/cspell.json b/docs/cspell.json index e0feb6c821da7..7617d96c73574 100644 --- a/docs/cspell.json +++ b/docs/cspell.json @@ -51,6 +51,7 @@ "Divio's", "EBSCSI", "ECMWF", + "EICE", "EKCert", "ERRO", "Elastcsearch", diff --git a/examples/chart/teleport-cluster/Chart.yaml b/examples/chart/teleport-cluster/Chart.yaml index ec3ca40af4758..73417f43f0988 100644 --- a/examples/chart/teleport-cluster/Chart.yaml +++ b/examples/chart/teleport-cluster/Chart.yaml @@ -1,4 +1,4 @@ -.version: &version "14.0.1" +.version: &version "14.0.3" name: teleport-cluster apiVersion: v2 diff --git a/examples/chart/teleport-cluster/charts/teleport-operator/Chart.yaml 
b/examples/chart/teleport-cluster/charts/teleport-operator/Chart.yaml index d7f0a947e44dc..eff947b5d01c9 100644 --- a/examples/chart/teleport-cluster/charts/teleport-operator/Chart.yaml +++ b/examples/chart/teleport-cluster/charts/teleport-operator/Chart.yaml @@ -1,4 +1,4 @@ -.version: &version "14.0.1" +.version: &version "14.0.3" name: teleport-operator apiVersion: v2 diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap index cfc3b88b19945..bf328cb4dbe6e 100644 --- a/examples/chart/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap +++ b/examples/chart/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap @@ -1,6 +1,6 @@ should add an operator side-car when operator is enabled: 1: | - image: public.ecr.aws/gravitational/teleport-operator:14.0.1 + image: public.ecr.aws/gravitational/teleport-operator:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -41,7 +41,7 @@ should add an operator side-car when operator is enabled: - args: - --diag-addr=0.0.0.0:3000 - --apply-on-startup=/etc/teleport/apply-on-startup.yaml - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -174,7 +174,7 @@ should set nodeSelector when set in values: - args: - --diag-addr=0.0.0.0:3000 - --apply-on-startup=/etc/teleport/apply-on-startup.yaml - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -271,7 +271,7 @@ should set resources when set in values: - args: - --diag-addr=0.0.0.0:3000 - --apply-on-startup=/etc/teleport/apply-on-startup.yaml - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 
imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -357,7 +357,7 @@ should set securityContext when set in values: - args: - --diag-addr=0.0.0.0:3000 - --apply-on-startup=/etc/teleport/apply-on-startup.yaml - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent lifecycle: preStop: diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap index 3ecdcf160882e..5f0ac6c214fee 100644 --- a/examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap +++ b/examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap @@ -5,7 +5,7 @@ should provision initContainer correctly when set in values: - wait - no-resolve - RELEASE-NAME-auth-v13.NAMESPACE.svc.cluster.local - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 name: wait-auth-update - args: - echo test @@ -62,7 +62,7 @@ should set nodeSelector when set in values: containers: - args: - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -123,7 +123,7 @@ should set nodeSelector when set in values: - wait - no-resolve - RELEASE-NAME-auth-v13.NAMESPACE.svc.cluster.local - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 name: wait-auth-update nodeSelector: environment: security @@ -174,7 +174,7 @@ should set resources when set in values: containers: - args: - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent 
lifecycle: preStop: @@ -242,7 +242,7 @@ should set resources when set in values: - wait - no-resolve - RELEASE-NAME-auth-v13.NAMESPACE.svc.cluster.local - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 name: wait-auth-update serviceAccountName: RELEASE-NAME-proxy terminationGracePeriodSeconds: 60 @@ -275,7 +275,7 @@ should set securityContext for initContainers when set in values: containers: - args: - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -343,7 +343,7 @@ should set securityContext for initContainers when set in values: - wait - no-resolve - RELEASE-NAME-auth-v13.NAMESPACE.svc.cluster.local - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 name: wait-auth-update securityContext: allowPrivilegeEscalation: false @@ -383,7 +383,7 @@ should set securityContext when set in values: containers: - args: - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent lifecycle: preStop: @@ -451,7 +451,7 @@ should set securityContext when set in values: - wait - no-resolve - RELEASE-NAME-auth-v13.NAMESPACE.svc.cluster.local - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 name: wait-auth-update securityContext: allowPrivilegeEscalation: false diff --git a/examples/chart/teleport-kube-agent/Chart.yaml b/examples/chart/teleport-kube-agent/Chart.yaml index 4f44ab778891d..74ff96235e392 100644 --- a/examples/chart/teleport-kube-agent/Chart.yaml +++ b/examples/chart/teleport-kube-agent/Chart.yaml @@ -1,4 +1,4 @@ -.version: &version "14.0.1" +.version: 
&version "14.0.3" name: teleport-kube-agent apiVersion: v2 diff --git a/examples/chart/teleport-kube-agent/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/teleport-kube-agent/tests/__snapshot__/deployment_test.yaml.snap index ac963d13f5028..9827be009ac30 100644 --- a/examples/chart/teleport-kube-agent/tests/__snapshot__/deployment_test.yaml.snap +++ b/examples/chart/teleport-kube-agent/tests/__snapshot__/deployment_test.yaml.snap @@ -30,7 +30,7 @@ sets Deployment annotations when specified if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -101,7 +101,7 @@ sets Deployment labels when specified if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -159,7 +159,7 @@ sets Pod annotations when specified if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -217,7 +217,7 @@ sets Pod labels when specified if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -292,7 +292,7 @@ should add emptyDir for data when existingDataVolume is not set if action is Upg env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: 
public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -351,7 +351,7 @@ should add insecureSkipProxyTLSVerify to args when set in values if action is Up env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -409,7 +409,7 @@ should correctly configure existingDataVolume when set if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -465,7 +465,7 @@ should expose diag port if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -535,7 +535,7 @@ should have multiple replicas when replicaCount is set (using .replicaCount, dep env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -605,7 +605,7 @@ should have multiple replicas when replicaCount is set (using highAvailability.r env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -663,7 +663,7 @@ should have one 
replica when replicaCount is not set if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -721,7 +721,7 @@ should mount extraVolumes and extraVolumeMounts if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -786,7 +786,7 @@ should mount tls.existingCASecretName and set environment when set in values if value: "true" - name: SSL_CERT_FILE value: /etc/teleport-tls-ca/ca.pem - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -854,7 +854,7 @@ should mount tls.existingCASecretName and set extra environment when set in valu value: http://username:password@my.proxy.host:3128 - name: SSL_CERT_FILE value: /etc/teleport-tls-ca/ca.pem - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -918,7 +918,7 @@ should provision initContainer correctly when set in values if action is Upgrade env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1012,7 +1012,7 @@ should set SecurityContext if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: 
public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1090,7 +1090,7 @@ should set affinity when set in values if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1148,7 +1148,7 @@ should set default serviceAccountName when not set in values if action is Upgrad env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1219,7 +1219,7 @@ should set environment when extraEnv set in values if action is Upgrade: value: "true" - name: HTTPS_PROXY value: http://username:password@my.proxy.host:3128 - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1335,7 +1335,7 @@ should set imagePullPolicy when set in values if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: Always livenessProbe: failureThreshold: 6 @@ -1393,7 +1393,7 @@ should set nodeSelector if set in values if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1453,7 +1453,7 @@ should set 
not set priorityClassName when not set in values if action is Upgrade env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1523,7 +1523,7 @@ should set preferred affinity when more than one replica is used if action is Up env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1581,7 +1581,7 @@ should set priorityClassName when set in values if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1640,7 +1640,7 @@ should set probeTimeoutSeconds when set in values if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1708,7 +1708,7 @@ should set required affinity when highAvailability.requireAntiAffinity is set if env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1766,7 +1766,7 @@ should set resources when set in values if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: 
public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1831,7 +1831,7 @@ should set serviceAccountName when set in values if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1889,7 +1889,7 @@ should set tolerations when set in values if action is Upgrade: env: - name: TELEPORT_INSTALL_METHOD_HELM_KUBE_AGENT value: "true" - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 diff --git a/examples/chart/teleport-kube-agent/tests/__snapshot__/job_test.yaml.snap b/examples/chart/teleport-kube-agent/tests/__snapshot__/job_test.yaml.snap index a67c38a8f8d1c..a742965bbd88d 100644 --- a/examples/chart/teleport-kube-agent/tests/__snapshot__/job_test.yaml.snap +++ b/examples/chart/teleport-kube-agent/tests/__snapshot__/job_test.yaml.snap @@ -25,7 +25,7 @@ should create ServiceAccount for post-delete hook by default: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent name: post-delete-job securityContext: @@ -104,7 +104,7 @@ should not create ServiceAccount for post-delete hook if serviceAccount.create i fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent name: post-delete-job securityContext: @@ -132,7 +132,7 @@ should not create ServiceAccount, Role or RoleBinding for post-delete 
hook if se fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent name: post-delete-job securityContext: @@ -160,7 +160,7 @@ should set nodeSelector in post-delete hook: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent name: post-delete-job securityContext: @@ -190,7 +190,7 @@ should set securityContext in post-delete hook: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent name: post-delete-job securityContext: diff --git a/examples/chart/teleport-kube-agent/tests/__snapshot__/statefulset_test.yaml.snap b/examples/chart/teleport-kube-agent/tests/__snapshot__/statefulset_test.yaml.snap index 1bef24f761b69..8868291a8416f 100644 --- a/examples/chart/teleport-kube-agent/tests/__snapshot__/statefulset_test.yaml.snap +++ b/examples/chart/teleport-kube-agent/tests/__snapshot__/statefulset_test.yaml.snap @@ -16,7 +16,7 @@ sets Pod annotations when specified: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -84,7 +84,7 @@ sets Pod labels when specified: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -176,7 
+176,7 @@ sets StatefulSet labels when specified: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -272,7 +272,7 @@ should add insecureSkipProxyTLSVerify to args when set in values: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -340,7 +340,7 @@ should add volumeClaimTemplate for data volume when using StatefulSet and action fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -428,7 +428,7 @@ should add volumeClaimTemplate for data volume when using StatefulSet and is Fre fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -506,7 +506,7 @@ should add volumeMount for data volume when using StatefulSet: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -574,7 +574,7 @@ should expose diag port: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 
imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -642,7 +642,7 @@ should generate Statefulset when storage is disabled and mode is a Upgrade: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -724,7 +724,7 @@ should have multiple replicas when replicaCount is set (using .replicaCount, dep fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -804,7 +804,7 @@ should have multiple replicas when replicaCount is set (using highAvailability.r fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -872,7 +872,7 @@ should have one replica when replicaCount is not set: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -940,7 +940,7 @@ should install Statefulset when storage is disabled and mode is a Fresh Install: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1010,7 +1010,7 @@ should mount extraVolumes and extraVolumeMounts: fieldPath: metadata.namespace - name: RELEASE_NAME value: 
RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1085,7 +1085,7 @@ should mount tls.existingCASecretName and set environment when set in values: value: RELEASE-NAME - name: SSL_CERT_FILE value: /etc/teleport-tls-ca/ca.pem - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1165,7 +1165,7 @@ should mount tls.existingCASecretName and set extra environment when set in valu value: /etc/teleport-tls-ca/ca.pem - name: HTTPS_PROXY value: http://username:password@my.proxy.host:3128 - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1241,7 +1241,7 @@ should not add emptyDir for data when using StatefulSet: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1309,7 +1309,7 @@ should provision initContainer correctly when set in values: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1413,7 +1413,7 @@ should set SecurityContext: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ 
-1501,7 +1501,7 @@ should set affinity when set in values: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1569,7 +1569,7 @@ should set default serviceAccountName when not set in values: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1650,7 +1650,7 @@ should set environment when extraEnv set in values: value: RELEASE-NAME - name: HTTPS_PROXY value: http://username:password@my.proxy.host:3128 - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1786,7 +1786,7 @@ should set imagePullPolicy when set in values: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: Always livenessProbe: failureThreshold: 6 @@ -1854,7 +1854,7 @@ should set nodeSelector if set in values: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -1936,7 +1936,7 @@ should set preferred affinity when more than one replica is used: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: 
IfNotPresent livenessProbe: failureThreshold: 6 @@ -2004,7 +2004,7 @@ should set probeTimeoutSeconds when set in values: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -2082,7 +2082,7 @@ should set required affinity when highAvailability.requireAntiAffinity is set: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -2150,7 +2150,7 @@ should set resources when set in values: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -2225,7 +2225,7 @@ should set serviceAccountName when set in values: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -2293,7 +2293,7 @@ should set storage.requests when set in values and action is an Upgrade: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -2361,7 +2361,7 @@ should set storage.storageClassName when set in values and action is an Upgrade: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: 
public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -2429,7 +2429,7 @@ should set tolerations when set in values: fieldPath: metadata.namespace - name: RELEASE_NAME value: RELEASE-NAME - image: public.ecr.aws/gravitational/teleport-distroless:14.0.1 + image: public.ecr.aws/gravitational/teleport-distroless:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 diff --git a/examples/chart/teleport-kube-agent/tests/__snapshot__/updater_deployment_test.yaml.snap b/examples/chart/teleport-kube-agent/tests/__snapshot__/updater_deployment_test.yaml.snap index d62cd79169e85..864ab26e30d75 100644 --- a/examples/chart/teleport-kube-agent/tests/__snapshot__/updater_deployment_test.yaml.snap +++ b/examples/chart/teleport-kube-agent/tests/__snapshot__/updater_deployment_test.yaml.snap @@ -27,7 +27,7 @@ sets the affinity: - --base-image=public.ecr.aws/gravitational/teleport-distroless - --version-server=https://my-custom-version-server/v1 - --version-channel=custom/preview - image: public.ecr.aws/gravitational/teleport-kube-agent-updater:14.0.1 + image: public.ecr.aws/gravitational/teleport-kube-agent-updater:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 @@ -71,7 +71,7 @@ sets the tolerations: - --base-image=public.ecr.aws/gravitational/teleport-distroless - --version-server=https://my-custom-version-server/v1 - --version-channel=custom/preview - image: public.ecr.aws/gravitational/teleport-kube-agent-updater:14.0.1 + image: public.ecr.aws/gravitational/teleport-kube-agent-updater:14.0.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 6 diff --git a/integrations/kube-agent-updater/version.go b/integrations/kube-agent-updater/version.go index e4e4ee4ff005a..6c4caf44bd79f 100644 --- a/integrations/kube-agent-updater/version.go +++ b/integrations/kube-agent-updater/version.go @@ -1,7 +1,7 
@@ // Code generated by "make version". DO NOT EDIT. package kubeversionupdater -const Version = "14.0.1" +const Version = "14.0.3" // Gitref is set to the output of "git describe" during the build process. var Gitref string diff --git a/version.go b/version.go index a2e7674b2266a..2ae0b91cbb27a 100644 --- a/version.go +++ b/version.go @@ -1,7 +1,7 @@ // Code generated by "make version". DO NOT EDIT. package teleport -const Version = "14.0.1" +const Version = "14.0.3" // Gitref is set to the output of "git describe" during the build process. var Gitref string From 5079dd8c83de795153eebaa99e4c2cf857054c17 Mon Sep 17 00:00:00 2001 From: Tiago Silva Date: Wed, 11 Oct 2023 16:50:59 +0100 Subject: [PATCH 21/22] [v14] Add param `extraContainers` to `teleport-cluster` and `teleport-kube-agent` (#33299) * Add param `extraContainers` to `teleport-cluster` and `teleport-kube-agent` (close #6832) This allows to add side containers to Teleport and Teleport-Agent pods. Signed-off-by: Tiago Silva * fix unit tests --------- Signed-off-by: Tiago Silva Co-authored-by: Kseniya Shaydurova --- .../.lint/extra-containers.yaml | 12 +++ .../templates/auth/deployment.yaml | 3 + .../templates/proxy/deployment.yaml | 3 + .../tests/auth_deployment_test.yaml | 31 +++++++ .../tests/proxy_deployment_test.yaml | 30 ++++++ .../chart/teleport-cluster/values.schema.json | 6 ++ examples/chart/teleport-cluster/values.yaml | 13 +++ .../.lint/extra-containers.yaml | 15 +++ .../templates/deployment.yaml | 3 + .../templates/statefulset.yaml | 3 + .../tests/deployment_test.yaml | 92 +++++++++++++------ .../tests/statefulset_test.yaml | 43 +++++++-- .../teleport-kube-agent/values.schema.json | 6 ++ .../chart/teleport-kube-agent/values.yaml | 13 +++ 14 files changed, 238 insertions(+), 35 deletions(-) create mode 100644 examples/chart/teleport-cluster/.lint/extra-containers.yaml create mode 100644 examples/chart/teleport-kube-agent/.lint/extra-containers.yaml diff --git 
a/examples/chart/teleport-cluster/.lint/extra-containers.yaml b/examples/chart/teleport-cluster/.lint/extra-containers.yaml new file mode 100644 index 0000000000000..14d04af93cf8c --- /dev/null +++ b/examples/chart/teleport-cluster/.lint/extra-containers.yaml @@ -0,0 +1,12 @@ +clusterName: helm-lint.example.com +extraContainers: + - name: nscenter + command: + - /bin/bash + - -c + - sleep infinity & wait + image: praqma/network-multitool + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsNonRoot: false diff --git a/examples/chart/teleport-cluster/templates/auth/deployment.yaml b/examples/chart/teleport-cluster/templates/auth/deployment.yaml index 8b86131d3113a..699d1353ca847 100644 --- a/examples/chart/teleport-cluster/templates/auth/deployment.yaml +++ b/examples/chart/teleport-cluster/templates/auth/deployment.yaml @@ -270,6 +270,9 @@ spec: readOnly: true {{- end }} {{ end }} +{{- if $auth.extraContainers }} + {{- toYaml $auth.extraContainers | nindent 6 }} +{{- end }} {{- if $projectedServiceAccountToken }} automountServiceAccountToken: false {{- end }} diff --git a/examples/chart/teleport-cluster/templates/proxy/deployment.yaml b/examples/chart/teleport-cluster/templates/proxy/deployment.yaml index a77c339b3087c..68cfbd5a48c63 100644 --- a/examples/chart/teleport-cluster/templates/proxy/deployment.yaml +++ b/examples/chart/teleport-cluster/templates/proxy/deployment.yaml @@ -255,6 +255,9 @@ spec: {{- if $proxy.extraVolumeMounts }} {{- toYaml $proxy.extraVolumeMounts | nindent 8 }} {{- end }} +{{- if $proxy.extraContainers }} + {{- toYaml $proxy.extraContainers | nindent 6 }} +{{- end }} {{- if $projectedServiceAccountToken }} automountServiceAccountToken: false {{- end }} diff --git a/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml b/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml index cc8cb581ccff2..d838ca97fbd2d 100644 --- a/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml +++ 
b/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml @@ -304,6 +304,7 @@ tests: name: my-mount secret: secretName: mySecret + - it: should set imagePullPolicy when set in values template: auth/deployment.yaml set: @@ -314,6 +315,36 @@ tests: path: spec.template.spec.containers[0].imagePullPolicy value: Always + - it: should have only one container when no `extraContainers` is set in values + template: auth/deployment.yaml + set: + extraContainers: [] + clusterName: helm-lint.example.com + asserts: + - isNotNull: + path: spec.template.spec.containers[0] + - isNull: + path: spec.template.spec.containers[1] + + - it: should add one more container when `extraContainers` is set in values + template: auth/deployment.yaml + values: + - ../.lint/extra-containers.yaml + asserts: + - equal: + path: spec.template.spec.containers[1] + value: + name: nscenter + command: + - /bin/bash + - -c + - sleep infinity & wait + image: praqma/network-multitool + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsNonRoot: false + - it: should set environment when extraEnv set in values template: auth/deployment.yaml values: diff --git a/examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml b/examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml index 4c4ddf4a9c304..c4cbfcc8fad2d 100644 --- a/examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml +++ b/examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml @@ -332,6 +332,36 @@ tests: path: spec.template.spec.containers[0].imagePullPolicy value: Always + - it: should have only one container when no `extraContainers` is set in values + template: proxy/deployment.yaml + set: + extraContainers: [] + clusterName: helm-lint.example.com + asserts: + - isNotNull: + path: spec.template.spec.containers[0] + - isNull: + path: spec.template.spec.containers[1] + + - it: should add one more container when `extraContainers` is set in values + template: proxy/deployment.yaml + 
values: + - ../.lint/extra-containers.yaml + asserts: + - equal: + path: spec.template.spec.containers[1] + value: + name: nscenter + command: + - /bin/bash + - -c + - sleep infinity & wait + image: praqma/network-multitool + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsNonRoot: false + - it: should set environment when extraEnv set in values template: proxy/deployment.yaml values: diff --git a/examples/chart/teleport-cluster/values.schema.json b/examples/chart/teleport-cluster/values.schema.json index 83178749743cf..675f9b5750636 100644 --- a/examples/chart/teleport-cluster/values.schema.json +++ b/examples/chart/teleport-cluster/values.schema.json @@ -19,6 +19,7 @@ "affinity", "nodeSelector", "annotations", + "extraContainers", "extraVolumes", "extraVolumeMounts", "imagePullPolicy", @@ -888,6 +889,11 @@ "type": "array", "default": [] }, + "extraContainers": { + "$id": "#/properties/extraContainers", + "type": "array", + "default": [] + }, "extraVolumes": { "$id": "#/properties/extraVolumes", "type": "array", diff --git a/examples/chart/teleport-cluster/values.yaml b/examples/chart/teleport-cluster/values.yaml index d52430669ebc0..1a11af3edfe7b 100644 --- a/examples/chart/teleport-cluster/values.yaml +++ b/examples/chart/teleport-cluster/values.yaml @@ -609,6 +609,19 @@ extraArgs: [] # Extra environment to be configured on the Teleport pod extraEnv: [] +# Extra containers to be added to the Teleport pod +extraContainers: [] +# - name: nscenter +# command: +# - /bin/bash +# - -c +# - sleep infinity & wait +# image: praqma/network-multitool +# imagePullPolicy: IfNotPresent +# securityContext: +# privileged: true +# runAsNonRoot: false + # Extra volumes to mount into the Teleport pods # https://kubernetes.io/docs/concepts/storage/volumes/ extraVolumes: [] diff --git a/examples/chart/teleport-kube-agent/.lint/extra-containers.yaml b/examples/chart/teleport-kube-agent/.lint/extra-containers.yaml new file mode 100644 index 
0000000000000..7d7dd36bbe23c --- /dev/null +++ b/examples/chart/teleport-kube-agent/.lint/extra-containers.yaml @@ -0,0 +1,15 @@ +authToken: auth-token +proxyAddr: proxy.example.com:3080 +roles: kube +kubeClusterName: test-kube-cluster +extraContainers: + - name: nscenter + command: + - /bin/bash + - -c + - sleep infinity & wait + image: praqma/network-multitool + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsNonRoot: false diff --git a/examples/chart/teleport-kube-agent/templates/deployment.yaml b/examples/chart/teleport-kube-agent/templates/deployment.yaml index 30b7924fe8b84..65eae94d873bd 100644 --- a/examples/chart/teleport-kube-agent/templates/deployment.yaml +++ b/examples/chart/teleport-kube-agent/templates/deployment.yaml @@ -189,6 +189,9 @@ spec: {{- end }} {{- if .Values.extraVolumeMounts }} {{- toYaml .Values.extraVolumeMounts | nindent 8 }} +{{- end }} +{{- if .Values.extraContainers }} + {{- toYaml .Values.extraContainers | nindent 6 }} {{- end }} volumes: - name: "config" diff --git a/examples/chart/teleport-kube-agent/templates/statefulset.yaml b/examples/chart/teleport-kube-agent/templates/statefulset.yaml index 4f310106233fe..3f8220fcb44f4 100644 --- a/examples/chart/teleport-kube-agent/templates/statefulset.yaml +++ b/examples/chart/teleport-kube-agent/templates/statefulset.yaml @@ -206,6 +206,9 @@ spec: {{- end }} {{- if .Values.extraVolumeMounts }} {{- toYaml .Values.extraVolumeMounts | nindent 8 }} +{{- end }} +{{- if .Values.extraContainers }} + {{- toYaml .Values.extraContainers | nindent 6 }} {{- end }} volumes: - name: "config" diff --git a/examples/chart/teleport-kube-agent/tests/deployment_test.yaml b/examples/chart/teleport-kube-agent/tests/deployment_test.yaml index 1c4926cb7b601..e8699ffd9205e 100644 --- a/examples/chart/teleport-kube-agent/tests/deployment_test.yaml +++ b/examples/chart/teleport-kube-agent/tests/deployment_test.yaml @@ -8,7 +8,7 @@ tests: - it: creates a Deployment if action is Upgrade 
template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -22,7 +22,7 @@ tests: - it: sets Deployment labels when specified if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -40,7 +40,7 @@ tests: - it: sets Pod labels when specified if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -58,7 +58,7 @@ tests: - it: sets Deployment annotations when specified if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -75,7 +75,7 @@ tests: - it: sets Pod annotations when specified if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -93,7 +93,7 @@ tests: - it: should have one replica when replicaCount is not set if action is Upgrade template: deployment.yaml set: - # unit test 
does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -108,7 +108,7 @@ tests: - it: should have multiple replicas when replicaCount is set (using .replicaCount, deprecated) if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true replicaCount: 3 @@ -141,7 +141,7 @@ tests: - it: should set affinity when set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -157,7 +157,7 @@ tests: values: - ../.lint/backwards-compatibility.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true @@ -181,7 +181,7 @@ tests: values: - ../.lint/backwards-compatibility.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true @@ -202,7 +202,7 @@ tests: - it: should set tolerations when set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # 
unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -216,7 +216,7 @@ tests: - it: should set resources when set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -240,7 +240,7 @@ tests: - it: should set SecurityContext if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -271,7 +271,7 @@ tests: values: - ../.lint/backwards-compatibility.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true @@ -283,10 +283,44 @@ tests: - matchSnapshot: path: spec.template.spec + - it: should have only one container when no `extraContainers` is set in values + template: deployment.yaml + set: + extraContainers: [] + proxyAddr: helm-lint.example.com + kubeClusterName: helm-lint.example.com + unitTestUpgrade: true + asserts: + - isNotNull: + path: spec.template.spec.containers[0] + - isNull: + path: spec.template.spec.containers[1] + + - it: should add one more container when `extraContainers` is set in values + template: deployment.yaml + set: + unitTestUpgrade: true + values: + - ../.lint/extra-containers.yaml + asserts: + - equal: + path: spec.template.spec.containers[1] + value: + name: nscenter + command: + - 
/bin/bash + - -c + - sleep infinity & wait + image: praqma/network-multitool + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsNonRoot: false + - it: should mount extraVolumes and extraVolumeMounts if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -311,7 +345,7 @@ tests: values: - ../.lint/backwards-compatibility.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true @@ -326,7 +360,7 @@ tests: - it: should set environment when extraEnv set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true @@ -348,7 +382,7 @@ tests: - it: should provision initContainer correctly when set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -383,7 +417,7 @@ tests: values: - ../.lint/backwards-compatibility.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # 
https://github.com/helm/helm/issues/8137 unitTestUpgrade: true @@ -398,7 +432,7 @@ tests: - it: should expose diag port if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -416,7 +450,7 @@ tests: - it: should set nodeSelector if set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -432,7 +466,7 @@ tests: - it: should add emptyDir for data when existingDataVolume is not set if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -454,7 +488,7 @@ tests: - it: should correctly configure existingDataVolume when set if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -476,7 +510,7 @@ tests: - it: should mount tls.existingCASecretName and set environment when set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this 
undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -505,7 +539,7 @@ tests: - it: should mount tls.existingCASecretName and set extra environment when set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -540,7 +574,7 @@ tests: - it: should set priorityClassName when set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -555,7 +589,7 @@ tests: - it: should set not set priorityClassName when not set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -569,7 +603,7 @@ tests: - it: should set serviceAccountName when set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: @@ -584,7 +618,7 @@ tests: - it: should set default serviceAccountName when not set in values if action is Upgrade template: deployment.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not 
support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true values: diff --git a/examples/chart/teleport-kube-agent/tests/statefulset_test.yaml b/examples/chart/teleport-kube-agent/tests/statefulset_test.yaml index c418b7d96451c..00aba14cfcac9 100644 --- a/examples/chart/teleport-kube-agent/tests/statefulset_test.yaml +++ b/examples/chart/teleport-kube-agent/tests/statefulset_test.yaml @@ -217,6 +217,37 @@ tests: - matchSnapshot: path: spec.template.spec + - it: should have only one container when no `extraContainers` is set in values + template: statefulset.yaml + set: + extraContainers: [] + proxyAddr: helm-lint.example.com + kubeClusterName: helm-lint.example.com + asserts: + - isNotNull: + path: spec.template.spec.containers[0] + - isNull: + path: spec.template.spec.containers[1] + + - it: should add one more container when `extraContainers` is set in values + template: statefulset.yaml + values: + - ../.lint/extra-containers.yaml + asserts: + - equal: + path: spec.template.spec.containers[1] + value: + name: nscenter + command: + - /bin/bash + - -c + - sleep infinity & wait + image: praqma/network-multitool + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsNonRoot: false + - it: should mount extraVolumes and extraVolumeMounts template: statefulset.yaml values: @@ -404,7 +435,7 @@ tests: values: - ../.lint/stateful.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true release: @@ -420,7 +451,7 @@ tests: values: - ../.lint/stateful.yaml set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # 
https://github.com/helm/helm/issues/8137 unitTestUpgrade: true release: @@ -437,7 +468,7 @@ tests: release: upgrade: true set: - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true @@ -459,7 +490,7 @@ tests: set: storage: requests: 256Mi - # unit test does not support lookup functions, so to test the behavior we use this undoc value + # unit test does not support lookup functions, so to test the behavior we use this undoc value # https://github.com/helm/helm/issues/8137 unitTestUpgrade: true asserts: @@ -594,7 +625,7 @@ tests: values: - ../.lint/stateful.yaml set: - storage: + storage: enabled: false asserts: - contains: @@ -629,7 +660,7 @@ tests: - ../.lint/stateful.yaml set: unitTestUpgrade: true - storage: + storage: enabled: false asserts: - contains: diff --git a/examples/chart/teleport-kube-agent/values.schema.json b/examples/chart/teleport-kube-agent/values.schema.json index 815cf8d5a79ca..fa237bde8d5b8 100644 --- a/examples/chart/teleport-kube-agent/values.schema.json +++ b/examples/chart/teleport-kube-agent/values.schema.json @@ -29,6 +29,7 @@ "log", "affinity", "annotations", + "extraContainers", "extraVolumes", "extraVolumeMounts", "imagePullPolicy", @@ -598,6 +599,11 @@ "type": "array", "default": [] }, + "extraContainers": { + "$id": "#/properties/extraContainers", + "type": "array", + "default": [] + }, "extraVolumes": { "$id": "#/properties/extraVolumes", "type": "array", diff --git a/examples/chart/teleport-kube-agent/values.yaml b/examples/chart/teleport-kube-agent/values.yaml index 5c381bf1a3e06..3e2317d9e191d 100644 --- a/examples/chart/teleport-kube-agent/values.yaml +++ b/examples/chart/teleport-kube-agent/values.yaml @@ -394,6 +394,19 @@ extraArgs: [] # Extra environment to be configured on the Teleport pod extraEnv: [] +# Extra containers to 
be added to the Teleport pod +extraContainers: [] +# - name: nscenter +# command: +# - /bin/bash +# - -c +# - sleep infinity & wait +# image: praqma/network-multitool +# imagePullPolicy: IfNotPresent +# securityContext: +# privileged: true +# runAsNonRoot: false + # Extra volumes to mount into the Teleport pods # https://kubernetes.io/docs/concepts/storage/volumes/ extraVolumes: [] From 6d5701e8d1ad155baed0aa222bc6c6760e40c552 Mon Sep 17 00:00:00 2001 From: Lisa Gunn Date: Tue, 10 Oct 2023 13:20:25 -0700 Subject: [PATCH 22/22] Split openssh into two topics --- docs/pages/server-access/guides/openssh/openssh.mdx | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/pages/server-access/guides/openssh/openssh.mdx b/docs/pages/server-access/guides/openssh/openssh.mdx index 2992492c2a2bb..b05e70dc87fbe 100644 --- a/docs/pages/server-access/guides/openssh/openssh.mdx +++ b/docs/pages/server-access/guides/openssh/openssh.mdx @@ -179,7 +179,6 @@ This command creates an SSH configuration file at a nonstandard location in order to make it easier to clean up, but you can append the output of `tsh config` to the default SSH config file (`~/.ssh/config`) if you wish. -
Teleport implements an SSH server that includes several **subsystems**, or