Commit 1661b60

update tests
1 parent 9964b00 commit 1661b60

11 files changed: +324 −965 lines changed

cli/pkg/lifecycle/get_credentials.go

+33 −1
@@ -8,6 +8,7 @@ import (
 	gateway "cloud.google.com/go/gkeconnect/gateway/apiv1"
 	gatewaypb "cloud.google.com/go/gkeconnect/gateway/apiv1/gatewaypb"
 	log "github.com/sirupsen/logrus"
+	"google.golang.org/api/cloudresourcemanager/v1"
 	"google.golang.org/api/gkehub/v1"
 	"google.golang.org/api/option"
 	"k8s.io/client-go/tools/clientcmd"
@@ -34,6 +35,13 @@ func GenerateKubeConfig(fleetProjectId string) (*clientcmdapi.Config, error) {
 		return nil, err
 	}
 
+	// Get the project number for the fleet project, needed later in the generate credentials request
+	projectNumber, err := getProjectNumber(fleetProjectId)
+	if err != nil {
+		log.Errorf("Failed to get project number: %v", err)
+		return nil, err
+	}
+
 	// Create a new kubeconfig.
 	config := clientcmdapi.NewConfig()
 
@@ -55,7 +63,7 @@ func GenerateKubeConfig(fleetProjectId string) (*clientcmdapi.Config, error) {
 
 	// Create a Gateway Control Client with the correct endpoint
 	ctx2 := context.Background()
-	gcc, err := gateway.NewGatewayControlClient(ctx2, option.WithEndpoint(endpoint))
+	gcc, err := gateway.NewGatewayControlRESTClient(ctx2, option.WithEndpoint(endpoint))
 	if err != nil {
 		log.Errorf("Failed to create gateway control client for %s: %v", membershipName, err)
 		failedMemberships = append(failedMemberships, membershipName)
@@ -65,6 +73,10 @@ func GenerateKubeConfig(fleetProjectId string) (*clientcmdapi.Config, error) {
 
 	log.Infof("Generating credentials for membership: %s", membershipName)
 
+	// Construct the correct membership name with the project number
+	membershipName = fmt.Sprintf("projects/%s/locations/%s/memberships/%s",
+		projectNumber, membershipLocation, extractMembershipID(membership.Name))
+
 	// Generate credentials for each membership
 	req := &gatewaypb.GenerateCredentialsRequest{
 		Name: membershipName,
@@ -126,3 +138,23 @@ func extractLocation(path string) string {
 	}
 	return ""
 }
+
+func extractMembershipID(membershipName string) string {
+	parts := strings.Split(membershipName, "/")
+	return parts[len(parts)-1]
+}
+
+func getProjectNumber(projectID string) (string, error) {
+	ctx := context.Background()
+	crmService, err := cloudresourcemanager.NewService(ctx)
+	if err != nil {
+		return "", fmt.Errorf("failed to create Resource Manager client: %v", err)
+	}
+
+	project, err := crmService.Projects.Get(projectID).Do()
+	if err != nil {
+		return "", fmt.Errorf("failed to get project: %v", err)
+	}
+
+	return fmt.Sprintf("%d", project.ProjectNumber), nil
+}
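The new `getProjectNumber` helper resolves a project ID to its numeric project number via the Cloud Resource Manager API, so the membership resource name can be built with the number rather than the ID. As a quick sanity check, the same lookup can be done from a shell with plain gcloud (the project ID variable here is just a placeholder):

```bash
# Resolve a project ID to its numeric project number, mirroring getProjectNumber.
gcloud projects describe ${GKE_PROJECT_ID} --format='value(projectNumber)'
```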

demos/fleets/README.md

+210 −0
# 🚲 GKE Poc Toolkit Demo: GKE Fleet setup with ConfigSync and Argo Rollouts

This demo shows you how to bootstrap a Fleet of GKE clusters, using Config Sync as your GitOps engine and Argo Rollouts to progressively release app updates.

Services in play:
* [ConfigSync](https://cloud.google.com/anthos-config-management/docs/config-sync-overview)
* [Argo Rollouts](https://argoproj.github.io/argo-rollouts/)
* [GKE](https://cloud.google.com/kubernetes-engine/docs)
* [Multi Cluster Services](https://cloud.google.com/kubernetes-engine/docs/concepts/multi-cluster-services)
* [Multi Cluster Ingress](https://cloud.google.com/kubernetes-engine/docs/concepts/multi-cluster-ingress)
* [Anthos Service Mesh w/ Managed Control Plane](https://cloud.google.com/service-mesh/docs/overview#managed_anthos_service_mesh)

![diagram](assets/diagram.png)

## Fleet Infra setup

1. **Initialize the GKE POC Toolkit (gkekitctl init).**
```bash
export GKE_PROJECT_ID=<your-project-id>
export OS="darwin" # choice of darwin or amd64
```

```bash
gcloud config set project $GKE_PROJECT_ID
gcloud auth login
gcloud auth application-default login

ROOT_DIR=`pwd`
mkdir gke-poc-toolkit && cd "$_"
VERSION=$(curl -s https://api.github.com/repos/GoogleCloudPlatform/gke-poc-toolkit/releases/latest | grep browser_download_url | cut -d "/" -f 8 | tail -1)
curl -sLSf -o ./gkekitctl https://github.com/GoogleCloudPlatform/gke-poc-toolkit/releases/download/${VERSION}/gkekitctl-${OS} && chmod +x ./gkekitctl

./gkekitctl init
```

2. **Configure the default Config Sync repo.**
```bash
ROOT_DIR=`pwd`
# Set up a self-signed cert for the ASM ingress gateway
mkdir tmp
openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
  -subj "/CN=frontend.endpoints.${GKE_PROJECT_ID}.cloud.goog/O=Edge2Mesh Inc" \
  -keyout tmp/frontend.endpoints.${GKE_PROJECT_ID}.cloud.goog.key \
  -out tmp/frontend.endpoints.${GKE_PROJECT_ID}.cloud.goog.crt

gcloud secrets create edge2mesh-credential.crt --replication-policy="automatic" --data-file="tmp/frontend.endpoints.${GKE_PROJECT_ID}.cloud.goog.crt"
gcloud secrets create edge2mesh-credential.key --replication-policy="automatic" --data-file="tmp/frontend.endpoints.${GKE_PROJECT_ID}.cloud.goog.key"

rm -rf tmp

# Set the Fleet project in the gkekitctl config
cd ${ROOT_DIR}/default-configs
if [[ "$OSTYPE" == "darwin"* ]]; then
  sed -i '' -e "s/clustersProjectId: \"my-project\"/clustersProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
  sed -i '' -e "s/fleetProjectId: \"my-project\"/fleetProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
  sed -i '' -e "s/vpcProjectId: \"my-project\"/vpcProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
else
  sed -i -e "s/clustersProjectId: \"my-project\"/clustersProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
  sed -i -e "s/fleetProjectId: \"my-project\"/fleetProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
  sed -i -e "s/vpcProjectId: \"my-project\"/vpcProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
fi
```

3. **Export vars and add them to your GKE POC toolkit config.yaml.**
```bash
cp ${ROOT_DIR}/config.yaml ${ROOT_DIR}/gke-poc-toolkit
cd ${ROOT_DIR}/gke-poc-toolkit
if [[ "$OSTYPE" == "darwin"* ]]; then
  sed -i '' -e "s/clustersProjectId: \"my-project\"/clustersProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
  sed -i '' -e "s/fleetProjectId: \"my-project\"/fleetProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
  sed -i '' -e "s/vpcProjectId: \"my-project\"/vpcProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
else
  sed -i -e "s/clustersProjectId: \"my-project\"/clustersProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
  sed -i -e "s/fleetProjectId: \"my-project\"/fleetProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
  sed -i -e "s/vpcProjectId: \"my-project\"/vpcProjectId: \"${GKE_PROJECT_ID}\"/g" config.yaml
fi
```

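Before moving on, it's worth confirming the substitutions landed; this is a plain grep, not part of the toolkit:

```bash
# All three project fields should now show your project ID, not "my-project".
grep -E 'clustersProjectId|fleetProjectId|vpcProjectId' config.yaml
```
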
4. **Run the gkekitctl apply command from this directory.** This will take about 15 minutes to run.
```bash
./gkekitctl apply --config config.yaml
```

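While the apply runs, you can follow cluster creation from another shell with a standard gcloud check:

```bash
# List the GKE operations the toolkit kicked off (cluster creation takes a while).
gcloud container operations list --project ${GKE_PROJECT_ID}
```
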
5. **Connect to your newly-created GKE clusters.**

```bash
gcloud container clusters get-credentials mccp-central-01 --region us-central1 --project ${GKE_PROJECT_ID}
```

6. **We highly recommend installing [kubectx and kubens](https://github.com/ahmetb/kubectx) to switch kubectl contexts between clusters with ease. Once they are installed, you can validate your clusters like so.**
```bash
kubectx mccp-central-01=gke_${GKE_PROJECT_ID}_us-central1_mccp-central-01
kubectl get nodes
```

*Expected output for each cluster*:
```bash
NAME                                                      STATUS   ROLES    AGE   VERSION
gke-mccp-central-01-linux-gke-toolkit-poo-12b0fa78-grhw   Ready    <none>   11m   v1.21.6-gke.1500
gke-mccp-central-01-linux-gke-toolkit-poo-24d712a2-jm5g   Ready    <none>   11m   v1.21.6-gke.1500
gke-mccp-central-01-linux-gke-toolkit-poo-6fb11d07-h6xb   Ready    <none>   11m   v1.21.6-gke.1500
```
7. **Now we are going to delete the app clusters you created, for a better demo flow.**
```bash
## Ensure the mccp cluster is the ingress config controller
gcloud container fleet ingress update --config-membership=mccp-central-01-membership -q

## Unregister the app clusters from the Fleet
gcloud container fleet memberships delete gke-std-west01-membership --project ${GKE_PROJECT_ID} -q
gcloud container fleet memberships delete gke-std-east01-membership --project ${GKE_PROJECT_ID} -q

## Delete the app clusters
gcloud container clusters delete gke-std-west01 --region us-west1 --project ${GKE_PROJECT_ID} -q --async
gcloud container clusters delete gke-std-east01 --region us-east1 --project ${GKE_PROJECT_ID} -q --async
```
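
Because the cluster deletes run with `--async`, you can confirm the Fleet is back down to just the config cluster with a standard gcloud check:

```bash
# Only mccp-central-01-membership should remain registered.
gcloud container fleet memberships list --project ${GKE_PROJECT_ID}
```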

## Fleet Cluster setup
So far we have the infrastructure laid out; now we need to set up the multi-cluster controller cluster with Argo CD, GKE Fleet components, and some other tooling needed for the demo.

1. **Hydrate those configs with our project-specific variables by running the Fleet prep script.**
```bash
# Run the Fleet Prep script
cd ${ROOT_DIR}
./scripts/fleet_prep.sh -p ${GKE_PROJECT_ID}
```

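Once the prep script finishes, a reasonable smoke test is to confirm the controller tooling came up; this assumes Argo CD was installed into its conventional argocd namespace:

```bash
# All Argo CD pods should reach Running/Ready before moving on (namespace assumed).
kubectl get pods -n argocd
```
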
## Promoting Application Clusters to the Fleet
Now that the multi-cluster controller cluster is set up, we need to create and promote a GKE cluster to the Fleet that will run applications. Since the multi-cluster networking configs have already been hydrated, adding a cluster with the environment=prod label in the ConfigSync cluster object ensures the new cluster syncs all the baseline tooling it needs, including the ASM gateways. We have also labeled this first cluster as a wave one cluster; the wave label will be leveraged once apps start getting added.

1. **Run the application cluster add script.**
```bash
./scripts/fleet_cluster_add.sh -p ${GKE_PROJECT_ID} -n gke-ap-west01 -l us-west1 -c "172.16.10.0/28" -t "autopilot" -w one
```

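When the script completes, the new Autopilot cluster should be visible alongside the controller cluster; a plain gcloud check:

```bash
# Expect to see gke-ap-west01 listed next to mccp-central-01.
gcloud container clusters list --project ${GKE_PROJECT_ID}
```
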
2. **Browse to the Config Sync UI and you will see the configs in the subfolders of the app-clusters-config folder being installed. This state is all driven by the app-clusters tooling application set, which targets clusters labeled as prod.**

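If you prefer the CLI to the UI, the application set and the Applications it generates can be listed directly; this assumes the Argo CD controller runs in the argocd namespace:

```bash
# The tooling ApplicationSet should have generated one Application per prod-labeled cluster.
kubectl get applicationsets,applications -n argocd
```
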
## Creating a new app from the app template
One application cluster is ready to serve apps. Now all we need to do is create configs for a new app and push them to the Config Sync repo; all the prep we have done allows the app to start serving traffic through the ASM gateway.

1. **Run the team_app_add script.**
```bash
./scripts/team_app_add.sh -a whereami -i "gcr.io/google-samples/whereami:v1.2.6" -p ${GKE_PROJECT_ID} -t team-2 -h "whereami.endpoints.${GKE_PROJECT_ID}.cloud.goog"
```

2. **Take a peek at the GKE workloads UI and filter by the whereami app namespaces to locate the application more easily. You will see that the whereami app is starting to roll out to all application clusters labeled as wave-one (there is only one at this point).**

3. **Once the whereami pods have started, navigate to its endpoint and you will see that you are routed to a pod living in the us-west region. You can also curl the endpoint to the same effect.**
```bash
curl https://whereami.endpoints.${GKE_PROJECT_ID}.cloud.goog/
# The output should look something like this...
{
  "cluster_name": "gke-std-west02",
  "host_header": "whereami.endpoints.argo-spike.cloud.goog",
  "pod_name": "whereami-rollout-6d6cb979b5-5xzpj",
  "pod_name_emoji": "🇨🇵",
  "project_id": "argo-spike",
  "timestamp": "2022-08-01T16:16:56",
  "zone": "us-west1-b"
}
```

## Add another application cluster to the Fleet
Let's add another application cluster to the Fleet. This time we will deploy the cluster to us-east and label it as a wave two cluster.

1. **Run the application cluster add script.**
```bash
./scripts/fleet_cluster_add.sh -p ${GKE_PROJECT_ID} -n gke-ap-east01 -l us-east1-b -c "172.16.11.0/28" -t "autopilot" -w two
```

2. **Once the whereami pods have started on the us-east cluster, refresh the endpoint webpage or curl it again, and you will see that you are routed to a pod living in the region closest to you. If you are closer to the west coast and want to see the east coast pod in action, you can deploy a GCE instance on the east coast and curl from there, or spin up a curl container in the us-east cluster and curl the endpoint from it.**
```bash
curl https://whereami.endpoints.${GKE_PROJECT_ID}.cloud.goog/
# The output should look something like this...
{
  "cluster_name": "gke-std-east01",
  "host_header": "whereami.endpoints.argo-spike.cloud.goog",
  "pod_name": "whereami-rollout-6d6cb979b5-x9h4v",
  "pod_name_emoji": "🧍🏽",
  "project_id": "argo-spike",
  "timestamp": "2022-08-01T16:23:42",
  "zone": "us-east1-b"
}
```

## Rolling out a new version of an app
So far we have added application clusters to the Fleet and new apps to those clusters. We have not shown off the wave label just yet, so we will do that now. First we need to create a new app that does a better job of showing off Argo Rollouts. Then we will progressively release the app to wave one clusters, followed by wave two clusters, with a manual gate in between.

1. **Run the team_app_add script.**
```bash
./scripts/team_app_add.sh -a rollout-demo -i "argoproj/rollouts-demo:green" -p ${GKE_PROJECT_ID} -t team-1 -h "rollout-demo.endpoints.${GKE_PROJECT_ID}.cloud.goog"
```

2. **Release a new image of your app to wave one clusters.**
```bash
./scripts/team_app_rollout.sh -a rollout-demo -t team-1 -i "argoproj/rollouts-demo" -l "yellow" -w "one"
```

3. **Check the state of your rollout in the Argo CD UI. You should see a new replicaset and pods being deployed with the new image tag, and a progression through the steps of the rollout that generates an analysis-template result after each step. If the analysis does not pass, the rollout stops and all traffic is sent back to the previous version.**

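The same progression can be followed from a terminal with the Argo Rollouts kubectl plugin; the rollout name and namespace below are assumed from the app and team used above:

```bash
# Watch the canary steps, analysis runs, and replicaset promotion live.
kubectl argo rollouts get rollout rollout-demo -n team-1 --watch
```
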
4. **Now that we have progressively released our new image to the first wave of clusters, we can move on to releasing it to the wave two clusters.**
```bash
./scripts/team_app_rollout.sh -a rollout-demo -t team-1 -i "argoproj/rollouts-demo" -l "yellow" -w "two"
```

5. **All of the waves have been rolled out successfully; merge the new image into main to conclude the rollout.**
```bash
./scripts/team_app_rollout.sh -a rollout-demo -t team-1 -i "argoproj/rollouts-demo" -l "yellow" -w "done"
```