diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index c54f5a39..a231011d 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -39,3 +39,21 @@ steps: propagate-environment: true volumes: - "/var/run/docker.sock:/var/run/docker.sock" + + - name: "VTAdmin Test" + command: + - apk add g++ make bash curl mysql mysql-client chromium + - wget https://golang.org/dl/go1.17.2.linux-amd64.tar.gz + - tar -C /usr/local -xzf go1.17.2.linux-amd64.tar.gz + - export PATH=$PATH:/usr/local/go/bin + - rm go1.17.2.linux-amd64.tar.gz + - make vtadmin-test + concurrency: 1 + concurrency_group: 'vtop/vtadmin-test' + timeout_in_minutes: 60 + plugins: + - docker#v3.12.0: + image: "docker:latest" + propagate-environment: true + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" diff --git a/Makefile b/Makefile index c31c7b1f..817058b5 100644 --- a/Makefile +++ b/Makefile @@ -67,3 +67,7 @@ upgrade-test: build e2e-test-setup backup-restore-test: build e2e-test-setup echo "Running Backup-Restore test" test/endtoend/backup_restore_test.sh + +vtadmin-test: build e2e-test-setup + echo "Running VtAdmin test" + test/endtoend/vtadmin_test.sh diff --git a/deploy/crds/planetscale.com_vitesscells.yaml b/deploy/crds/planetscale.com_vitesscells.yaml index 84429175..8e86a3a3 100644 --- a/deploy/crds/planetscale.com_vitesscells.yaml +++ b/deploy/crds/planetscale.com_vitesscells.yaml @@ -254,6 +254,8 @@ spec: type: string mysqldExporter: type: string + vtadmin: + type: string vtbackup: type: string vtctld: diff --git a/deploy/crds/planetscale.com_vitessclusters.yaml b/deploy/crds/planetscale.com_vitessclusters.yaml index 20fd6c57..44e147d0 100644 --- a/deploy/crds/planetscale.com_vitessclusters.yaml +++ b/deploy/crds/planetscale.com_vitessclusters.yaml @@ -901,6 +901,8 @@ spec: type: string mysqldExporter: type: string + vtadmin: + type: string vtbackup: type: string vtctld: @@ -934,6 +936,8 @@ spec: type: object mysqldExporter: type: string + vtadmin: + type: string 
vtbackup: type: string vtctld: @@ -1989,6 +1993,184 @@ spec: tolerations: x-kubernetes-preserve-unknown-fields: true type: object + vtadmin: + properties: + affinity: + x-kubernetes-preserve-unknown-fields: true + annotations: + additionalProperties: + type: string + type: object + apiAddresses: + items: + type: string + type: array + apiResources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + cells: + items: + type: string + type: array + extraEnv: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + extraFlags: + additionalProperties: + type: string + type: object + extraLabels: + additionalProperties: + type: string + type: object + 
extraVolumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + extraVolumes: + x-kubernetes-preserve-unknown-fields: true + initContainers: + x-kubernetes-preserve-unknown-fields: true + rbac: + properties: + key: + type: string + name: + type: string + volumeName: + type: string + required: + - key + type: object + readOnly: + type: boolean + replicas: + format: int32 + type: integer + service: + properties: + annotations: + additionalProperties: + type: string + type: object + clusterIP: + type: string + type: object + sidecarContainers: + x-kubernetes-preserve-unknown-fields: true + tolerations: + x-kubernetes-preserve-unknown-fields: true + webResources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + required: + - apiAddresses + type: object required: - cells type: object @@ -2087,6 +2269,13 @@ spec: serviceName: type: string type: object + vtadmin: + properties: + available: + type: string + serviceName: + type: string + type: object type: object type: object served: true diff --git a/deploy/crds/planetscale.com_vitesskeyspaces.yaml b/deploy/crds/planetscale.com_vitesskeyspaces.yaml index f06f4419..6917bcde 100644 --- a/deploy/crds/planetscale.com_vitesskeyspaces.yaml +++ b/deploy/crds/planetscale.com_vitesskeyspaces.yaml @@ -176,6 +176,8 @@ spec: type: string 
mysqldExporter: type: string + vtadmin: + type: string vtbackup: type: string vtctld: diff --git a/deploy/crds/planetscale.com_vitessshards.yaml b/deploy/crds/planetscale.com_vitessshards.yaml index ed460544..af21d5df 100644 --- a/deploy/crds/planetscale.com_vitessshards.yaml +++ b/deploy/crds/planetscale.com_vitessshards.yaml @@ -187,6 +187,8 @@ spec: type: string mysqldExporter: type: string + vtadmin: + type: string vtbackup: type: string vtctld: diff --git a/docs/api/index.html b/docs/api/index.html index 9b4d7f20..e0b77564 100644 --- a/docs/api/index.html +++ b/docs/api/index.html @@ -370,6 +370,19 @@
vtadmin
+
+
+VtAdminSpec
+
+
+VtAdmin deploys a set of Vitess Admin servers for the Vitess cluster.
+cells
@@ -1742,7 +1755,8 @@
SecretSource specifies where to find the data for a particular secret value.
@@ -1811,7 +1825,8 @@
ServiceOverrides allows customization of an arbitrary Service object.
@@ -3577,6 +3592,19 @@vtadmin
+
+
+VtAdminSpec
+
+
+VtAdmin deploys a set of Vitess Admin servers for the Vitess cluster.
+cells
@@ -3752,6 +3780,19 @@ vtadmin
+
+
+VtadminStatus
+
+
+Vtadmin is a summary of the status of the vtadmin deployment.
+cells
@@ -4335,6 +4376,19 @@ vtadmin
+
+
+Kubernetes core/v1.PullPolicy
+
+
+Vtadmin is the container image pull policy to use for Vtadmin instances.
+vtorc
@@ -4443,6 +4497,17 @@ vtadmin
+
+string
+
+Vtadmin is the container image (including version tag) to use for Vitess Admin instances.
+vtorc
string
@@ -7439,6 +7504,317 @@ +(Appears on: +VitessClusterSpec) +
++
VtAdminSpec specifies deployment parameters for vtadmin.
+ +| Field | +Description | +
|---|---|
+rbac
+
+
+SecretSource
+
+
+ |
+
+ Rbac contains the rbac config file for vtadmin. +If it is omitted, then it is considered to disable rbac. + |
+
+cells
+
+[]string
+
+ |
+
+ Cells is a list of cell names (as defined in the Cells list) +in which to deploy vtadmin. +Default: Deploy to all defined cells. + |
+
+apiAddresses
+
+[]string
+
+ |
+
+ APIAddresses is a list of vtadmin api addresses +to be used by the vtadmin web for each cell +Either there should be only 1 element in the list +which is used by all the vtadmin-web deployments +or it should match the length of the Cells list + |
+
+replicas
+
+int32
+
+ |
+
+ Replicas is the number of vtadmin instances to deploy in each cell. + |
+
+webResources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+ WebResources determines the compute resources reserved for each vtadmin-web replica. + |
+
+apiResources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+ APIResources determines the compute resources reserved for each vtadmin-api replica. + |
+
+readOnly
+
+bool
+
+ |
+
+ ReadOnly specifies whether the web UI should be read-only +or should it allow users to take actions +Default: false. + |
+
+extraFlags
+
+map[string]string
+
+ |
+
+ ExtraFlags can optionally be used to override default flags set by the +operator, or pass additional flags to vtadmin-api. All entries must be +key-value string pairs of the form “flag”: “value”. The flag name should +not have any prefix (just “flag”, not “-flag”). To set a boolean flag, +set the string value to either “true” or “false”. + |
+
+extraEnv
+
+
+[]Kubernetes core/v1.EnvVar
+
+
+ |
+
+ ExtraEnv can optionally be used to override default environment variables +set by the operator, or pass additional environment variables. + |
+
+extraVolumes
+
+
+[]Kubernetes core/v1.Volume
+
+
+ |
+
+ ExtraVolumes can optionally be used to override default Pod volumes +defined by the operator, or provide additional volumes to the Pod. +Note that when adding a new volume, you should usually also add a +volumeMount to specify where in each container’s filesystem the volume +should be mounted. + |
+
+extraVolumeMounts
+
+
+[]Kubernetes core/v1.VolumeMount
+
+
+ |
+
+ ExtraVolumeMounts can optionally be used to override default Pod +volumeMounts defined by the operator, or specify additional mounts. +Typically, these are used to mount volumes defined through extraVolumes. + |
+
+initContainers
+
+
+[]Kubernetes core/v1.Container
+
+
+ |
+
+ InitContainers can optionally be used to supply extra init containers +that will be run to completion one after another before any app containers are started. + |
+
+sidecarContainers
+
+
+[]Kubernetes core/v1.Container
+
+
+ |
+
+ SidecarContainers can optionally be used to supply extra containers +that run alongside the main containers. + |
+
+affinity
+
+
+Kubernetes core/v1.Affinity
+
+
+ |
+
+ Affinity allows you to set rules that constrain the scheduling of +your vtadmin pods. WARNING: These affinity rules will override all default affinities +that we set; in turn, we can’t guarantee optimal scheduling of your pods if you +choose to set this field. + |
+
+annotations
+
+map[string]string
+
+ |
+
+ Annotations can optionally be used to attach custom annotations to Pods +created for this component. These will be attached to the underlying +Pods that the vtadmin Deployment creates. + |
+
+extraLabels
+
+map[string]string
+
+ |
+
+ ExtraLabels can optionally be used to attach custom labels to Pods +created for this component. These will be attached to the underlying +Pods that the vtadmin Deployment creates. + |
+
+service
+
+
+ServiceOverrides
+
+
+ |
+
+ Service can optionally be used to customize the vtadmin Service. + |
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+ |
+
+ Tolerations allow you to schedule pods onto nodes with matching taints. + |
+
+(Appears on: +VitessClusterStatus) +
++
VtadminStatus is a summary of the status of the vtadmin deployment.
+ +| Field | +Description | +
|---|---|
+available
+
+
+Kubernetes core/v1.ConditionStatus
+
+
+ |
+
+ Available indicates whether the vtadmin service has available endpoints. + |
+
+serviceName
+
+string
+
+ |
+
+ ServiceName is the name of the Service for this cluster’s vtadmin. + |
+
diff --git a/pkg/apis/planetscale/v2/defaults.go b/pkg/apis/planetscale/v2/defaults.go index 12cbf045..91e452c7 100644 --- a/pkg/apis/planetscale/v2/defaults.go +++ b/pkg/apis/planetscale/v2/defaults.go @@ -67,6 +67,10 @@ const ( defaultVtctldCPUMillis = 100 defaultVtctldMemoryBytes = 128 * Mi + defaultVtadminReplicas = 1 + defaultVtadminCPUMillis = 100 + defaultVtadminMemoryBytes = 128 * Mi + defaultVtorcReplicas = 0 defaultVtorcCPUMillis = 100 defaultVtorcMemoryBytes = 128 * Mi @@ -82,6 +86,8 @@ const ( // DefaultWebPort is the port for debug status pages and dashboard UIs. DefaultWebPort = 15000 + // DefaultAPIPort is the port for API endpoint. + DefaultAPIPort = 15001 // DefaultGrpcPort is the port for RPCs. DefaultGrpcPort = 15999 // DefaultMysqlPort is the port for MySQL client connections. @@ -92,6 +98,8 @@ const ( // DefaultWebPortName is the name for the web port. DefaultWebPortName = "web" + // DefaultAPIPortName is the name for the api port. + DefaultAPIPortName = "api" // DefaultGrpcPortName is the name for the RPC port. DefaultGrpcPortName = "grpc" // DefaultMysqlPortName is the name for the MySQL port. diff --git a/pkg/apis/planetscale/v2/labels.go b/pkg/apis/planetscale/v2/labels.go index 9cb9a466..869147a0 100644 --- a/pkg/apis/planetscale/v2/labels.go +++ b/pkg/apis/planetscale/v2/labels.go @@ -40,6 +40,8 @@ const ( // VtctldComponentName is the ComponentLabel value for vtctld. VtctldComponentName = "vtctld" + // VtadminComponentName is the ComponentLabel value for vtadmin. + VtadminComponentName = "vtadmin" // VtorcComponentName is the ComponentLabel value for vtorc. VtorcComponentName = "vtorc" // VtgateComponentName is the ComponentLabel value for vtgate. 
diff --git a/pkg/apis/planetscale/v2/vitesscluster_defaults.go b/pkg/apis/planetscale/v2/vitesscluster_defaults.go index 3f0f319a..dd03e753 100644 --- a/pkg/apis/planetscale/v2/vitesscluster_defaults.go +++ b/pkg/apis/planetscale/v2/vitesscluster_defaults.go @@ -27,6 +27,7 @@ func DefaultVitessCluster(vt *VitessCluster) { defaultGlobalLockserver(vt) DefaultVitessImages(&vt.Spec.Images, DefaultImages) DefaultVitessDashboard(&vt.Spec.VitessDashboard) + DefaultVtAdmin(&vt.Spec.VtAdmin) DefaultVitessKeyspaceTemplates(vt.Spec.Keyspaces) defaultClusterBackup(vt.Spec.Backup) DefaultTopoReconcileConfig(&vt.Spec.TopologyReconciliation) @@ -55,6 +56,9 @@ func DefaultVitessImages(dst *VitessImages, src *VitessImages) { if dst.Vtctld == "" { dst.Vtctld = src.Vtctld } + if dst.Vtadmin == "" { + dst.Vtadmin = src.Vtadmin + } if dst.Vtorc == "" { dst.Vtorc = src.Vtorc } @@ -96,6 +100,39 @@ func DefaultVitessDashboard(dashboard **VitessDashboardSpec) { DefaultServiceOverrides(&(*dashboard).Service) } +func DefaultVtAdmin(dashboard **VtAdminSpec) { + // Do not deploy vtadmin if not specified. 
+ if *dashboard == nil { + return + } + if (*dashboard).Replicas == nil { + (*dashboard).Replicas = pointer.Int32Ptr(defaultVtadminReplicas) + } + if len((*dashboard).WebResources.Requests) == 0 { + (*dashboard).WebResources.Requests = corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(defaultVtadminCPUMillis, resource.DecimalSI), + corev1.ResourceMemory: *resource.NewQuantity(defaultVtadminMemoryBytes, resource.BinarySI), + } + } + if len((*dashboard).WebResources.Limits) == 0 { + (*dashboard).WebResources.Limits = corev1.ResourceList{ + corev1.ResourceMemory: *resource.NewQuantity(defaultVtadminMemoryBytes, resource.BinarySI), + } + } + if len((*dashboard).APIResources.Requests) == 0 { + (*dashboard).APIResources.Requests = corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(defaultVtadminCPUMillis, resource.DecimalSI), + corev1.ResourceMemory: *resource.NewQuantity(defaultVtadminMemoryBytes, resource.BinarySI), + } + } + if len((*dashboard).APIResources.Limits) == 0 { + (*dashboard).APIResources.Limits = corev1.ResourceList{ + corev1.ResourceMemory: *resource.NewQuantity(defaultVtadminMemoryBytes, resource.BinarySI), + } + } + DefaultServiceOverrides(&(*dashboard).Service) +} + func DefaultVitessKeyspaceTemplates(keyspaces []VitessKeyspaceTemplate) { for i := range keyspaces { DefaultVitessKeyspaceTemplate(&keyspaces[i]) diff --git a/pkg/apis/planetscale/v2/vitesscluster_types.go b/pkg/apis/planetscale/v2/vitesscluster_types.go index c5f44306..8a76cf29 100644 --- a/pkg/apis/planetscale/v2/vitesscluster_types.go +++ b/pkg/apis/planetscale/v2/vitesscluster_types.go @@ -76,6 +76,9 @@ type VitessClusterSpec struct { // Dashboard deploys a set of Vitess Dashboard servers (vtctld) for the Vitess cluster. VitessDashboard *VitessDashboardSpec `json:"vitessDashboard,omitempty"` + // VtAdmin deploys a set of Vitess Admin servers for the Vitess cluster. 
+ VtAdmin *VtAdminSpec `json:"vtadmin,omitempty"` + // Cells is a list of templates for VitessCells to create for this cluster. // // Each VitessCell represents a set of Nodes in a given failure domain, @@ -221,6 +224,8 @@ type VitessImages struct { // Vtctld is the container image (including version tag) to use for Vitess Dashboard instances. Vtctld string `json:"vtctld,omitempty"` + // Vtadmin is the container image (including version tag) to use for Vitess Admin instances. + Vtadmin string `json:"vtadmin,omitempty"` // Vtorc is the container image (including version tag) to use for Vitess Orchestrator instances. Vtorc string `json:"vtorc,omitempty"` // Vtgate is the container image (including version tag) to use for Vitess Gateway instances. @@ -261,6 +266,8 @@ type MysqldImage struct { type VitessImagePullPolicies struct { // Vtctld is the container image pull policy to use for Vitess Dashboard instances. Vtctld corev1.PullPolicy `json:"vtctld,omitempty"` + // Vtadmin is the container image pull policy to use for Vtadmin instances. + Vtadmin corev1.PullPolicy `json:"vtadmin,omitempty"` // Vtorc is the container image pull policy to use for Vitess Orchestrator instances. Vtorc corev1.PullPolicy `json:"vtorc,omitempty"` // Vtgate is the container image pull policy to use for Vitess Gateway instances. @@ -422,6 +429,103 @@ type VitessDashboardSpec struct { Tolerations []corev1.Toleration `json:"tolerations,omitempty"` } +// VtAdminSpec specifies deployment parameters for vtadmin. +type VtAdminSpec struct { + // Rbac contains the rbac config file for vtadmin. + // If it is omitted, then it is considered to disable rbac. + Rbac *SecretSource `json:"rbac,omitempty"` + + // Cells is a list of cell names (as defined in the Cells list) + // in which to deploy vtadmin. + // Default: Deploy to all defined cells. 
+ Cells []string `json:"cells,omitempty"` + + // APIAddresses is a list of vtadmin api addresses + // to be used by the vtadmin web for each cell + // Either there should be only 1 element in the list + // which is used by all the vtadmin-web deployments + // or it should match the length of the Cells list + APIAddresses []string `json:"apiAddresses"` + + // Replicas is the number of vtadmin instances to deploy in each cell. + Replicas *int32 `json:"replicas,omitempty"` + + // WebResources determines the compute resources reserved for each vtadmin-web replica. + WebResources corev1.ResourceRequirements `json:"webResources,omitempty"` + + // APIResources determines the compute resources reserved for each vtadmin-api replica. + APIResources corev1.ResourceRequirements `json:"apiResources,omitempty"` + + // ReadOnly specifies whether the web UI should be read-only + // or should it allow users to take actions + // + // Default: false. + ReadOnly *bool `json:"readOnly,omitempty"` + + // ExtraFlags can optionally be used to override default flags set by the + // operator, or pass additional flags to vtadmin-api. All entries must be + // key-value string pairs of the form "flag": "value". The flag name should + // not have any prefix (just "flag", not "-flag"). To set a boolean flag, + // set the string value to either "true" or "false". + ExtraFlags map[string]string `json:"extraFlags,omitempty"` + + // ExtraEnv can optionally be used to override default environment variables + // set by the operator, or pass additional environment variables. + ExtraEnv []corev1.EnvVar `json:"extraEnv,omitempty"` + + // ExtraVolumes can optionally be used to override default Pod volumes + // defined by the operator, or provide additional volumes to the Pod. + // Note that when adding a new volume, you should usually also add a + // volumeMount to specify where in each container's filesystem the volume + // should be mounted. 
+ // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + ExtraVolumes []corev1.Volume `json:"extraVolumes,omitempty"` + + // ExtraVolumeMounts can optionally be used to override default Pod + // volumeMounts defined by the operator, or specify additional mounts. + // Typically, these are used to mount volumes defined through extraVolumes. + ExtraVolumeMounts []corev1.VolumeMount `json:"extraVolumeMounts,omitempty"` + + // InitContainers can optionally be used to supply extra init containers + // that will be run to completion one after another before any app containers are started. + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + InitContainers []corev1.Container `json:"initContainers,omitempty"` + + // SidecarContainers can optionally be used to supply extra containers + // that run alongside the main containers. + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + SidecarContainers []corev1.Container `json:"sidecarContainers,omitempty"` + + // Affinity allows you to set rules that constrain the scheduling of + // your vtadmin pods. WARNING: These affinity rules will override all default affinities + // that we set; in turn, we can't guarantee optimal scheduling of your pods if you + // choose to set this field. + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // Annotations can optionally be used to attach custom annotations to Pods + // created for this component. These will be attached to the underlying + // Pods that the vtadmin Deployment creates. + Annotations map[string]string `json:"annotations,omitempty"` + + // ExtraLabels can optionally be used to attach custom labels to Pods + // created for this component. These will be attached to the underlying + // Pods that the vtadmin Deployment creates. 
+ ExtraLabels map[string]string `json:"extraLabels,omitempty"` + + // Service can optionally be used to customize the vtadmin Service. + Service *ServiceOverrides `json:"service,omitempty"` + + // Tolerations allow you to schedule pods onto nodes with matching taints. + // +kubebuilder:validation:Schemaless + // +kubebuilder:pruning:PreserveUnknownFields + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + // ServiceOverrides allows customization of an arbitrary Service object. type ServiceOverrides struct { // Annotations specifies extra annotations to add to the Service object. @@ -444,6 +548,14 @@ type VitessDashboardStatus struct { ServiceName string `json:"serviceName,omitempty"` } +// VtadminStatus is a summary of the status of the vtadmin deployment. +type VtadminStatus struct { + // Available indicates whether the vtadmin service has available endpoints. + Available corev1.ConditionStatus `json:"available,omitempty"` + // ServiceName is the name of the Service for this cluster's vtadmin. + ServiceName string `json:"serviceName,omitempty"` +} + // VitessClusterStatus defines the observed state of VitessCluster type VitessClusterStatus struct { // The generation observed by the controller. @@ -458,6 +570,9 @@ type VitessClusterStatus struct { // VitessDashboard is a summary of the status of the vtctld deployment. VitessDashboard VitessDashboardStatus `json:"vitessDashboard,omitempty"` + // Vtadmin is a summary of the status of the vtadmin deployment. + Vtadmin VtadminStatus `json:"vtadmin,omitempty"` + // Cells is a summary of the status of desired cells. Cells map[string]VitessClusterCellStatus `json:"cells,omitempty"` // Keyspaces is a summary of the status of desired keyspaces. 
@@ -475,6 +590,9 @@ func NewVitessClusterStatus() VitessClusterStatus { VitessDashboard: VitessDashboardStatus{ Available: corev1.ConditionUnknown, }, + Vtadmin: VtadminStatus{ + Available: corev1.ConditionUnknown, + }, Cells: make(map[string]VitessClusterCellStatus), Keyspaces: make(map[string]VitessClusterKeyspaceStatus), OrphanedCells: make(map[string]OrphanStatus), diff --git a/pkg/apis/planetscale/v2/zz_generated.deepcopy.go b/pkg/apis/planetscale/v2/zz_generated.deepcopy.go index bb0e4f1b..9de3c3c4 100644 --- a/pkg/apis/planetscale/v2/zz_generated.deepcopy.go +++ b/pkg/apis/planetscale/v2/zz_generated.deepcopy.go @@ -1251,6 +1251,11 @@ func (in *VitessClusterSpec) DeepCopyInto(out *VitessClusterSpec) { *out = new(VitessDashboardSpec) (*in).DeepCopyInto(*out) } + if in.VtAdmin != nil { + in, out := &in.VtAdmin, &out.VtAdmin + *out = new(VtAdminSpec) + (*in).DeepCopyInto(*out) + } if in.Cells != nil { in, out := &in.Cells, &out.Cells *out = make([]VitessCellTemplate, len(*in)) @@ -1309,6 +1314,7 @@ func (in *VitessClusterStatus) DeepCopyInto(out *VitessClusterStatus) { *out = *in in.GlobalLockserver.DeepCopyInto(&out.GlobalLockserver) out.VitessDashboard = in.VitessDashboard + out.Vtadmin = in.Vtadmin if in.Cells != nil { in, out := &in.Cells, &out.Cells *out = make(map[string]VitessClusterCellStatus, len(*in)) @@ -2470,6 +2476,136 @@ func (in *VitessTabletStatus) DeepCopy() *VitessTabletStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VtAdminSpec) DeepCopyInto(out *VtAdminSpec) { + *out = *in + if in.Rbac != nil { + in, out := &in.Rbac, &out.Rbac + *out = new(SecretSource) + **out = **in + } + if in.Cells != nil { + in, out := &in.Cells, &out.Cells + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIAddresses != nil { + in, out := &in.APIAddresses, &out.APIAddresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.WebResources.DeepCopyInto(&out.WebResources) + in.APIResources.DeepCopyInto(&out.APIResources) + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.ExtraFlags != nil { + in, out := &in.ExtraFlags, &out.ExtraFlags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExtraEnv != nil { + in, out := &in.ExtraEnv, &out.ExtraEnv + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtraVolumes != nil { + in, out := &in.ExtraVolumes, &out.ExtraVolumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtraVolumeMounts != nil { + in, out := &in.ExtraVolumeMounts, &out.ExtraVolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]v1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SidecarContainers != nil { + in, out := &in.SidecarContainers, &out.SidecarContainers + *out = make([]v1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Annotations 
!= nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExtraLabels != nil { + in, out := &in.ExtraLabels, &out.ExtraLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceOverrides) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VtAdminSpec. +func (in *VtAdminSpec) DeepCopy() *VtAdminSpec { + if in == nil { + return nil + } + out := new(VtAdminSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VtadminStatus) DeepCopyInto(out *VtadminStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VtadminStatus. +func (in *VtadminStatus) DeepCopy() *VtadminStatus { + if in == nil { + return nil + } + out := new(VtadminStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VttabletSpec) DeepCopyInto(out *VttabletSpec) { *out = *in diff --git a/pkg/controller/vitesscluster/reconcile_vtadmin.go b/pkg/controller/vitesscluster/reconcile_vtadmin.go new file mode 100644 index 00000000..669ed889 --- /dev/null +++ b/pkg/controller/vitesscluster/reconcile_vtadmin.go @@ -0,0 +1,373 @@ +/* +Copyright 2022 PlanetScale Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vitesscluster + +import ( + "context" + "fmt" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + planetscalev2 "planetscale.dev/vitess-operator/pkg/apis/planetscale/v2" + "planetscale.dev/vitess-operator/pkg/operator/conditions" + "planetscale.dev/vitess-operator/pkg/operator/reconciler" + "planetscale.dev/vitess-operator/pkg/operator/results" + "planetscale.dev/vitess-operator/pkg/operator/update" + "planetscale.dev/vitess-operator/pkg/operator/vitesscell" + "planetscale.dev/vitess-operator/pkg/operator/vtadmin" + "planetscale.dev/vitess-operator/pkg/operator/vtctld" + "planetscale.dev/vitess-operator/pkg/operator/vtgate" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func (r *ReconcileVitessCluster) reconcileVtadmin(ctx context.Context, vt *planetscalev2.VitessCluster) (reconcile.Result, error) { + resultBuilder := results.Builder{} + // Do not deploy vtadmin if not configured + if vt.Spec.VtAdmin == nil { + return resultBuilder.Result() + } + + // Some checks to validate user input + if len(vt.Spec.Images.Vtadmin) == 0 { + log.Error("Not deploying vtadmin since image is unspecified") + return resultBuilder.Result() + } + + if len(vt.Spec.VtAdmin.APIAddresses) == 0 { + log.Errorf("Not deploying vtadmin since api addresses field is not specified. 
Atleast 1 value is required") + } + + if len(vt.Spec.VtAdmin.APIAddresses) != 1 && len(vt.Spec.VtAdmin.APIAddresses) != len(vt.Spec.VtAdmin.Cells) { + log.Errorf("Not deploying vtadmin since api addresses field doesn't align with cells field") + } + + key := client.ObjectKey{Namespace: vt.Namespace, Name: vtadmin.ServiceName(vt.Name)} + labels := map[string]string{ + planetscalev2.ClusterLabel: vt.Name, + planetscalev2.ComponentLabel: planetscalev2.VtadminComponentName, + } + + // Reconcile vtadmin Service. + err := r.reconciler.ReconcileObject(ctx, vt, key, labels, true, reconciler.Strategy{ + Kind: &corev1.Service{}, + + New: func(key client.ObjectKey) runtime.Object { + svc := vtadmin.NewService(key, labels) + update.ServiceOverrides(svc, vt.Spec.VtAdmin.Service) + return svc + }, + UpdateInPlace: func(key client.ObjectKey, obj runtime.Object) { + svc := obj.(*corev1.Service) + vtadmin.UpdateService(svc, labels) + update.InPlaceServiceOverrides(svc, vt.Spec.VtAdmin.Service) + }, + Status: func(key client.ObjectKey, obj runtime.Object) { + svc := obj.(*corev1.Service) + vt.Status.Vtadmin.ServiceName = svc.Name + }, + }) + if err != nil { + // Record error but continue. + resultBuilder.Error(err) + } + + // Reconcile vtadmin Deployments. + specs, err := r.vtadminSpecs(ctx, vt, labels) + if err != nil { + // Record error and stop. + resultBuilder.Error(err) + return resultBuilder.Result() + } + + // Generate keys (object names) for all desired vtadmin Deployments. + // Keep a map back from generated names to the vtadmin specs. 
+ keys := make([]client.ObjectKey, 0, len(specs)) + specMap := make(map[client.ObjectKey]*vtadmin.Spec, len(specs)) + for _, spec := range specs { + key := client.ObjectKey{Namespace: vt.Namespace, Name: vtadmin.DeploymentName(vt.Name, spec.Cell.Name)} + keys = append(keys, key) + specMap[key] = spec + } + + err = r.reconciler.ReconcileObjectSet(ctx, vt, keys, labels, reconciler.Strategy{ + Kind: &appsv1.Deployment{}, + + New: func(key client.ObjectKey) runtime.Object { + return vtadmin.NewDeployment(key, specMap[key]) + }, + UpdateInPlace: func(key client.ObjectKey, obj runtime.Object) { + newObj := obj.(*appsv1.Deployment) + if *vt.Spec.UpdateStrategy.Type == planetscalev2.ImmediateVitessClusterUpdateStrategyType { + vtadmin.UpdateDeployment(newObj, specMap[key]) + return + } + vtadmin.UpdateDeploymentImmediate(newObj, specMap[key]) + }, + UpdateRollingInPlace: func(key client.ObjectKey, obj runtime.Object) { + newObj := obj.(*appsv1.Deployment) + vtadmin.UpdateDeployment(newObj, specMap[key]) + }, + Status: func(key client.ObjectKey, obj runtime.Object) { + // This function will get called once for each Deployment. + // Aggregate as we go to build an overall status for vtadmin. + curObj := obj.(*appsv1.Deployment) + + // We'll say vtadmin is Available overall if any of the Deployments is available. + // The important thing is that somebody will answer when a client hits the Service. + if available := conditions.Deployment(curObj.Status.Conditions, appsv1.DeploymentAvailable); available != nil { + // Update the overall status if either we found one that's True, or we previously knew nothing at all (Unknown). + if available.Status == corev1.ConditionTrue || vt.Status.Vtadmin.Available == corev1.ConditionUnknown { + vt.Status.Vtadmin.Available = available.Status + } + } + + // TODO(enisoc): Aggregate other important parts of status besides conditions. 
+ }, + }) + if err != nil { + resultBuilder.Error(err) + } + + return resultBuilder.Result() +} + +func (r *ReconcileVitessCluster) vtadminSpecs(ctx context.Context, vt *planetscalev2.VitessCluster, parentLabels map[string]string) ([]*vtadmin.Spec, error) { + var cells []*planetscalev2.VitessCellTemplate + if len(vt.Spec.VtAdmin.Cells) != 0 { + // Deploy only to the specified cells. + for _, cellName := range vt.Spec.VtAdmin.Cells { + cell := vt.Spec.Cell(cellName) + if cell == nil { + r.recorder.Eventf(vt, corev1.EventTypeWarning, "InvalidSpec", "ignoring non-existent cell %q in spec.vtadmin.cells", cellName) + continue + } + cells = append(cells, cell) + } + } else { + // Deploy to all cells. + for i := range vt.Spec.Cells { + cells = append(cells, &vt.Spec.Cells[i]) + } + } + + // Make a vtadmin Deployment spec for each cell. + specs := make([]*vtadmin.Spec, 0, len(cells)) + for idx, cell := range cells { + // Copy parent labels map and add cell-specific label. + labels := make(map[string]string, len(parentLabels)+1) + for k, v := range parentLabels { + labels[k] = v + } + labels[planetscalev2.CellLabel] = cell.Name + + // Merge ExtraFlags into a new map. 
+ extraFlags := make(map[string]string) + update.StringMap(&extraFlags, vt.Spec.VtAdmin.ExtraFlags) + + discoverySecret, err := r.createDiscoverySecret(ctx, vt, cell) + if err != nil { + return nil, err + } + + // We have already checked that atleast 1 value should be available in APIAddresses + apiAddress := vt.Spec.VtAdmin.APIAddresses[0] + if len(vt.Spec.VtAdmin.APIAddresses) > 1 { + apiAddress = vt.Spec.VtAdmin.APIAddresses[idx] + } + + webConfigSecret, err := r.createWebConfigSecret(ctx, vt, cell, apiAddress) + if err != nil { + return nil, err + } + + specs = append(specs, &vtadmin.Spec{ + Cell: cell, + Discovery: discoverySecret, + Rbac: vt.Spec.VtAdmin.Rbac, + WebConfig: webConfigSecret, + Image: vt.Spec.Images.Vtadmin, + ClusterName: vt.ObjectMeta.Name, + ImagePullPolicy: vt.Spec.ImagePullPolicies.Vtadmin, + ImagePullSecrets: vt.Spec.ImagePullSecrets, + Labels: labels, + Replicas: *vt.Spec.VtAdmin.Replicas, + APIResources: vt.Spec.VtAdmin.APIResources, + WebResources: vt.Spec.VtAdmin.WebResources, + Affinity: vt.Spec.VtAdmin.Affinity, + ExtraFlags: extraFlags, + ExtraEnv: vt.Spec.VtAdmin.ExtraEnv, + ExtraVolumes: vt.Spec.VtAdmin.ExtraVolumes, + ExtraVolumeMounts: vt.Spec.VtAdmin.ExtraVolumeMounts, + InitContainers: vt.Spec.VtAdmin.InitContainers, + SidecarContainers: vt.Spec.VtAdmin.SidecarContainers, + Annotations: vt.Spec.VtAdmin.Annotations, + ExtraLabels: vt.Spec.VtAdmin.ExtraLabels, + Tolerations: vt.Spec.VtAdmin.Tolerations, + }) + } + return specs, nil +} + +func (r *ReconcileVitessCluster) createDiscoverySecret(ctx context.Context, vt *planetscalev2.VitessCluster, cell *planetscalev2.VitessCellTemplate) (*planetscalev2.SecretSource, error) { + // Get the vtctld service + vtctldService := corev1.Service{} + err := r.client.Get(ctx, client.ObjectKey{ + Namespace: vt.Namespace, + Name: vtctld.ServiceName(vt.Name), + }, &vtctldService) + if err != nil { + return nil, err + } + + // Find the IP address from the service. This is randomly assigned. 
+ vtctldServiceIP := vtctldService.Spec.ClusterIP + // The web and grpc ports should be set to the default values planetscalev2.DefaultWebPort and planetscalev2.DefaultGrpcPort + // respectively, but since we have the service, we can just read them. + var vtctldServiceWebPort, vtctldServiceGrpcPort int32 + for _, port := range vtctldService.Spec.Ports { + if port.Name == planetscalev2.DefaultWebPortName { + vtctldServiceWebPort = port.Port + } + if port.Name == planetscalev2.DefaultGrpcPortName { + vtctldServiceGrpcPort = port.Port + } + } + + // Read the cell information + vtc := planetscalev2.VitessCell{} + err = r.client.Get(ctx, client.ObjectKey{ + Namespace: vt.Namespace, + Name: vitesscell.Name(vt.Name, cell.Name), + }, &vtc) + if err != nil { + return nil, err + } + + // Get the vtgate service from the cell + vtgateService := corev1.Service{} + err = r.client.Get(ctx, client.ObjectKey{ + Namespace: vtc.Namespace, + Name: vtgate.ServiceName(vt.Name, cell.Name), + }, &vtgateService) + if err != nil { + return nil, err + } + + // Find the IP address from the service. This is randomly assigned. + vtgateServiceIP := vtgateService.Spec.ClusterIP + // The grpc port should be set to the default value planetscalev2.DefaultGrpcPort, + // but since we have the service, we can just read it. 
+ var vtgateServiceGrpcPort int32 + for _, port := range vtgateService.Spec.Ports { + if port.Name == planetscalev2.DefaultGrpcPortName { + vtgateServiceGrpcPort = port.Port + } + } + + // Variables to hold the key, value and secret name to use + discoveryKey := "discovery.json" + discoveryVal := fmt.Sprintf(`{ + "vtctlds": [ + { + "host": { + "fqdn": "%s:%d", + "hostname": "%s:%d" + } + } + ], + "vtgates": [ + { + "host": { + "hostname": "%s:%d" + } + } + ] +}`, vtctldServiceIP, vtctldServiceWebPort, vtctldServiceIP, vtctldServiceGrpcPort, vtgateServiceIP, vtgateServiceGrpcPort) + secretName := vtadmin.DiscoverySecretName(vt.Name, cell.Name) + + // Create or update the secret + err = r.createOrUpdateSecret(ctx, vt, secretName, discoveryKey, discoveryVal) + if err != nil { + return nil, err + } + + // return the secret source, which must align with the secret we created above + return &planetscalev2.SecretSource{ + Name: secretName, + Key: discoveryKey, + }, nil +} + +func (r *ReconcileVitessCluster) createWebConfigSecret(ctx context.Context, vt *planetscalev2.VitessCluster, cell *planetscalev2.VitessCellTemplate, apiAddress string) (*planetscalev2.SecretSource, error) { + // Variables to hold the key, value and secret name to use + configKey := vtadmin.WebConfigFileName + configVal := fmt.Sprintf(`window.env = { + 'REACT_APP_VTADMIN_API_ADDRESS': "%s", + 'REACT_APP_FETCH_CREDENTIALS': "omit", + 'REACT_APP_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS': false, + 'REACT_APP_BUGSNAG_API_KEY': "", + 'REACT_APP_DOCUMENT_TITLE': "", + 'REACT_APP_READONLY_MODE': %s, + };`, apiAddress, convertReadOnlyFieldToString(vt.Spec.VtAdmin.ReadOnly)) + secretName := vtadmin.WebConfigSecretName(vt.Name, cell.Name) + + // Create or update the secret + err := r.createOrUpdateSecret(ctx, vt, secretName, configKey, configVal) + if err != nil { + return nil, err + } + + // return the secret source, which must align with the secret we created above + return &planetscalev2.SecretSource{ + Name: 
secretName, + Key: configKey, + }, nil +} + +func convertReadOnlyFieldToString(readOnly *bool) string { + if readOnly != nil && *readOnly { + return "true" + } + return "false" +} + +func (r *ReconcileVitessCluster) createOrUpdateSecret(ctx context.Context, vt *planetscalev2.VitessCluster, secretName, discoveryKey, discoveryVal string) error { + desiredSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: vt.Namespace, + }, + Type: corev1.SecretTypeOpaque, + StringData: map[string]string{ + discoveryKey: discoveryVal, + }, + } + + secret := corev1.Secret{} + err := r.client.Get(ctx, client.ObjectKey{ + Name: secretName, + Namespace: vt.Namespace, + }, &secret) + if err != nil { + // Create the secret + return r.client.Create(ctx, desiredSecret) + } + // Update the secret + return r.client.Update(ctx, desiredSecret) +} diff --git a/pkg/controller/vitesscluster/vitesscluster_controller.go b/pkg/controller/vitesscluster/vitesscluster_controller.go index b1fc6679..a93f1845 100644 --- a/pkg/controller/vitesscluster/vitesscluster_controller.go +++ b/pkg/controller/vitesscluster/vitesscluster_controller.go @@ -206,6 +206,10 @@ func (r *ReconcileVitessCluster) Reconcile(cctx context.Context, request reconci vtctldResult, err := r.reconcileVtctld(ctx, vt) resultBuilder.Merge(vtctldResult, err) + // Create/update vtadmin deployments. + vtadminResult, err := r.reconcileVtadmin(ctx, vt) + resultBuilder.Merge(vtadminResult, err) + // Create/update Vitess topology records for cells as needed. topoResult, err := r.reconcileTopology(ctx, vt) resultBuilder.Merge(topoResult, err) diff --git a/pkg/operator/secrets/volume_mount.go b/pkg/operator/secrets/volume_mount.go index 7642d4f1..69de6822 100644 --- a/pkg/operator/secrets/volume_mount.go +++ b/pkg/operator/secrets/volume_mount.go @@ -41,6 +41,9 @@ type VolumeMount struct { // DirName is the name of the directory under VolumeMountDir in which to // mount this SecretSource. 
DirName string + // AbsolutePath stores the absolute path to use instead of the generated path + // with /vt/secret as the prefix + AbsolutePath string } // Mount creates a VolumeMount for a given SecretSource. @@ -61,6 +64,9 @@ func (v *VolumeMount) VolumeName() string { // DirPath returns the absolute path to the mounted SecretSource volume. func (v *VolumeMount) DirPath() string { + if len(v.AbsolutePath) > 0 { + return v.AbsolutePath + } return filepath.Join(VolumeMountRootDir, v.DirName) } diff --git a/pkg/operator/vtadmin/deployment.go b/pkg/operator/vtadmin/deployment.go new file mode 100644 index 00000000..ba976d85 --- /dev/null +++ b/pkg/operator/vtadmin/deployment.go @@ -0,0 +1,362 @@ +/* +Copyright 2022 PlanetScale Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vtadmin + +import ( + "fmt" + "planetscale.dev/vitess-operator/pkg/operator/secrets" + "strings" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + + planetscalev2 "planetscale.dev/vitess-operator/pkg/apis/planetscale/v2" + "planetscale.dev/vitess-operator/pkg/operator/k8s" + "planetscale.dev/vitess-operator/pkg/operator/names" + "planetscale.dev/vitess-operator/pkg/operator/update" + "planetscale.dev/vitess-operator/pkg/operator/vitess" +) + +const ( + apiContainerName = "vtadmin-api" + webContainerName = "vtadmin-web" + + webDir = "/vt/web/vtadmin" + apiCommand = "/vt/bin/vtadmin" + + rbacConfigDirName = "rbac-config" + discoveryStaticFilePath = "discovery-config" + + webConfigVolumeName = "config-js" + // Directory where web config should be mounted + webConfigDirPath = "/vt/web/vtadmin/build/config" + // WebConfigFileName is the file name of the web config + WebConfigFileName = "config.js" +) + +// DeploymentName returns the name of the vtadmin Deployment for a given cell. +func DeploymentName(clusterName, cellName string) string { + return names.JoinWithConstraints(names.DefaultConstraints, clusterName, cellName, planetscalev2.VtadminComponentName) +} + +// DiscoverySecretName returns the name of the vtadmin discovery sercret's name for a given cell. +func DiscoverySecretName(clusterName, cellName string) string { + return names.JoinWithConstraints(names.DefaultConstraints, clusterName, cellName, planetscalev2.VtadminComponentName, "discovery") +} + +// WebConfigSecretName returns the name of the vtadmin web config sercret's name for a given cell. 
+func WebConfigSecretName(clusterName, cellName string) string { + return names.JoinWithConstraints(names.DefaultConstraints, clusterName, cellName, planetscalev2.VtadminComponentName, "webConfig") +} + +// Spec specifies all the internal parameters needed to deploy vtadmin, +// as opposed to the API type planetscalev2.VtAdminSpec, which is the public API. +type Spec struct { + Cell *planetscalev2.VitessCellTemplate + // Discovery holds the secret information for the vtctld and vtgate + // endpoints to use by vtadmin + Discovery *planetscalev2.SecretSource + Rbac *planetscalev2.SecretSource + WebConfig *planetscalev2.SecretSource + Image string + ClusterName string + ImagePullPolicy corev1.PullPolicy + ImagePullSecrets []corev1.LocalObjectReference + Labels map[string]string + Replicas int32 + APIResources corev1.ResourceRequirements + WebResources corev1.ResourceRequirements + Affinity *corev1.Affinity + ExtraFlags map[string]string + ExtraEnv []corev1.EnvVar + ExtraVolumes []corev1.Volume + ExtraVolumeMounts []corev1.VolumeMount + InitContainers []corev1.Container + SidecarContainers []corev1.Container + Annotations map[string]string + ExtraLabels map[string]string + Tolerations []corev1.Toleration +} + +// NewDeployment creates a new Deployment object for vtadmin. +func NewDeployment(key client.ObjectKey, spec *Spec) *appsv1.Deployment { + // Fill in the immutable parts. + obj := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: key.Namespace, + Name: key.Name, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: spec.Labels, + }, + }, + } + // Set everything else. + UpdateDeployment(obj, spec) + return obj +} + +// UpdateDeploymentImmediate updates the mutable parts of the vtadmin Deployment +// that are safe to change immediately. +func UpdateDeploymentImmediate(obj *appsv1.Deployment, spec *Spec) { + // Set labels on the Deployment object. 
+ update.Labels(&obj.Labels, spec.Labels) + + // Scaling up or down doesn't require a rolling update. + obj.Spec.Replicas = pointer.Int32Ptr(spec.Replicas) +} + +// UpdateDeployment updates the mutable parts of the vtadmin Deployment +// that should be changed as part of a gradual, rolling update. +func UpdateDeployment(obj *appsv1.Deployment, spec *Spec) { + UpdateDeploymentImmediate(obj, spec) + + // Reset Pod template labels so we remove old ones. + obj.Spec.Template.Labels = nil + // Tell Deployment to set the same labels on the Pods it creates. + update.Labels(&obj.Spec.Template.Labels, spec.Labels) + // Tell Deployment to set user labels on the Pods it creates. + update.Labels(&obj.Spec.Template.Labels, spec.ExtraLabels) + + // Tell Deployment to set annotations on Pods that it creates. + obj.Spec.Template.Annotations = spec.Annotations + + // Deployment options. + obj.Spec.RevisionHistoryLimit = pointer.Int32Ptr(0) + + // Reset the list of volumes in the template so we remove old ones. + obj.Spec.Template.Spec.Volumes = nil + + // Apply user-provided flag overrides after generating base flags. + apiFlags := spec.apiFlags() + for key, value := range spec.ExtraFlags { + // We told users in the CRD API field doc not to put any leading '-', + // but people may not read that so we are liberal in what we accept. + key = strings.TrimLeft(key, "-") + apiFlags[key] = value + } + + // Set only the Pod template fields we care about. + // Use functions from the `operator/update` package for lists + // that should actually be treated like maps (update items by the .Name field). 
+ obj.Spec.Template.Spec.ImagePullSecrets = spec.ImagePullSecrets + obj.Spec.Template.Spec.PriorityClassName = planetscalev2.DefaultVitessPriorityClass + obj.Spec.Template.Spec.ServiceAccountName = planetscalev2.DefaultVitessServiceAccount + obj.Spec.Template.Spec.Tolerations = spec.Tolerations + update.Volumes(&obj.Spec.Template.Spec.Volumes, spec.ExtraVolumes) + + securityContext := &corev1.SecurityContext{} + if planetscalev2.DefaultVitessRunAsUser >= 0 { + securityContext.RunAsUser = pointer.Int64Ptr(planetscalev2.DefaultVitessRunAsUser) + } + + update.PodTemplateContainers(&obj.Spec.Template.Spec.InitContainers, spec.InitContainers) + update.PodTemplateContainers(&obj.Spec.Template.Spec.Containers, spec.SidecarContainers) + + // Make a copy of Resources since it contains pointers. + var apiContainerResources corev1.ResourceRequirements + vtadminAPIContainer := &corev1.Container{ + Name: apiContainerName, + Image: spec.Image, + ImagePullPolicy: spec.ImagePullPolicy, + Command: []string{apiCommand}, + Ports: []corev1.ContainerPort{ + { + Name: planetscalev2.DefaultAPIPortName, + Protocol: corev1.ProtocolTCP, + ContainerPort: planetscalev2.DefaultAPIPort, + }, + }, + Resources: apiContainerResources, + SecurityContext: securityContext, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/health", + Port: intstr.FromString(planetscalev2.DefaultAPIPortName), + }, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/health", + Port: intstr.FromString(planetscalev2.DefaultAPIPortName), + }, + }, + InitialDelaySeconds: 300, + FailureThreshold: 30, + }, + VolumeMounts: spec.ExtraVolumeMounts, + Env: spec.ExtraEnv, + } + update.ResourceRequirements(&apiContainerResources, &spec.APIResources) + updateRbac(spec, apiFlags, vtadminAPIContainer, &obj.Spec.Template.Spec) + updateDiscovery(spec, apiFlags, vtadminAPIContainer, &obj.Spec.Template.Spec) + 
vtadminAPIContainer.Args = apiFlags.FormatArgs() + + var webContainerResources corev1.ResourceRequirements + vtadminWebContainer := &corev1.Container{ + Name: webContainerName, + Image: spec.Image, + ImagePullPolicy: spec.ImagePullPolicy, + Ports: []corev1.ContainerPort{ + { + Name: planetscalev2.DefaultWebPortName, + Protocol: corev1.ProtocolTCP, + ContainerPort: planetscalev2.DefaultWebPort, + }, + }, + Command: []string{webDir + "/node_modules/.bin/serve"}, + Args: []string{ + // Symlinks are required because the web config file is mounted as a + // secret which happens to be mounted as symlink instead of an actual file + "--symlinks", + "--no-clipboard", + "-l", fmt.Sprintf("%d", planetscalev2.DefaultWebPort), + "-s", + webDir + "/build", + }, + Resources: webContainerResources, + SecurityContext: securityContext, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + Port: intstr.FromString(planetscalev2.DefaultWebPortName), + }, + }, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + Port: intstr.FromString(planetscalev2.DefaultWebPortName), + }, + }, + InitialDelaySeconds: 300, + FailureThreshold: 30, + }, + VolumeMounts: spec.ExtraVolumeMounts, + Env: spec.ExtraEnv, + } + updateWebConfig(spec, vtadminWebContainer, &obj.Spec.Template.Spec) + update.ResourceRequirements(&webContainerResources, &spec.WebResources) + update.PodTemplateContainers(&obj.Spec.Template.Spec.Containers, []corev1.Container{*vtadminAPIContainer, *vtadminWebContainer}) + + if spec.Affinity != nil { + obj.Spec.Template.Spec.Affinity = spec.Affinity + } else if spec.Cell.Zone != "" { + // Limit to a specific zone. 
+ obj.Spec.Template.Spec.Affinity = &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: k8s.ZoneFailureDomainLabel, + Operator: corev1.NodeSelectorOpIn, + Values: []string{spec.Cell.Zone}, + }, + }, + }, + }, + }, + }, + } + } else { + obj.Spec.Template.Spec.Affinity = nil + } +} + +func (spec *Spec) apiFlags() vitess.Flags { + return vitess.Flags{ + "addr": fmt.Sprintf(":%d", planetscalev2.DefaultAPIPort), + "http-origin": "*", + "tracer": "opentracing-jaeger", + "grpc-tracing": true, + "http-tracing": true, + + "logtostderr": true, + "alsologtostderr": true, + } +} + +// updateRbac updates the rbac flags and creates the mount for rbac configuration if specified +func updateRbac(spec *Spec, flags vitess.Flags, container *corev1.Container, podSpec *corev1.PodSpec) { + if spec.Rbac != nil { + rbacConfigFile := secrets.Mount(spec.Rbac, rbacConfigDirName) + flags["rbac"] = true + flags["rbac-config"] = rbacConfigFile.FilePath() + + // Add the volume to the Pod, if needed. + update.Volumes(&podSpec.Volumes, rbacConfigFile.PodVolumes()) + // Mount the volume in the Container. + container.VolumeMounts = append(container.VolumeMounts, rbacConfigFile.ContainerVolumeMount()) + } else { + flags["no-rbac"] = true + } +} + +// updateDiscovery updates the cluster flag and mounts the discovery file +func updateDiscovery(spec *Spec, flags vitess.Flags, container *corev1.Container, podSpec *corev1.PodSpec) { + discoveryFile := secrets.Mount(spec.Discovery, discoveryStaticFilePath) + // Add the volume to the Pod, if needed. + update.Volumes(&podSpec.Volumes, discoveryFile.PodVolumes()) + // Mount the volume in the Container. 
+ container.VolumeMounts = append(container.VolumeMounts, discoveryFile.ContainerVolumeMount()) + + clusterFlagVal, clusterFlagExists := flags["cluster"] + var clusterFlagStringProvided string + var isString bool + if clusterFlagExists { + clusterFlagStringProvided, isString = clusterFlagVal.(string) + if !isString { + return + } + } + + // We use the cluster name as the identifier + clusterIdentifier := spec.ClusterName + // If it is not provided, then we use "cluster" + if clusterIdentifier == "" { + clusterIdentifier = "cluster" + } + clusterFlagString := fmt.Sprintf("id=%s,name=%s,discovery=staticfile,discovery-staticfile-path=%s", clusterIdentifier, clusterIdentifier, discoveryFile.FilePath()) + if len(clusterFlagStringProvided) != 0 { + clusterFlagString += "," + clusterFlagStringProvided + } + flags["cluster"] = clusterFlagString +} + +// updateWebConfig mounts the webConfig file to a specific mount path +func updateWebConfig(spec *Spec, container *corev1.Container, podSpec *corev1.PodSpec) { + webConfigFile := secrets.Mount(spec.WebConfig, webConfigVolumeName) + // Set the absolute path since we need the config file to reside in this specific location + // We don't want the mount to happen on a generated directory path + webConfigFile.AbsolutePath = webConfigDirPath + // Add the volume to the Pod, if needed. + update.Volumes(&podSpec.Volumes, webConfigFile.PodVolumes()) + // Mount the volume in the Container. + container.VolumeMounts = append(container.VolumeMounts, webConfigFile.ContainerVolumeMount()) +} diff --git a/pkg/operator/vtadmin/service.go b/pkg/operator/vtadmin/service.go new file mode 100644 index 00000000..791fcb3b --- /dev/null +++ b/pkg/operator/vtadmin/service.go @@ -0,0 +1,74 @@ +/* +Copyright 2022 PlanetScale Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtadmin + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + + planetscalev2 "planetscale.dev/vitess-operator/pkg/apis/planetscale/v2" + "planetscale.dev/vitess-operator/pkg/operator/names" + "planetscale.dev/vitess-operator/pkg/operator/update" +) + +// ServiceName returns the name of the vtadmin Service for a cluster. +func ServiceName(clusterName string) string { + return names.JoinWithConstraints(names.ServiceConstraints, clusterName, planetscalev2.VtadminComponentName) +} + +// NewService creates a new Service object for vtadmin. +func NewService(key client.ObjectKey, labels map[string]string) *corev1.Service { + // Fill in the immutable parts. + obj := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: key.Namespace, + Name: key.Name, + }, + } + // Set everything else. + UpdateService(obj, labels) + return obj +} + +// UpdateService updates the mutable parts of the vtadmin Service. +func UpdateService(obj *corev1.Service, labels map[string]string) { + update.Labels(&obj.Labels, labels) + + obj.Spec.Selector = labels + + // Using named TargetPorts instead of hard-coded port numbers means that + // each Pod can decide what port numbers to use. + // The Pod just needs to assign the proper name to those ports so we + // can find them. + // VTAdmin has 1 web port where the UI is served. It also has an API port. 
+ obj.Spec.Ports = []corev1.ServicePort{ + { + Name: planetscalev2.DefaultWebPortName, + Protocol: corev1.ProtocolTCP, + Port: planetscalev2.DefaultWebPort, + TargetPort: intstr.FromString(planetscalev2.DefaultWebPortName), + }, + { + Name: planetscalev2.DefaultAPIPortName, + Protocol: corev1.ProtocolTCP, + Port: planetscalev2.DefaultAPIPort, + TargetPort: intstr.FromString(planetscalev2.DefaultAPIPortName), + }, + } +} diff --git a/pkg/operator/vtctld/deployment.go b/pkg/operator/vtctld/deployment.go index 4746a3f7..18f18f6b 100644 --- a/pkg/operator/vtctld/deployment.go +++ b/pkg/operator/vtctld/deployment.go @@ -39,7 +39,7 @@ const ( command = "/vt/bin/vtctld" webDir = "/vt/src/vitess.io/vitess/web/vtctld" webDir2 = "/vt/src/vitess.io/vitess/web/vtctld2/app" - serviceMap = "grpc-vtctl" + serviceMap = "grpc-vtctl,grpc-vtctld" ) // DeploymentName returns the name of the vtctld Deployment for a given cell. diff --git a/test/endtoend/operator/101_initial_cluster_vtadmin.yaml b/test/endtoend/operator/101_initial_cluster_vtadmin.yaml new file mode 100644 index 00000000..ee1adafd --- /dev/null +++ b/test/endtoend/operator/101_initial_cluster_vtadmin.yaml @@ -0,0 +1,231 @@ +# The following example is minimalist. The security policies +# and resource specifications are not meant to be used in production. +# Please refer to the operator documentation for recommendations on +# production settings. 
+apiVersion: planetscale.com/v2 +kind: VitessCluster +metadata: + name: example +spec: + images: + vtctld: vitess/lite:v14.0.0-rc1 + vtadmin: vitess/vtadmin:latest + vtgate: vitess/lite:v14.0.0-rc1 + vttablet: vitess/lite:v14.0.0-rc1 + vtbackup: vitess/lite:v14.0.0-rc1 + mysqld: + mysql56Compatible: vitess/lite:v14.0.0-rc1 + mysqldExporter: prom/mysqld-exporter:v0.11.0 + cells: + - name: zone1 + gateway: + authentication: + static: + secret: + name: example-cluster-config + key: users.json + replicas: 1 + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + memory: 256Mi + vitessDashboard: + cells: + - zone1 + extraFlags: + security_policy: read-only + replicas: 1 + resources: + limits: + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + vtadmin: + rbac: + name: example-cluster-config + key: rbac.yaml + cells: + - zone1 + apiAddresses: + - http://localhost:14001 + replicas: 1 + readOnly: false + apiResources: + limits: + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + webResources: + limits: + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + + keyspaces: + - name: commerce + turndownPolicy: Immediate + partitionings: + - equal: + parts: 1 + shardTemplate: + databaseInitScriptSecret: + name: example-cluster-config + key: init_db.sql + replication: + enforceSemiSync: true + tabletPools: + - cell: zone1 + type: replica + replicas: 3 + vttablet: + extraFlags: + db_charset: utf8mb4 + resources: + limits: + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + mysqld: + resources: + limits: + memory: 512Mi + requests: + cpu: 100m + memory: 512Mi + dataVolumeClaimTemplate: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 10Gi + updateStrategy: + type: Immediate +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-cluster-config +type: Opaque +stringData: + users.json: | + { + "user": [{ + "UserData": "user", + "Password": "" + }] + } + init_db.sql: | + # This file is executed immediately after mysql_install_db, 
+ # to initialize a fresh data directory. + + ############################################################################### + # Equivalent of mysql_secure_installation + ############################################################################### + + # Changes during the init db should not make it to the binlog. + # They could potentially create errant transactions on replicas. + SET sql_log_bin = 0; + # Remove anonymous users. + DELETE FROM mysql.user WHERE User = ''; + + # Disable remote root access (only allow UNIX socket). + DELETE FROM mysql.user WHERE User = 'root' AND Host != 'localhost'; + + # Remove test database. + DROP DATABASE IF EXISTS test; + + ############################################################################### + # Vitess defaults + ############################################################################### + + # Vitess-internal database. + CREATE DATABASE IF NOT EXISTS _vt; + # Note that definitions of local_metadata and shard_metadata should be the same + # as in production which is defined in go/vt/mysqlctl/metadata_tables.go. + CREATE TABLE IF NOT EXISTS _vt.local_metadata ( + name VARCHAR(255) NOT NULL, + value VARCHAR(255) NOT NULL, + db_name VARBINARY(255) NOT NULL, + PRIMARY KEY (db_name, name) + ) ENGINE=InnoDB; + CREATE TABLE IF NOT EXISTS _vt.shard_metadata ( + name VARCHAR(255) NOT NULL, + value MEDIUMBLOB NOT NULL, + db_name VARBINARY(255) NOT NULL, + PRIMARY KEY (db_name, name) + ) ENGINE=InnoDB; + + # Admin user with all privileges. + CREATE USER 'vt_dba'@'localhost'; + GRANT ALL ON *.* TO 'vt_dba'@'localhost'; + GRANT GRANT OPTION ON *.* TO 'vt_dba'@'localhost'; + + # User for app traffic, with global read-write access. 
+ CREATE USER 'vt_app'@'localhost'; + GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, + REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, + LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, + SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER + ON *.* TO 'vt_app'@'localhost'; + + # User for app debug traffic, with global read access. + CREATE USER 'vt_appdebug'@'localhost'; + GRANT SELECT, SHOW DATABASES, PROCESS ON *.* TO 'vt_appdebug'@'localhost'; + + # User for administrative operations that need to be executed as non-SUPER. + # Same permissions as vt_app here. + CREATE USER 'vt_allprivs'@'localhost'; + GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, + REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, + LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, + SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER + ON *.* TO 'vt_allprivs'@'localhost'; + + # User for slave replication connections. + # TODO: Should we set a password on this since it allows remote connections? + CREATE USER 'vt_repl'@'%'; + GRANT REPLICATION SLAVE ON *.* TO 'vt_repl'@'%'; + + # User for Vitess filtered replication (binlog player). + # Same permissions as vt_app. + CREATE USER 'vt_filtered'@'localhost'; + GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, + REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, + LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, + SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER + ON *.* TO 'vt_filtered'@'localhost'; + + # User for Orchestrator (https://github.com/openark/orchestrator). + # TODO: Reenable when the password is randomly generated. 
+ #CREATE USER 'orc_client_user'@'%' IDENTIFIED BY 'orc_client_user_password'; + #GRANT SUPER, PROCESS, REPLICATION SLAVE, RELOAD + # ON *.* TO 'orc_client_user'@'%'; + #GRANT SELECT + # ON _vt.* TO 'orc_client_user'@'%'; + + FLUSH PRIVILEGES; + + RESET SLAVE ALL; + RESET MASTER; + rbac.yaml: | + rules: + - resource: "*" + actions: + - "get" + - "create" + - "put" + - "ping" + subjects: ["*"] + clusters: ["*"] + - resource: "Shard" + actions: + - "emergency_reparent_shard" + - "planned_reparent_shard" + subjects: ["*"] + clusters: + - "local" diff --git a/test/endtoend/operator/operator-latest.yaml b/test/endtoend/operator/operator-latest.yaml index dee0c872..947b6f57 100644 --- a/test/endtoend/operator/operator-latest.yaml +++ b/test/endtoend/operator/operator-latest.yaml @@ -286,6 +286,7 @@ status: plural: "" conditions: [] storedVersions: [] + --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -345,6 +346,7 @@ status: plural: "" conditions: [] storedVersions: [] + --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -471,6 +473,8 @@ spec: type: string endpoint: type: string + forcePathStyle: + type: boolean keyPrefix: maxLength: 256 pattern: ^[^\r\n]*$ @@ -515,6 +519,7 @@ status: plural: "" conditions: [] storedVersions: [] + --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -770,6 +775,8 @@ spec: type: string mysqldExporter: type: string + vtadmin: + type: string vtbackup: type: string vtctld: @@ -795,6 +802,8 @@ spec: type: object lockserver: properties: + cellInfoAddress: + type: string etcd: properties: advertisePeerURLs: @@ -1120,6 +1129,7 @@ status: plural: "" conditions: [] storedVersions: [] + --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -1254,6 +1264,8 @@ spec: type: string endpoint: type: string + forcePathStyle: + type: boolean keyPrefix: maxLength: 256 pattern: ^[^\r\n]*$ @@ -1482,6 +1494,8 @@ spec: type: object lockserver: properties: + cellInfoAddress: 
+ type: string etcd: properties: advertisePeerURLs: @@ -1759,6 +1773,8 @@ spec: type: object globalLockserver: properties: + cellInfoAddress: + type: string etcd: properties: advertisePeerURLs: @@ -2016,6 +2032,8 @@ spec: type: string mysqldExporter: type: string + vtadmin: + type: string vtbackup: type: string vtctld: @@ -2049,6 +2067,8 @@ spec: type: object mysqldExporter: type: string + vtadmin: + type: string vtbackup: type: string vtctld: @@ -3104,6 +3124,184 @@ spec: tolerations: x-kubernetes-preserve-unknown-fields: true type: object + vtadmin: + properties: + affinity: + x-kubernetes-preserve-unknown-fields: true + annotations: + additionalProperties: + type: string + type: object + apiAddresses: + items: + type: string + type: array + apiResources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + cells: + items: + type: string + type: array + extraEnv: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + 
x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + extraFlags: + additionalProperties: + type: string + type: object + extraLabels: + additionalProperties: + type: string + type: object + extraVolumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + extraVolumes: + x-kubernetes-preserve-unknown-fields: true + initContainers: + x-kubernetes-preserve-unknown-fields: true + rbac: + properties: + key: + type: string + name: + type: string + volumeName: + type: string + required: + - key + type: object + readOnly: + type: boolean + replicas: + format: int32 + type: integer + service: + properties: + annotations: + additionalProperties: + type: string + type: object + clusterIP: + type: string + type: object + sidecarContainers: + x-kubernetes-preserve-unknown-fields: true + tolerations: + x-kubernetes-preserve-unknown-fields: true + webResources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + required: + - apiAddresses + type: object required: - cells type: object @@ -3202,6 +3400,13 @@ spec: serviceName: type: string type: 
object + vtadmin: + properties: + available: + type: string + serviceName: + type: string + type: object type: object type: object served: true @@ -3214,6 +3419,7 @@ status: plural: "" conditions: [] storedVersions: [] + --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -3347,6 +3553,8 @@ spec: type: string endpoint: type: string + forcePathStyle: + type: boolean keyPrefix: maxLength: 256 pattern: ^[^\r\n]*$ @@ -3389,6 +3597,8 @@ spec: type: string mysqldExporter: type: string + vtadmin: + type: string vtbackup: type: string vtctld: @@ -4450,6 +4660,7 @@ status: plural: "" conditions: [] storedVersions: [] + --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -4583,6 +4794,8 @@ spec: type: string endpoint: type: string + forcePathStyle: + type: boolean keyPrefix: maxLength: 256 pattern: ^[^\r\n]*$ @@ -4636,6 +4849,8 @@ spec: type: string mysqldExporter: type: string + vtadmin: + type: string vtbackup: type: string vtctld: diff --git a/test/endtoend/operator/pf_vtadmin.sh b/test/endtoend/operator/pf_vtadmin.sh new file mode 100755 index 00000000..ebd392aa --- /dev/null +++ b/test/endtoend/operator/pf_vtadmin.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +kubectl port-forward --address localhost "$(kubectl get service --selector="planetscale.com/component=vtctld" -o name | head -n1)" 15000 15999 & +process_id1=$! +kubectl port-forward --address localhost "$(kubectl get service --selector="planetscale.com/component=vtgate,!planetscale.com/cell" -o name | head -n1)" 15306:3306 & +process_id2=$! +kubectl port-forward --address localhost "$(kubectl get service --selector="planetscale.com/component=vtadmin" -o name | head -n1)" 14000:15000 14001:15001 & +process_id3=$! 
+sleep 2 +echo "You may point your browser to http://localhost:15000, use the following aliases as shortcuts:" +echo 'alias vtctlclient="vtctlclient -server=localhost:15999 -logtostderr"' +echo 'alias mysql="mysql -h 127.0.0.1 -P 15306 -u user"' +echo "Hit Ctrl-C to stop the port forwards" +wait $process_id1 +wait $process_id2 +wait $process_id3 diff --git a/test/endtoend/vtadmin_test.sh b/test/endtoend/vtadmin_test.sh new file mode 100755 index 00000000..b7458c48 --- /dev/null +++ b/test/endtoend/vtadmin_test.sh @@ -0,0 +1,212 @@ +#!/bin/bash + +source ./tools/test.env +source ./test/endtoend/utils.sh + +# get_started_vtadmin: +function get_started_vtadmin() { + echo "Apply latest operator-latest.yaml" + kubectl apply -f "operator-latest.yaml" + checkPodStatusWithTimeout "vitess-operator(.*)1/1(.*)Running(.*)" + + echo "Apply 101_initial_cluster_vtadmin.yaml" + kubectl apply -f "101_initial_cluster_vtadmin.yaml" + checkPodStatusWithTimeout "example-zone1-vtctld(.*)1/1(.*)Running(.*)" + checkPodStatusWithTimeout "example-zone1-vtgate(.*)1/1(.*)Running(.*)" + checkPodStatusWithTimeout "example-etcd(.*)1/1(.*)Running(.*)" 3 + checkPodStatusWithTimeout "example-vttablet-zone1(.*)3/3(.*)Running(.*)" 3 + checkPodStatusWithTimeout "example-zone1-vtadmin(.*)2/2(.*)Running(.*)" + + sleep 10 + echo "Creating vschema and commerce SQL schema" + + ./pf_vtadmin.sh > /dev/null 2>&1 & + sleep 5 + + waitForKeyspaceToBeServing commerce - 2 + sleep 5 + + applySchemaWithRetry create_commerce_schema.sql commerce drop_all_commerce_tables.sql + vtctlclient ApplyVSchema -vschema="$(cat vschema_commerce_initial.json)" commerce + if [ $? -ne 0 ]; then + echo "ApplySchema failed for initial commerce" + printMysqlErrorFiles + exit 1 + fi + sleep 5 + + echo "show databases;" | mysql | grep "commerce" > /dev/null 2>&1 + if [ $? 
-ne 0 ]; then + echo "Could not find commerce database" + printMysqlErrorFiles + exit 1 + fi + + echo "show tables;" | mysql commerce | grep -E 'corder|customer|product' | wc -l | grep 3 > /dev/null 2>&1 + if [ $? -ne 0 ]; then + echo "Could not find commerce's tables" + printMysqlErrorFiles + exit 1 + fi + + insertWithRetry + + assertSelect ../common/select_commerce_data.sql "commerce" << EOF +Using commerce +Customer ++-------------+--------------------+ +| customer_id | email | ++-------------+--------------------+ +| 1 | alice@domain.com | +| 2 | bob@domain.com | +| 3 | charlie@domain.com | +| 4 | dan@domain.com | +| 5 | eve@domain.com | ++-------------+--------------------+ +Product ++----------+-------------+-------+ +| sku | description | price | ++----------+-------------+-------+ +| SKU-1001 | Monitor | 100 | +| SKU-1002 | Keyboard | 30 | ++----------+-------------+-------+ +COrder ++----------+-------------+----------+-------+ +| order_id | customer_id | sku | price | ++----------+-------------+----------+-------+ +| 1 | 1 | SKU-1001 | 100 | +| 2 | 2 | SKU-1002 | 30 | +| 3 | 3 | SKU-1002 | 30 | +| 4 | 4 | SKU-1002 | 30 | +| 5 | 5 | SKU-1002 | 30 | ++----------+-------------+----------+-------+ +EOF +} + +# verifyVtadminSetup verifies that we can query the vtadmin api end point +function verifyVtadminSetup() { + # Verify the debug/env page can be curled and it contains the kubernetes environment variables like HOSTNAME + curlGetRequestWithRetry "localhost:14001/debug/env" "HOSTNAME=example-zone1-vtadmin" + # Verify the api/keyspaces page can be curled and it contains the name of the keyspace created + curlGetRequestWithRetry "localhost:14001/api/keyspaces" "commerce" + # Verify the other APIs work as well + curlGetRequestWithRetry "localhost:14001/api/tablets" '"tablets":\[{"cluster":{"id":"example","name":"example"},"tablet":{"alias":{"cell":"zone1"' + curlGetRequestWithRetry "localhost:14001/api/schemas" 
'"keyspace":"commerce","table_definitions":\[{"name":"corder","schema":"CREATE TABLE `corder` (\\n `order_id` bigint(20) NOT NULL AUTO_INCREMENT' + # Verify that we are able to create a keyspace + curlPostRequest "localhost:14001/api/keyspace/example" '{"name":"testKeyspace"}' + # List the keyspaces and check that we have them both + curlGetRequestWithRetry "localhost:14001/api/keyspaces" "commerce.*testKeyspace" + # Try and delete the keyspace but this should fail because of the rbac rules + curlDeleteRequest "localhost:14001/api/keyspace/example/testKeyspace" "unauthorized.*cannot.*delete.*keyspace" + # We should still have both the keyspaces + curlGetRequestWithRetry "localhost:14001/api/keyspaces" "commerce.*testKeyspace" + # Delete the keyspace by using the vtctlclient + vtctlclient DeleteKeyspace testKeyspace + # Verify we still have the commerce keyspace and no other keyspace + curlGetRequestWithRetry "localhost:14001/api/keyspaces" "commerce.*}}}}]" + + # Also verify that the web page works + chromiumHeadlessRequest "http://localhost:14000/schemas" "corder" + chromiumHeadlessRequest "http://localhost:14000/schemas" "customer" + chromiumHeadlessRequest "http://localhost:14000/keyspace/example/commerce/shards" "commerce/-" +} + +function chromiumHeadlessRequest() { + url=$1 + dataToAssert=$2 + for i in {1..600} ; do + chromiumBinary=$(getChromiumBinaryName) + res=$($chromiumBinary --headless --no-sandbox --disable-gpu --enable-logging --dump-dom "$url") + if [ $? -eq 0 ]; then + echo "$res" | grep "$dataToAssert" > /dev/null 2>&1 + if [ $? -ne 0 ]; then + echo -e "The data in $url is incorrect, got:\n$res" + exit 1 + fi + return + fi + echo "failed to query url $url, retrying (attempt #$i) ..." + sleep 1 + done +} + +function getChromiumBinaryName() { + which chromium-browser > /dev/null + if [ $? -eq 0 ]; then + echo "chromium-browser" + return + fi + which chromium > /dev/null + if [ $? 
 -eq 0 ]; then
+    echo "chromium"
+    return
+  fi
+}
+
+function curlGetRequestWithRetry() {
+  url=$1
+  dataToAssert=$2
+  for i in {1..600} ; do
+    res=$(curl "$url")
+    if [ $? -eq 0 ]; then
+      echo "$res" | grep "$dataToAssert" > /dev/null 2>&1
+      if [ $? -ne 0 ]; then
+        echo -e "The data in $url is incorrect, got:\n$res"
+        exit 1
+      fi
+      return
+    fi
+    echo "failed to query url $url, retrying (attempt #$i) ..."
+    sleep 1
+  done; echo "timed out waiting for url $url to become reachable"; exit 1
+}
+
+function curlDeleteRequest() {
+  url=$1
+  dataToAssert=$2
+  res=$(curl -X DELETE "$url")
+  if [ $? -ne 0 ]; then
+    echo -e "The DELETE request to $url failed\n"
+    exit 1
+  fi
+  echo "$res" | grep "$dataToAssert" > /dev/null 2>&1
+  if [ $? -ne 0 ]; then
+    echo -e "The data in delete request to $url is incorrect, got:\n$res"
+    exit 1
+  fi
+}
+
+function curlPostRequest() {
+  url=$1
+  data=$2
+  curl -X POST -d "$data" "$url"
+  if [ $? -ne 0 ]; then
+    echo -e "The POST request to $url with data $data failed\n"
+    exit 1
+  fi
+}
+
+# Test setup
+echo "Building the docker image"
+docker build -f build/Dockerfile.release -t vitess-operator-pr:latest .
+echo "Creating Kind cluster"
+kind create cluster --wait 30s --name kind-${BUILDKITE_BUILD_ID}
+echo "Loading docker image into Kind cluster"
+kind load docker-image vitess-operator-pr:latest --name kind-${BUILDKITE_BUILD_ID}
+
+cd "$PWD/test/endtoend/operator"
+killall kubectl
+setupKubectlAccessForCI
+
+get_started_vtadmin
+verifyVtGateVersion "14.0.0"
+checkSemiSyncSetup
+
+# Check Vtadmin is setup
+# In get_started_vtadmin we verify that the pod for vtadmin exists and is healthy
+# We now try and query the vtadmin api
+verifyVtadminSetup
+
+# Teardown
+echo "Deleting Kind cluster. This also deletes the volume associated with it"
+kind delete cluster --name kind-${BUILDKITE_BUILD_ID}