diff --git a/.github/workflows/deploy-all.yml b/.github/workflows/deploy-all.yml index fedaca33cb8..552015508a7 100644 --- a/.github/workflows/deploy-all.yml +++ b/.github/workflows/deploy-all.yml @@ -9,6 +9,7 @@ on: # Remember to add v before the following version number unless the version is master. - master + jobs: deploy-zh: uses: ./.github/workflows/deploy-zh.yml diff --git a/.github/workflows/deploy-en.yml b/.github/workflows/deploy-en.yml index 1c4d0abf302..af9ef1695c3 100644 --- a/.github/workflows/deploy-en.yml +++ b/.github/workflows/deploy-en.yml @@ -1,12 +1,16 @@ name: Publish English docs via GitHub Pages env: + ACTIONTEST: master # no need to add v before the version number + on: workflow_call: push: branches: # Remember to add v before the following version number unless the version is master. + - master + paths: - 'docs-2.0-en/**' @@ -35,6 +39,7 @@ jobs: git fetch origin gh-pages --depth=1 # fix mike's CI update mike list mike deploy ${{ env.ACTIONTEST }} -p --rebase + # The set-default command is only applicable to the latest version and must be commented in older versions. # mike set-default ${{ env.ACTIONTEST }} -p --rebase # mike list diff --git a/.github/workflows/deploy-zh.yml b/.github/workflows/deploy-zh.yml index dfe52b7ecde..c066daf522f 100644 --- a/.github/workflows/deploy-zh.yml +++ b/.github/workflows/deploy-zh.yml @@ -1,7 +1,9 @@ name: Publish Chinese docs via GitHub Pages and upload to server env: # Remember to add v before the following version number unless the version is master. + ACTIONTEST: master + on: workflow_call: secrets: @@ -9,7 +11,9 @@ on: required: true push: branches: + - master + paths: - 'docs-2.0-zh/**' @@ -43,9 +47,11 @@ jobs: git fetch origin gh-pages-zh --depth=1 # fix mike's CI update mike list mike deploy ${{ env.ACTIONTEST }} -b gh-pages-zh -p --rebase + # The set-default command is only applicable to the latest version and must be commented in older versions. # mike set-default ${{ env.ACTIONTEST }} -b gh-pages-zh -p --rebase # mike list + - name: show Chinese git branches run: | diff --git a/docs-2.0-en/3.ngql-guide/1.nGQL-overview/keywords-and-reserved-words.md b/docs-2.0-en/3.ngql-guide/1.nGQL-overview/keywords-and-reserved-words.md index 92807d4f64e..790f0877559 100644 --- a/docs-2.0-en/3.ngql-guide/1.nGQL-overview/keywords-and-reserved-words.md +++ b/docs-2.0-en/3.ngql-guide/1.nGQL-overview/keywords-and-reserved-words.md @@ -163,6 +163,7 @@ ELSE END ENDS ENDS_WITH +FALSE FORCE FULLTEXT FUZZY @@ -230,6 +231,7 @@ TEXT TEXT_SEARCH THEN TOP +TRUE TTL_COL TTL_DURATION UNWIND @@ -242,6 +244,4 @@ VID_TYPE WILDCARD ZONE ZONES -FALSE -TRUE ``` diff --git a/docs-2.0-en/nebula-operator/1.introduction-to-nebula-operator.md b/docs-2.0-en/nebula-operator/1.introduction-to-nebula-operator.md index 2038c952474..e26976a43a1 100644 --- a/docs-2.0-en/nebula-operator/1.introduction-to-nebula-operator.md +++ b/docs-2.0-en/nebula-operator/1.introduction-to-nebula-operator.md @@ -32,7 +32,7 @@ NebulaGraph Operator does not support the v1.x version of NebulaGraph. NebulaGra | NebulaGraph | NebulaGraph Operator | | ------------- | -------------------- | -| 3.5.x ~ 3.6.0 | 1.5.0, 1.6.x | +| 3.5.x ~ 3.6.0 | 1.5.0 ~ 1.7.x | | 3.0.0 ~ 3.4.1 | 1.3.0, 1.4.0 ~ 1.4.2 | | 3.0.0 ~ 3.3.x | 1.0.0, 1.1.0, 1.2.0 | | 2.5.x ~ 2.6.x | 0.9.0 | @@ -43,9 +43,6 @@ NebulaGraph Operator does not support the v1.x version of NebulaGraph. NebulaGra - The 1.x version NebulaGraph Operator is not compatible with NebulaGraph of version below v3.x. 
- Starting from NebulaGraph Operator 0.9.0, logs and data are stored separately. Using NebulaGraph Operator 0.9.0 or later versions to manage a NebulaGraph 2.5.x cluster created with Operator 0.8.0 can cause compatibility issues. You can backup the data of the NebulaGraph 2.5.x cluster and then create a 2.6.x cluster with Operator 0.9.0. -### Feature limitations - -The NebulaGraph Operator scaling feature is only available for the Enterprise Edition of NebulaGraph clusters and does not support scaling the Community Edition version of NebulaGraph clusters. ## Release note diff --git a/docs-2.0-en/nebula-operator/2.deploy-nebula-operator.md b/docs-2.0-en/nebula-operator/2.deploy-nebula-operator.md index 0017f4e072f..bc66ed38ed7 100644 --- a/docs-2.0-en/nebula-operator/2.deploy-nebula-operator.md +++ b/docs-2.0-en/nebula-operator/2.deploy-nebula-operator.md @@ -18,7 +18,8 @@ Before installing NebulaGraph Operator, you need to install the following softwa !!! note - - If using a role-based access control policy, you need to enable [RBAC](https://kubernetes.io/docs/admin/authorization/rbac) (optional). + - If using a role-based access control policy, you need to enable [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) (optional). + - [CoreDNS](https://coredns.io/) is a flexible and scalable DNS server that is [installed](https://github.com/coredns/helm) for Pods in NebulaGraph clusters. diff --git a/docs-2.0-en/nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md b/docs-2.0-en/nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md index 28f3f501689..27f18e6c593 100644 --- a/docs-2.0-en/nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md +++ b/docs-2.0-en/nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md @@ -22,40 +22,42 @@ The following example shows how to create a NebulaGraph cluster by creating a cl 2. Create a file named `apps_v1alpha1_nebulacluster.yaml`. - - To create a NebulaGraph Community cluster - See [community cluster configurations](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/config/samples/apps_v1alpha1_nebulacluster.yaml). - - ??? Info "Expand to show parameter descriptions of community clusters" - - | Parameter | Default value | Description | - | :---- | :--- | :--- | - | `metadata.name` | - | The name of the created NebulaGraph cluster. | - |`spec.console`|-| Configuration of the Console service. For details, see [nebula-console](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/doc/user/nebula_console.md#nebula-console).| - | `spec.graphd.replicas` | `1` | The numeric value of replicas of the Graphd service. | - | `spec.graphd.image` | `vesoft/nebula-graphd` | The container image of the Graphd service. | - | `spec.graphd.version` | `{{nebula.tag}}` | The version of the Graphd service. | - | `spec.graphd.service` | - | The Service configurations for the Graphd service. | - | `spec.graphd.logVolumeClaim.storageClassName` | - | The log disk storage configurations for the Graphd service. | - | `spec.metad.replicas` | `1` | The numeric value of replicas of the Metad service. | - | `spec.metad.image` | `vesoft/nebula-metad` | The container image of the Metad service. | - | `spec.metad.version` | `{{nebula.tag}}` | The version of the Metad service. | - | `spec.metad.dataVolumeClaim.storageClassName` | - | The data disk storage configurations for the Metad service. 
| - | `spec.metad.logVolumeClaim.storageClassName`|- | The log disk storage configurations for the Metad service.| - | `spec.storaged.replicas` | `3` | The numeric value of replicas of the Storaged service. | - | `spec.storaged.image` | `vesoft/nebula-storaged` | The container image of the Storaged service. | - | `spec.storaged.version` | `{{nebula.tag}}` | The version of the Storaged service. | - | `spec.storaged.dataVolumeClaims.resources.requests.storage` | - | Data disk storage size for the Storaged service. You can specify multiple data disks to store data. When multiple disks are specified, the storage path is `/usr/local/nebula/data1`, `/usr/local/nebula/data2`, etc.| - | `spec.storaged.dataVolumeClaims.resources.storageClassName` | - | The data disk storage configurations for Storaged. If not specified, the global storage parameter is applied. | - | `spec.storaged.logVolumeClaim.storageClassName`|- | The log disk storage configurations for the Storaged service.| - | `spec.storaged.enableAutoBalance` | `true` |Whether to balance data automatically. | - |`spec..securityContext`|`{}`|Defines privilege and access control settings for NebulaGraph service containers. For details, see [SecurityContext](https://github.com/vesoft-inc/nebula-operator/blob/{{operator.branch}}/doc/user/security_context.md). | - |`spec.agent`|`{}`| Configuration of the Agent service. This is used for backup and recovery as well as log cleanup functions. If you do not customize this configuration, the default configuration will be used.| - | `spec.reference.name` | - | The name of the dependent controller. | - | `spec.schedulerName` | - | The scheduler name. | - | `spec.imagePullPolicy` | The image policy to pull the NebulaGraph image. For details, see [Image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy). | The image pull policy in Kubernetes. | - |`spec.logRotate`| - |Log rotation configuration. For more information, see [Manage cluster logs](../8.custom-cluster-configurations/8.4.manage-running-logs.md).| - |`spec.enablePVReclaim`|`false`|Define whether to automatically delete PVCs and release data after deleting the cluster. For more information, see [Reclaim PVs](../8.custom-cluster-configurations/8.2.pv-reclaim.md).| + See [community cluster configurations](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/config/samples/nebulacluster.yaml). + + The following table describes the parameters in the sample configuration file. + + | Parameter | Default value | Description | + | :---------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | + | `metadata.name` | - | The name of the created NebulaGraph cluster. | + | `spec.console` | - | Configuration of the Console service. For details, see [nebula-console](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/doc/user/nebula_console.md#nebula-console). | + | `spec.graphd.replicas` | `1` | The numeric value of replicas of the Graphd service. | + | `spec.graphd.image` | `vesoft/nebula-graphd` | The container image of the Graphd service. | + | `spec.graphd.version` | `v3.6.0` | The version of the Graphd service. 
| + | `spec.graphd.service` | - | The Service configurations for the Graphd service. | + | `spec.graphd.logVolumeClaim.storageClassName` | - | The log disk storage configurations for the Graphd service. | + | `spec.metad.replicas` | `1` | The numeric value of replicas of the Metad service. | + | `spec.metad.image` | `vesoft/nebula-metad` | The container image of the Metad service. | + | `spec.metad.version` | `v3.6.0` | The version of the Metad service. | + | `spec.metad.dataVolumeClaim.storageClassName` | - | The data disk storage configurations for the Metad service. | + | `spec.metad.logVolumeClaim.storageClassName` | - | The log disk storage configurations for the Metad service. | + | `spec.storaged.replicas` | `3` | The numeric value of replicas of the Storaged service. | + | `spec.storaged.image` | `vesoft/nebula-storaged` | The container image of the Storaged service. | + | `spec.storaged.version` | `v3.6.0` | The version of the Storaged service. | + | `spec.storaged.dataVolumeClaims.resources.requests.storage` | - | Data disk storage size for the Storaged service. You can specify multiple data disks to store data. When multiple disks are specified, the storage path is `/usr/local/nebula/data1`, `/usr/local/nebula/data2`, etc. | + | `spec.storaged.dataVolumeClaims.resources.storageClassName` | - | The data disk storage configurations for Storaged. If not specified, the global storage parameter is applied. | + | `spec.storaged.logVolumeClaim.storageClassName` | - | The log disk storage configurations for the Storaged service. | + | `spec.storaged.enableAutoBalance` | `true` | Whether to balance data automatically. | + | `spec..securityContext` | `{}` | Defines privilege and access control settings for NebulaGraph service containers. For details, see [SecurityContext](https://github.com/vesoft-inc/nebula-operator/blob/release-1.5/doc/user/security_context.md). | + | `spec.agent` | `{}` | Configuration of the Agent service. This is used for backup and recovery as well as log cleanup functions. If you do not customize this configuration, the default configuration will be used. | + | `spec.reference.name` | - | The name of the dependent controller. | + | `spec.schedulerName` | - | The scheduler name. | + | `spec.imagePullPolicy` | The image policy to pull the NebulaGraph image. For details, see [Image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy). | The image pull policy in Kubernetes. | + | `spec.logRotate` | - | Log rotation configuration. For more information, see [Manage cluster logs](../8.custom-cluster-configurations/8.4.manage-running-logs.md). | + | `spec.enablePVReclaim` | `false` | Define whether to automatically delete PVCs and release data after deleting the cluster. For more information, see [Reclaim PVs](../8.custom-cluster-configurations/8.2.pv-reclaim.md). | + | | | | + + 3. Create a NebulaGraph cluster. @@ -84,7 +86,9 @@ The following example shows how to create a NebulaGraph cluster by creating a cl ## Scaling clusters -- The cluster scaling feature is for NebulaGraph Enterprise Edition only. + +The cluster scaling feature is for NebulaGraph Enterprise Edition only. 
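+ On Enterprise Edition, a scale-out or scale-in is typically triggered by patching the replica count of the corresponding service in the cluster's CR. A minimal sketch, assuming a cluster named `nebula` (the same `kubectl patch` pattern used in the admission-control examples in these docs): + + ```bash + # Scale the Storage service to 5 replicas (Enterprise Edition only; the cluster name `nebula` is an assumption). + kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"replicas": 5}}}' + ```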
+ ## Delete clusters diff --git a/docs-2.0-en/nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md b/docs-2.0-en/nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md index 5021f367068..66454b702a0 100644 --- a/docs-2.0-en/nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md +++ b/docs-2.0-en/nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md @@ -65,23 +65,11 @@ kubectl -n "${NEBULA_CLUSTER_NAMESPACE}" get pod -l "app.kubernetes.io/cluster=${NEBULA_CLUSTER_NAME}" ``` - Output: - - ```bash - NAME READY STATUS RESTARTS AGE - nebula-graphd-0 1/1 Running 0 5m34s - nebula-graphd-1 1/1 Running 0 5m34s - nebula-metad-0 1/1 Running 0 5m34s - nebula-metad-1 1/1 Running 0 5m34s - nebula-metad-2 1/1 Running 0 5m34s - nebula-storaged-0 1/1 Running 0 5m34s - nebula-storaged-1 1/1 Running 0 5m34s - nebula-storaged-2 1/1 Running 0 5m34s - ``` ## Scaling clusters -- The cluster scaling feature is for NebulaGraph Enterprise Edition only. +The cluster scaling feature is for NebulaGraph Enterprise Edition only. + ## Delete clusters diff --git a/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md b/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md index 8ca11e3f7e8..997933e94b3 100644 --- a/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md +++ b/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md @@ -1,6 +1,8 @@ -# Customize configuration parameters for a NebulaGraph cluster -Meta, Storage, and Graph services in a NebulaGraph Cluster have their own configuration settings, which are defined in the YAML file of the NebulaGraph cluster instance as `config`. These settings are mapped and loaded into the corresponding service's ConfigMap in Kubernetes. At the time of startup, the configuration present in the ConfigMap is mounted onto the directory `/usr/local/nebula/etc/` for every service. +# Customize parameters for a NebulaGraph cluster + +Meta, Storage, and Graph services in a NebulaGraph cluster have their own configuration settings, which are defined in the YAML file of the NebulaGraph cluster instance as `config`. These settings are mapped and loaded into the corresponding service's ConfigMap in Kubernetes. At the time of startup, the configuration present in the ConfigMap is mounted onto the directory `/usr/local/nebula/etc/` for every service. + !!! note @@ -11,6 +13,9 @@ The structure of `config` is as follows. ```go Config map[string]string `json:"config,omitempty"` ``` + + + ## Prerequisites You have created a NebulaGraph cluster. For how to create a cluster with Kubectl, see [Create a cluster with Kubectl](../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md). @@ -25,7 +30,9 @@ The following example uses a cluster named `nebula` and the cluster's configurat kubectl edit nebulaclusters.apps.nebula-graph.io nebula ``` -2. Add `enable_authorize` and `auth_type` under `spec.graphd.config`. + +2. Customize parameters under the `spec.graphd.config` field. In the following sample, the `enable_authorize` and `auth_type` parameters are used for demonstration purposes. + ```yaml apiVersion: apps.nebula-graph.io/v1alpha1 @@ -55,20 +62,122 @@ The following example uses a cluster named `nebula` and the cluster's configurat "auth_type": "password" ... 
``` + + + The parameters that can be added under the `config` field are listed in detail in the [Meta service configuration parameters](../../5.configurations-and-logs/1.configurations/2.meta-config.md), [Storage service configuration parameters](../../5.configurations-and-logs/1.configurations/4.storage-config.md), and [Graph service configuration parameters](../../5.configurations-and-logs/1.configurations/3.graph-config.md) topics. + + !!! note + + * To update cluster configurations without incurring pod restart, ensure that all parameters added under the `config` field support runtime dynamic modification. Check the **Whether supports runtime dynamic modifications** column of the parameter tables on the aforementioned parameter details pages to see if a parameter supports runtime dynamic modification. + * If one or more parameters that do not support runtime dynamic modification are added under the `config` field, pod restart is required for the parameters to take effect. + + To add the `config` for the Meta and Storage services, add `spec.metad.config` and `spec.storaged.config` respectively. 3. Run `kubectl apply -f nebula_cluster.yaml` to push your configuration changes to the cluster. - After customizing the parameters `enable_authorize` and `auth_type`, the configurations in the corresponding ConfigMap (`nebula-graphd`) of the Graph service will be overwritten. -## Modify cluster configurations online + After customizing the parameters, the configurations in the corresponding ConfigMap (`nebula-graphd`) of the Graph service will be overwritten. + + +## Customize port configurations + +You can add the `port` and `ws_http_port` parameters under the `config` field to customize port configurations. For details about these two parameters, see the Networking configurations section in [Meta service configuration parameters](../../5.configurations-and-logs/1.configurations/2.meta-config.md), [Storage service configuration parameters](../../5.configurations-and-logs/1.configurations/4.storage-config.md), and [Graph service configuration parameters](../../5.configurations-and-logs/1.configurations/3.graph-config.md). + +!!! note + + * Pod restart is required for the `port` and `ws_http_port` parameters to take effect. + * It is NOT recommended to modify the `port` parameter after the cluster is started. + +1. Modify the cluster configuration file. 
+ + ```yaml + apiVersion: apps.nebula-graph.io/v1alpha1 + kind: NebulaCluster + metadata: + name: nebula + namespace: default + spec: + graphd: + config: + port: "3669" + ws_http_port: "8080" + resources: + requests: + cpu: "200m" + memory: "500Mi" + limits: + cpu: "1" + memory: "1Gi" + replicas: 1 + image: vesoft/nebula-graphd + version: {{nebula.tag}} + metad: + config: + ws_http_port: 8081 + resources: + requests: + cpu: "300m" + memory: "500Mi" + limits: + cpu: "1" + memory: "1Gi" + replicas: 1 + image: vesoft/nebula-metad + version: {{nebula.tag}} + dataVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: local-path + storaged: + config: + ws_http_port: 8082 + resources: + requests: + cpu: "300m" + memory: "500Mi" + limits: + cpu: "1" + memory: "1Gi" + replicas: 1 + image: vesoft/nebula-storaged + version: {{nebula.tag}} + dataVolumeClaims: + - resources: + requests: + storage: 2Gi + storageClassName: local-path + enableAutoBalance: true + reference: + name: statefulsets.apps + version: v1 + schedulerName: default-scheduler + imagePullPolicy: IfNotPresent + imagePullSecrets: + - name: nebula-image + enablePVReclaim: true + topologySpreadConstraints: + - topologyKey: kubernetes.io/hostname + whenUnsatisfiable: "ScheduleAnyway" + ``` -Cluster configurations are modified online by calling the HTTP interface, without the need to restart the cluster Pod. +2. Run the `kubectl apply -f nebula_cluster.yaml` to push your configuration changes to the cluster. -It should be noted that only when all configuration items in `config` are the parameters that can be dynamically modified at runtime, can the operation of online modifications be triggered. If the configuration items in `config` contain parameters that cannot be dynamically modified, then the cluster configuration will be updated by restarting the Pod. +3. Verify that the configuration takes effect. -For information about the parameters that can be dynamically modified for each service, see the parameter table column of **Whether supports runtime dynamic modifications** in [Meta service configuration parameters](../../5.configurations-and-logs/1.configurations/2.meta-config.md), [Storage service configuration parameters](../../5.configurations-and-logs/1.configurations/4.storage-config.md), and [Graph service configuration parameters](../../5.configurations-and-logs/1.configurations/3.graph-config.md), respectively. + ```bash + kubectl get svc + ``` + Sample response: + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + nebula-graphd-headless ClusterIP None 3669/TCP,8080/TCP 10m + nebula-graphd-svc ClusterIP 10.102.13.115 3669/TCP,8080/TCP 10m + nebula-metad-headless ClusterIP None 9559/TCP,8081/TCP 11m + nebula-storaged-headless ClusterIP None 9779/TCP,8082/TCP,9778/TCP 11m + ``` diff --git a/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md b/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md new file mode 100644 index 00000000000..653a7bab384 --- /dev/null +++ b/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md @@ -0,0 +1,102 @@ +# Enable admission control + +Kubernetes [Admission Control](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) is a security mechanism running as a webhook at runtime. It intercepts and modifies requests to ensure the cluster's security. Admission webhooks involve two main operations: validation and mutation. 
NebulaGraph Operator supports only validation operations and provides some default admission control rules. This topic describes NebulaGraph Operator's default admission control rules and how to enable admission control. + +## Prerequisites + +You have already created a cluster using Kubernetes. For detailed steps, see [Creating a NebulaGraph Cluster with Kubectl](../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md). + +## Admission control rules + +Kubernetes admission control allows you to insert custom logic or policies before Kubernetes API Server processes requests. This mechanism can be used to implement various security policies, such as restricting a Pod's resource consumption or limiting its access permissions. NebulaGraph Operator supports validation operations, which means it validates and intercepts requests without making changes. NebulaGraph Operator's default admission validation control rules include: + +- Ensuring the minimum number of replicas in high availability mode: + + - For Graph service: At least 2 replicas are required. + - For Meta service: At least 3 replicas are required. + - For Storage service: At least 3 replicas are required. + + !!! note + + High availability mode refers to the high availability of NebulaGraph cluster services. Storage and Meta services are stateful, and the number of replicas should be an odd number due to [Raft](../../1.introduction/3.nebula-graph-architecture/4.storage-service.md#raft) protocol requirements for data consistency. In high availability mode, at least 3 Storage services and 3 Meta services are required. Graph services are stateless, so their number of replicas can be even but should be at least 2. + +- Preventing additional PVs from being added to Storage service via `dataVolumeClaims`. + +- Disallowing shrinking the capacity of all service's PVCs, but allowing expansion. + +- Forbidding any secondary operation during Storage service scale-in/scale-out. + +## TLS certificates for admission webhooks + +To ensure secure communication and data integrity between the K8s API server and the admission webhook, this communication is done over HTTPS by default. This means that TLS certificates are required for the admission webhook. [cert-manager](https://cert-manager.io/docs/) is a Kubernetes certificate management controller that automates the issuance and renewal of certificates. NebulaGraph Operator uses cert-manager to manage certificates. + +Once cert-manager is installed and admission control is enabled, NebulaGraph Operator will automatically create an [Issuer](https://cert-manager.io/docs/concepts/issuer/) for issuing the necessary certificate for the admission webhook, and a [Certificate](https://cert-manager.io/docs/concepts/certificate/) for storing the issued certificate. The issued certificate is stored in the `nebula-operator-webhook-secret` Secret. + +## Steps of enabling admission control + +1. Install cert-manager. + + ```bash + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.1/cert-manager.yaml + ``` + + It is suggested to deploy the latest version of cert-manager. For details, see the [official cert-manager documentation](https://cert-manager.io/docs/installation/). + +2. Modify the NebulaGraph Operator configuration file to enable admission control. Admission control is disabled by default and needs to be enabled manually. 
+ + ```bash + # Check the current configuration + helm show values nebula-operator/nebula-operator + ``` + + ```bash + # Modify the configuration by setting `enableAdmissionWebhook` to `true`. + helm upgrade nebula-operator nebula-operator/nebula-operator --set enableAdmissionWebhook=true + ``` + + !!! note + + `nebula-operator` is the name of the chart repository, and `nebula-operator/nebula-operator` is the chart name. If the chart's namespace is not specified, it defaults to `default`. + +3. View the certificate Secret for the admission webhook. + + ```bash + kubectl get secret nebula-operator-webhook-secret -o yaml + ``` + + If the output includes certificate contents, it means that the admission webhook's certificate has been successfully created. + +4. Verify the control rules. + + - Verify the minimum number of replicas in high availability mode. + + ```bash + # Annotate the cluster to enable high availability mode. + $ kubectl annotate nc nebula nebula-graph.io/ha-mode=true + # Verify the minimum number of the Graph service's replicas. + $ kubectl patch nc nebula --type='merge' --patch '{"spec": {"graphd": {"replicas":1}}}' + Error from server: admission webhook "nebulaclustervalidating.nebula-graph.io" denied the request: spec.graphd.replicas: Invalid value: 1: should be at least 2 in HA mode + ``` + + - Verify preventing additional PVs from being added to Storage service. + + ```bash + $ kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"dataVolumeClaims":[{"resources": {"requests": {"storage": "2Gi"}}, "storageClassName": "local-path"},{"resources": {"requests": {"storage": "3Gi"}}, "storageClassName": "fast-disks"}]}}}' + Error from server: admission webhook "nebulaclustervalidating.nebula-graph.io" denied the request: spec.storaged.dataVolumeClaims: Forbidden: storaged dataVolumeClaims is immutable + ``` + + - Verify disallowing shrinking Storage service's PVC capacity. + + ```bash + $ kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"dataVolumeClaims":[{"resources": {"requests": {"storage": "1Gi"}}, "storageClassName": "fast-disks"}]}}}' + Error from server: admission webhook "nebulaclustervalidating.nebula-graph.io" denied the request: spec.storaged.dataVolumeClaims: Invalid value: resource.Quantity{i:resource.int64Amount{value:1073741824, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1Gi", Format:"BinarySI"}: data volume size can only be increased + ``` + + - Verify disallowing any secondary operation during Storage service scale-in. 
+ + ```bash + $ kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"replicas": 5}}}' + nebulacluster.apps.nebula-graph.io/nebula patched + $ kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"replicas": 3}}}' + Error from server: admission webhook "nebulaclustervalidating.nebula-graph.io" denied the request: [spec.storaged: Forbidden: field is immutable while in ScaleOut phase, spec.storaged.replicas: Invalid value: 3: field is immutable while not in Running phase] + ``` diff --git a/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md b/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md new file mode 100644 index 00000000000..17e96ecea25 --- /dev/null +++ b/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md @@ -0,0 +1,103 @@ +# Reclaim PVs + +NebulaGraph Operator uses PVs (Persistent Volumes) and PVCs (Persistent Volume Claims) to store persistent data. If you accidentally delete a NebulaGraph cluster, by default, PV and PVC objects and the relevant data will be retained to ensure data security. + +You can also define the automatic deletion of PVCs to release data by setting the parameter `spec.enablePVReclaim` to `true` in the configuration file of the cluster instance. As for whether PV will be deleted automatically after PVC is deleted, you need to customize the PV reclaim policy. See [reclaimPolicy in StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy) and [PV Reclaiming](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming) for details. + +## Prerequisites + +You have created a cluster. For how to create a cluster with Kubectl, see [Create a cluster with Kubectl](../../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md). + + +## Steps + +The following example uses a cluster named `nebula` and the cluster's configuration file named `nebula_cluster.yaml` to show how to set `enablePVReclaim`: + +1. Run the following command to edit the `nebula` cluster's configuration file. + + ```bash + kubectl edit nebulaclusters.apps.nebula-graph.io nebula + ``` + +2. Add `enablePVReclaim` and set its value to `true` under `spec`. + + ```yaml + apiVersion: apps.nebula-graph.io/v1alpha1 + kind: NebulaCluster + metadata: + name: nebula + spec: + enablePVReclaim: true # Set its value to true. 
+ graphd: + image: vesoft/nebula-graphd + logVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: fast-disks + replicas: 1 + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + version: {{nebula.tag}} + imagePullPolicy: IfNotPresent + metad: + dataVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: fast-disks + image: vesoft/nebula-metad + logVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: fast-disks + replicas: 1 + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + version: {{nebula.tag}} + nodeSelector: + nebula: cloud + reference: + name: statefulsets.apps + version: v1 + schedulerName: default-scheduler + storaged: + dataVolumeClaims: + - resources: + requests: + storage: 2Gi + storageClassName: fast-disks + - resources: + requests: + storage: 2Gi + storageClassName: fast-disks + image: vesoft/nebula-storaged + logVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: fast-disks + replicas: 3 + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + version: {{nebula.tag}} + ... + ``` + +3. Run `kubectl apply -f nebula_cluster.yaml` to push your configuration changes to the cluster. diff --git a/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md b/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md new file mode 100644 index 00000000000..c2be5cc5e57 --- /dev/null +++ b/docs-2.0-en/nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md @@ -0,0 +1,99 @@ +# Dynamically expand persistent volumes + +In a Kubernetes environment, NebulaGraph's data is stored on Persistent Volumes (PVs). Dynamic volume expansion refers to increasing the capacity of a volume without stopping the service, enabling NebulaGraph to accommodate growing data. This topic explains how to dynamically expand the PV for NebulaGraph services in a Kubernetes environment. + +!!! note + + - After the cluster is created, you cannot dynamically increase the number of PVs while the cluster is running. + - The method described in this topic is only for online volume expansion and does not support volume reduction. + +## Background + +In Kubernetes, a StorageClass is a resource that defines a particular storage type. It describes a class of storage, including its provisioner, parameters, and other details. When creating a PersistentVolumeClaim (PVC) and specifying a StorageClass, Kubernetes automatically creates a corresponding PV. The principle of dynamic volume expansion is to edit the PVC and increase the volume's capacity. Kubernetes will then automatically expand the capacity of the PV associated with this PVC based on the specified `storageClassName` in the PVC. During this process, new PVs are not created; the size of the existing PV is changed. Only dynamic storage volumes, typically those associated with a `storageClassName`, support dynamic volume expansion. Additionally, the `allowVolumeExpansion` field in the StorageClass must be set to `true`. For more details, see the [Kubernetes documentation on expanding Persistent Volume Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims). + +In NebulaGraph Operator, you cannot directly edit PVC because Operator automatically creates PVC based on the configuration in the `spec..dataVolumeClaim` of the Nebula Graph cluster. 
Therefore, you need to modify the cluster's configuration to update the PVC and trigger dynamic online volume expansion for the PV. + +## Prerequisites + +- Kubernetes version is equal to or greater than 1.18. +- A StorageClass has been created in the Kubernetes environment. For details, see [Expanding Persistent Volumes Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims). + - Ensure the `allowVolumeExpansion` field in the StorageClass is set to `true`. + - Make sure that the `provisioner` configured in the StorageClass supports dynamic expansion. +- A NebulaGraph cluster has been created in Kubernetes. For specific steps, see [Create a Nebula Graph Cluster with Kubectl](../../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md). +- NebulaGraph cluster Pods are in running status. + +## Online volume expansion example + +In the following example, we assume that the StorageClass is named `ebs-sc` and the NebulaGraph cluster is named `nebula`. We will demonstrate how to dynamically expand the PV for the Storage service. + +1. Check the status of the Storage service Pod: + + ```bash + kubectl get pod + ``` + + Example output: + + ```bash + nebula-storaged-0 1/1 Running 0 43h + ``` + +2. Check the PVC and PV information for the Storage service: + + ```bash + # View PVC + kubectl get pvc + ``` + + Example output: + + ```bash + storaged-data-nebula-storaged-0 Bound pvc-36ca3871-9265-460f-b812-7e73a718xxxx 5Gi RWO ebs-sc 43h + ``` + + ```bash + # View PV and confirm that the capacity of the PV is 5Gi + kubectl get pv + ``` + + Example output: + + ```bash + pvc-36ca3871-9265-460f-b812-xxx 5Gi RWO Delete Bound default/storaged-data-nebula-storaged-0 ebs-sc 43h + ``` + +3. Assuming all the above-mentioned prerequisites are met, use the following command to request an expansion of the PV for the Storage service to 10Gi: + + ```bash + kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"dataVolumeClaims":[{"resources": {"requests": {"storage": "10Gi"}}, "storageClassName": "ebs-sc"}]}}}' + ``` + + Example output: + + ```bash + nebulacluster.apps.nebula-graph.io/nebula patched + ``` + +4. After waiting for about a minute, check the expanded PVC and PV information: + + ```bash + kubectl get pvc + ``` + + Example output: + + ```bash + storaged-data-nebula-storaged-0 Bound pvc-36ca3871-9265-460f-b812-7e73a718xxxx 10Gi RWO ebs-sc 43h + ``` + + ```bash + kubectl get pv + ``` + + Example output: + + ```bash + pvc-36ca3871-9265-460f-b812-xxx 10Gi RWO Delete Bound default/storaged-data-nebula-storaged-0 ebs-sc 43h + ``` + + As you can see, both the PVC and PV capacity have been expanded to 10Gi. 
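+ + If the PVC does not resize, a quick check (assuming the StorageClass is named `ebs-sc`, as in this example) is to confirm that the StorageClass allows expansion: + + ```bash + # Prints "true" when the StorageClass permits online expansion; an empty result or "false" means the PV cannot be expanded this way. + kubectl get storageclass ebs-sc -o jsonpath='{.allowVolumeExpansion}' + ```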
\ No newline at end of file diff --git a/docs-2.0-zh/3.ngql-guide/1.nGQL-overview/keywords-and-reserved-words.md b/docs-2.0-zh/3.ngql-guide/1.nGQL-overview/keywords-and-reserved-words.md index c38f464d5ed..dd0663ad256 100644 --- a/docs-2.0-zh/3.ngql-guide/1.nGQL-overview/keywords-and-reserved-words.md +++ b/docs-2.0-zh/3.ngql-guide/1.nGQL-overview/keywords-and-reserved-words.md @@ -168,6 +168,7 @@ ELSE END ENDS ENDS_WITH +FALSE FORCE FULLTEXT FUZZY @@ -235,6 +236,7 @@ TEXT TEXT_SEARCH THEN TOP +TRUE TTL_COL TTL_DURATION UNWIND @@ -247,6 +249,4 @@ VID_TYPE WILDCARD ZONE ZONES -FALSE -TRUE ``` diff --git a/docs-2.0-zh/nebula-operator/1.introduction-to-nebula-operator.md b/docs-2.0-zh/nebula-operator/1.introduction-to-nebula-operator.md index 674d59d11f4..fdc32412d85 100644 --- a/docs-2.0-zh/nebula-operator/1.introduction-to-nebula-operator.md +++ b/docs-2.0-zh/nebula-operator/1.introduction-to-nebula-operator.md @@ -1,7 +1,6 @@ # 什么是 NebulaGraph Operator - ## 基本概念 NebulaGraph Operator 是用于在 [Kubernetes](https://kubernetes.io) 系统上自动化部署和运维 [NebulaGraph](https://github.com/vesoft-inc/nebula) 集群的工具。依托于 Kubernetes 扩展机制,{{nebula.name}}将其运维领域的知识全面注入至 Kubernetes 系统中,让{{nebula.name}}成为真正的云原生图数据库。 @@ -10,7 +9,8 @@ NebulaGraph Operator 是用于在 [Kubernetes](https://kubernetes.io) 系统上 ## 工作原理 -对于 Kubernetes 系统内不存在的资源类型,用户可以通过添加自定义 API 对象的方式注册,常见的方法是使用 [CustomResourceDefinition(CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions) 。 +对于 Kubernetes 系统内不存在的资源类型,用户可以通过添加自定义 API 对象的方式注册,常见的方法是使用 [CustomResourceDefinition(CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions)。 + NebulaGraph Operator 将{{nebula.name}}集群的部署管理抽象为 CRD。通过结合多个内置的 API 对象,包括 StatefulSet、Service 和 ConfigMap,{{nebula.name}}集群的日常管理和维护被编码为一个控制循环。在 Kubernetes 系统内,每一种内置资源对象,都运行着一个特定的控制循环,将它的实际状态通过事先规定好的编排动作,逐步调整为最终的期望状态。当一个 CR 实例被提交时,NebulaGraph Operator 会根据控制流程驱动数据库集群进入最终状态。 @@ -36,7 +36,7 @@ NebulaGraph Operator 不支持 v1.x 版本的 NebulaGraph,其与{{nebula.name} | {{nebula.name}}版本 | NebulaGraph Operator 版本 | | ------------------- | ------------------------- | -| 3.5.x ~ 3.6.0 | 1.5.0 、1.6.x | +| 3.5.x ~ 3.6.0 | 1.5.0 ~ 1.7.x | | 3.0.0 ~ 3.4.1 | 1.3.0、1.4.0 ~ 1.4.2 | | 3.0.0 ~ 3.3.x | 1.0.0、1.1.0、1.2.0 | | 2.5.x ~ 2.6.x | 0.9.0 | diff --git a/docs-2.0-zh/nebula-operator/2.deploy-nebula-operator.md b/docs-2.0-zh/nebula-operator/2.deploy-nebula-operator.md index 0cdf4ee5cc4..99ed8dff0b1 100644 --- a/docs-2.0-zh/nebula-operator/2.deploy-nebula-operator.md +++ b/docs-2.0-zh/nebula-operator/2.deploy-nebula-operator.md @@ -18,7 +18,8 @@ !!! 
note - - 如果使用基于角色的访问控制的策略,用户需开启 [RBAC](https://kubernetes.io/docs/admin/authorization/rbac)(可选)。 + + - 如果使用基于角色的访问控制的策略,用户需开启 [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)(可选)。 - [CoreDNS](https://coredns.io/) 是一个灵活的、可扩展的 DNS 服务器,被[安装](https://github.com/coredns/helm)在集群内作为集群内 Pods 的 DNS 服务器。{{nebula.name}}集群中的每个组件通过 DNS 解析类似`x.default.svc.cluster.local`这样的域名相互通信。 ## 操作步骤 @@ -163,7 +164,8 @@ helm install nebula-operator nebula-operator/nebula-operator --namespace= 3669/TCP,8080/TCP 10m + nebula-graphd-svc ClusterIP 10.102.13.115 3669/TCP,8080/TCP 10m + nebula-metad-headless ClusterIP None 9559/TCP,8081/TCP 11m + nebula-storaged-headless ClusterIP None 9779/TCP,8082/TCP,9778/TCP 11m + ``` diff --git a/docs-2.0-zh/nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md b/docs-2.0-zh/nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md new file mode 100644 index 00000000000..2b5d916831b --- /dev/null +++ b/docs-2.0-zh/nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md @@ -0,0 +1,103 @@ +# 开启准入控制 + +K8s 的[准入控制(Admission Control)](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/)是一种安全机制,并在运行时作为 Webhook 运行。通过准入 Webhook 对请求进行拦截和修改,从而保证集群的安全性。准入 Webhook 操作包括验证(Validating)和变更(Mutating)两类。NebulaGraph Operator 仅支持验证操作,并提供一些默认的准入控制规则。本文介绍 NebulaGraph Operator 的默认准入控制规则及如何开启准入控制。 + +## 前提条件 + +已使用 K8s 创建一个集群。具体步骤,参见[使用 Kubectl 创建{{nebula.name}}集群](../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md)。 + +## 准入控制规则 + +K8s 的准入控制允许用户在 Kubernetes API Server 处理请求之前,插入自定义的逻辑或策略。这种机制可以用于实现一些安全策略,比如限制 Pod 的资源使用量,或者限制 Pod 的访问权限等。NebulaGraph Operator 仅支持验证操作,即对请求进行验证和拦截,不支持对请求进行变更操作。NebulaGraph Operator 默认的准入验证控制规则包括: + +- 满足高可用模式下的最小副本数: + + - Graph 服务:至少需要 2 个副本。 + - Meta 服务:至少需要 3 个副本。 + - Storage 服务:至少需要 3 个副本。 + + !!! note + + 高可用模式是指{{nebula.name}}集群服务的高可用。Storage 服务和 Meta 服务是有状态的服务,其副本数据通过 [Raft](../../1.introduction/3.nebula-graph-architecture/4.storage-service.md#raft) 协议保持一致性且副本数量不能为偶数。因此,高可用模式下,至少需要 3 个 Storage 服务和 3 个 Meta 服务。Graph 服务为无状态的服务,因此其副本数量可以为偶数,但至少需要 2 个副本。 + +- 禁止通过`dataVolumeClaims`为 Storage 服务追加额外的 PV。 + +- 禁止缩小所有服务的 PVC 的容量,但是可以扩容。 + +- 禁止在 Storage 服务扩缩容期间,进行任何二次操作。 + +## 为准入 Webhook 创建证书 + +为了确保通信的安全性和数据的完整性,K8s 的 API server 和准入 Webhook 之间的通信默认通过 HTTPS 协议进行,因此使用准入控制还需要为准入 Webhook 提供 TLS 证书。[cert-manager](https://cert-manager.io/docs/) 是一个 K8s 的证书管理控制器,可以自动化证书的签发和更新。NebulaGraph Operator 使用 cert-manager 来管理证书。 + +当 cert-manager 安装完成并且开启准入控制时,NebulaGraph Operator 会自动创建一个 [Issuer](https://cert-manager.io/docs/concepts/issuer/),用于签发准入 Webhook 所需的证书,同时会创建一个 [Certificate](https://cert-manager.io/docs/concepts/certificate/),用于存储签发的证书。签发的证书被存储在 +`nebula-operator-webhook-secret`的 Secret 中。 + +## 开启准入控制 + +1. 安装部署 cert-manager。 + + ```bash + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.1/cert-manager.yaml + ``` + + 建议部署最新版本 cert-manager。详情参见 [cert-manager 官方文档](https://cert-manager.io/docs/installation/)。 + +2. 修改 NebulaGraph Operator 的配置文件,开启准入控制。默认准入控制是关闭的,需要手动开启。 + + ```bash + # 查看当前的配置 + helm show values nebula-operator/nebula-operator + ``` + + ```bash + # 修改配置,将`enableAdmissionWebhook`设置为`true`。 + helm upgrade nebula-operator nebula-operator/nebula-operator --set enableAdmissionWebhook=true + ``` + + !!! note + + `nebula-operator`为 chart 所在仓库的名称,`nebula-operator/nebula-operator`为 chart 的名称。如果没有指定 chart 的命名空间,默认为`default`。 + +3. 
查看准入 Webhook 的证书 Secret。 + + ```bash + kubectl get secret nebula-operator-webhook-secret -o yaml + ``` + + 如果输出的结果中包含证书内容,则表示准入 Webhook 的证书已经创建成功。 + +4. 验证控制规则。 + + - 验证高可用模式下的最小副本数。 + + ```bash + # 标注集群为高可用模式 + $ kubectl annotate nc nebula nebula-graph.io/ha-mode=true + # 验证 Graph 服务的最小副本数 + $ kubectl patch nc nebula --type='merge' --patch '{"spec": {"graphd": {"replicas":1}}}' + Error from server: admission webhook "nebulaclustervalidating.nebula-graph.io" denied the request: spec.graphd.replicas: Invalid value: 1: should be at least 2 in HA mode + ``` + + - 验证禁止通过`dataVolumeClaims`为 Storage 服务追加额外的 PV。 + + ```bash + $ kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"dataVolumeClaims":[{"resources": {"requests": {"storage": "2Gi"}}, "storageClassName": "local-path"},{"resources": {"requests": {"storage": "3Gi"}}, "storageClassName": "fask-disks"}]}}}' + Error from server: admission webhook "nebulaclustervalidating.nebula-graph.io" denied the request: spec.storaged.dataVolumeClaims: Forbidden: storaged dataVolumeClaims is immutable + ``` + + - 验证禁止缩小 Storage 服务的 PVC 的容量。 + + ```bash + $ kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"dataVolumeClaims":[{"resources": {"requests": {"storage": "1Gi"}}, "storageClassName": "fast-disks"}]}}}' + Error from server: admission webhook "nebulaclustervalidating.nebula-graph.io" denied the request: spec.storaged.dataVolumeClaims: Invalid value: resource.Quantity{i:resource.int64Amount{value:1073741824, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1Gi", Format:"BinarySI"}: data volume size can only be increased + ``` + + - 验证禁止在 Storage 服务缩容期间,进行任何二次操作。 + + ```bash + $ kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"replicas": 5}}}' + nebulacluster.apps.nebula-graph.io/nebula patched + $ kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"replicas": 3}}}' + Error from server: admission webhook "nebulaclustervalidating.nebula-graph.io" denied the request: [spec.storaged: Forbidden: field is immutable while in ScaleOut phase, spec.storaged.replicas: Invalid value: 3: field is immutable while not in Running phase] + ``` \ No newline at end of file diff --git a/docs-2.0-zh/nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md b/docs-2.0-zh/nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md new file mode 100644 index 00000000000..52a2201a0d4 --- /dev/null +++ b/docs-2.0-zh/nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md @@ -0,0 +1,104 @@ +# 回收 PV + +NebulaGraph Operator 使用持久化卷 PV(Persistent Volume)和持久化卷声明 PVC(Persistent Volume Claim)来存储持久化数据。如果用户不小心删除了一个{{nebula.name}}集群,默认 PV 和 PVC 对象及其数据仍可保留,以确保数据安全。 + +用户也可以在集群实例的配置文件中通过设置参数`spec.enablePVReclaim`为`true`来定义在删除集群后自动删除 PVC 以释放数据。至于在删除 PVC 后是否删除 PV,用户需要自定义 PV 的回收策略。参见 [StorageClass 中设置 reclaimPolicy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy) 和 [PV Reclaiming](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming) 了解 PV 回收策略。 + +## 前提条件 + +已使用 K8s 创建一个集群。具体步骤,参见[使用 Kubectl 创建{{nebula.name}}集群](../../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md)。 + +## 操作步骤 + +以下示例使用名为`nebula`的集群、名为`nebula_cluster.yaml`的 YAML 配置文件,说明如何设置`enablePVReclaim`: + +1. 执行以下命令进入`nebula`集群的编辑页面。 + + ```bash + kubectl edit nebulaclusters.apps.nebula-graph.io nebula + ``` + +2. 
在 YAML 文件的`spec`配置项中,添加`enablePVReclaim`并设置其值为`true`。 + + ```yaml + apiVersion: apps.nebula-graph.io/v1alpha1 + kind: NebulaCluster + metadata: + name: nebula + spec: + enablePVReclaim: true //设置其值为 true。 + graphd: + image: vesoft/nebula-graphd + logVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: fast-disks + replicas: 1 + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + version: {{nebula.tag}} + imagePullPolicy: IfNotPresent + metad: + dataVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: fast-disks + image: vesoft/nebula-metad + logVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: fast-disks + replicas: 1 + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + version: {{nebula.tag}} + nodeSelector: + nebula: cloud + reference: + name: statefulsets.apps + version: v1 + schedulerName: default-scheduler + storaged: + dataVolumeClaims: + - resources: + requests: + storage: 2Gi + storageClassName: fast-disks + - resources: + requests: + storage: 2Gi + storageClassName: fast-disks + image: vesoft/nebula-storaged + logVolumeClaim: + resources: + requests: + storage: 2Gi + storageClassName: fast-disks + replicas: 3 + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + version: {{nebula.tag}} + ... + ``` + +3. 执行`kubectl apply -f nebula_cluster.yaml`使上述更新生效。 + +当集群删除后,系统将会自动删除 PVC 以回收存储资源。是否删除 PV,取决于 PV 的回收策略。 diff --git a/docs-2.0-zh/nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md b/docs-2.0-zh/nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md new file mode 100644 index 00000000000..4e8e1403b54 --- /dev/null +++ b/docs-2.0-zh/nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md @@ -0,0 +1,95 @@ +# 动态在线扩容存储卷 + +在 K8s 环境中,{{nebula.name}}的数据存储在持久化存储卷(PV)上。动态在线扩容存储卷指的是在不停机的情况下增加存储卷的容量,以满足{{nebula.name}}数据增长的需求。本文介绍如何在 K8s 环境中为{{nebula.name}}的服务动态在线扩容存储卷。 + +!!! note + + - 集群创建后,不支持在集群运行时动态增加 PV 的数量。 + - 本文介绍的方法仅使用在线扩容存储卷,不支持在线缩容存储卷。 + +## 背景信息 + +K8s 中,StorageClass 是定义了一种存储类型的资源,它描述了一种存储的类,包括存储的提供者(provisioner)、参数和其他细节。当创建一个 PersistentVolumeClaim(PVC)并指定一个 StorageClass 时,K8s 会自动创建一个对应的 PV。动态扩容存储卷的原理是编辑 PVC 并增加存储卷的容量,然后 K8s 会根据 PVC 中指定的`storageClassName`自动扩容该 PVC 对应的 PV 的容量。在这个过程中,不会创建新的 PV,而是改变现有的 PV 的容量大小。只有动态存储卷才支持存储卷的动态扩容,即配置了`storageClassName`的 PVC。同时 StorageClass 的`allowVolumeExpansion`字段必须为`true`。详情参见 [Expanding Persistent Volumes Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims)。 + +在 Operator 中,不能直接编辑 PVC,因为 Operator 会根据{{nebula.name}}集群服务的配置`spec..dataVolumeClaim`自动创建 PVC。因此,需要通过修改集群的配置来实现 PVC 的配置更新,然后自动触发 PV 的动态在线扩容。 + +## 前提条件 + +- K8s 的版本等于或高于 1.18。 +- 已在 K8s 环境中创建 StorageClass。详情参见 [Expanding Persistent Volumes Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims)。 + - 确保 StorageClass 配置了`allowVolumeExpansion`字段并且值为`true`。 + - 确保 StorageClass 配置的`provisioner`支持动态扩容。 +- 在 K8s 中创建一个{{nebula.name}}集群。具体步骤,参见[使用 Kubectl 创建{{nebula.name}}集群](../../3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md)。 +- {{nebula.name}}集群 Pod 处于运行状态。 + +## 在线扩容存储卷示例 + +以下示例假设 StorageClass 的名称为`ebs-sc`,{{nebula.name}}集群的名称为`nebula`,演示如何在线扩容 Storage 服务的存储卷。 + +1. 查看 Storage 服务 Pod 的状态。 + + ```bash + kubectl get pod + ``` + + 示例输出: + ```bash + nebula-storaged-0 1/1 Running 0 43h + ``` + + +2. 
查看 Storage 服务的 PVC 和 PV 信息。 + + ```bash + # 查看 PVC 信息 + kubectl get pvc + ``` + + 示例输出: + ```bash + storaged-data-nebula-storaged-0 Bound pvc-36ca3871-9265-460f-b812-7e73a718xxxx 5Gi RWO ebs-sc 43h + ``` + + ```bash + # 查看 PV 信息,确认 PV 的容量为 5Gi + kubectl get pv + ``` + + 示例输出: + + ```bash + pvc-36ca3871-9265-460f-b812-xxx 5Gi RWO Delete Bound default/storaged-data-nebula-storaged-0 ebs-sc 43h + ``` + +3. 在符合前提条件的情况下,执行以下命令请求扩容 Storage 服务的存储卷至 10Gi。 + + ```bash + kubectl patch nc nebula --type='merge' --patch '{"spec": {"storaged": {"dataVolumeClaims":[{"resources": {"requests": {"storage": "10Gi"}}, "storageClassName": "ebs-sc"}]}}}' + ``` + + 示例输出: + ```bash + nebulacluster.apps.nebula-graph.io/nebula patched + ``` + +4. 等待一分钟左右查看扩容后的 PVC 和 PV 信息。 + + ```bash + kubectl get pvc + ``` + + 示例输出: + ```bash + storaged-data-nebula-storaged-0 Bound pvc-36ca3871-9265-460f-b812-7e73a718xxxx 10Gi RWO ebs-sc 43h + ``` + + ```bash + kubectl get pv + ``` + + 示例输出: + ```bash + pvc-36ca3871-9265-460f-b812-xxx 10Gi RWO Delete Bound default/storaged-data-nebula-storaged-0 ebs-sc 43h + ``` + + 由上可见,PVC 和 PV 的容量都已扩容至 10Gi。 \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index a7d0499ca03..f703e168e2c 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -82,12 +82,15 @@ markdown_extensions: - pymdownx.superfences - pymdownx.tabbed: alternate_style: true + # collapse content + - pymdownx.details # Plugins plugins: # image lightbox plugin(https://github.com/blueswen/mkdocs-glightbox#usage) - glightbox: zoomable: true + - search # This is the original mkdocs search plugin. To use algolia search, comment out this plugin. - macros: # zh.begin @@ -244,9 +247,9 @@ extra: branch: release-1.2 tag: v1.2.0 operator: - release: 1.5.0 - tag: v1.5.0 - branch: release-1.5 + release: 1.7.1 + tag: v1.7.1 + branch: release-1.7 upgrade_from: 3.5.0 upgrade_to: 3.6.0 exporter: @@ -575,19 +578,19 @@ nav: - What is NebulaGraph Operator: nebula-operator/1.introduction-to-nebula-operator.md - Overview of using NebulaGraph Operator: nebula-operator/6.get-started-with-operator.md - Deploy NebulaGraph Operator: nebula-operator/2.deploy-nebula-operator.md - - Deploy clusters: -#ent - Deploy LM: nebula-operator/3.deploy-nebula-graph-cluster/3.0.deploy-lm.md + - Deploy clusters: - Deploy clusters with Kubectl: nebula-operator/3.deploy-nebula-graph-cluster/3.1create-cluster-with-kubectl.md - Deploy clusters with Helm: nebula-operator/3.deploy-nebula-graph-cluster/3.2create-cluster-with-helm.md - Connect to NebulaGraph databases: nebula-operator/4.connect-to-nebula-graph-service.md - Configure clusters: - - Custom configuration parameters for a NebulaGraph cluster: nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md - - Reclaim PVs: nebula-operator/8.custom-cluster-configurations/8.2.pv-reclaim.md -#ent - Balance storage data after scaling out: nebula-operator/8.custom-cluster-configurations/8.3.balance-data-when-scaling-storage.md + - Customize parameters for a NebulaGraph cluster: nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md + - Storage: + - Dynamically expand persistent volumes: nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md + - Reclaim PVs: nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md - Manage cluster logs: nebula-operator/8.custom-cluster-configurations/8.4.manage-running-logs.md + - Enable admission control: nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md - Upgrade NebulaGraph 
clusters: nebula-operator/9.upgrade-nebula-cluster.md - Specify a rolling update strategy: nebula-operator/11.rolling-update-strategy.md -#ent - Backup and restore: nebula-operator/10.backup-restore-using-operator.md - Self-healing: nebula-operator/5.operator-failover.md - FAQ: nebula-operator/7.operator-faq.md @@ -927,10 +930,13 @@ nav: - 连接 NebulaGraph: nebula-operator/4.connect-to-nebula-graph-service.md - 配置 NebulaGraph: - 自定义 NebulaGraph 集群的配置参数: nebula-operator/8.custom-cluster-configurations/8.1.custom-conf-parameter.md - - 回收 PV: nebula-operator/8.custom-cluster-configurations/8.2.pv-reclaim.md + - 存储: + - 动态在线扩容存储卷: nebula-operator/8.custom-cluster-configurations/storage/8.6.dynamic-expand-pv.md + - 回收 PV: nebula-operator/8.custom-cluster-configurations/storage/8.2.pv-reclaim.md - 管理集群日志: nebula-operator/8.custom-cluster-configurations/8.4.manage-running-logs.md - - 配置滚动更新策略: nebula-operator/11.rolling-update-strategy.md + - 开启准入控制: nebula-operator/8.custom-cluster-configurations/8.6.admission-control.md - 升级 NebulaGraph: nebula-operator/9.upgrade-nebula-cluster.md + - 配置滚动更新策略: nebula-operator/11.rolling-update-strategy.md - 故障自愈: nebula-operator/5.operator-failover.md - 常见问题: nebula-operator/7.operator-faq.md
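For context on how the `extra:` version variables (such as `operator.release` and `operator.tag`) are consumed: the macros plugin listed in this file substitutes them into the Markdown sources at build time, so a single value drives every page that references it. A snippet of the kind used in the docs in this repository, shown here as an illustration of the assumed substitution behavior:

```markdown
See [community cluster configurations](https://github.com/vesoft-inc/nebula-operator/blob/v{{operator.release}}/config/samples/nebulacluster.yaml).
<!-- With operator.release set to 1.7.1 in mkdocs.yml, the link resolves to the blob/v1.7.1/... path. -->
```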