From 0858720f14a9ceb3414b4b5a24c33fc7a1c89ef0 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Mon, 6 Mar 2023 13:15:23 +0200 Subject: [PATCH 01/26] initial commit --- internal/clients/elasticsearch/index.go | 88 +++++ internal/elasticsearch/transform/transform.go | 335 ++++++++++++++++++ .../elasticsearch/transform/transform_test.go | 115 ++++++ internal/models/transform.go | 39 ++ provider/provider.go | 2 + 5 files changed, 579 insertions(+) create mode 100644 internal/elasticsearch/transform/transform.go create mode 100644 internal/elasticsearch/transform/transform_test.go create mode 100644 internal/models/transform.go diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index b9c3054fb..28bfe3075 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -544,3 +544,91 @@ func DeleteIngestPipeline(ctx context.Context, apiClient *clients.ApiClient, nam } return diags } + +func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.PutTransformParams) diag.Diagnostics { + fmt.Println("entering PutTransform") + var diags diag.Diagnostics + pipelineBytes, err := json.Marshal(transform) + if err != nil { + return diag.FromErr(err) + } + + esClient, err := apiClient.GetESClient() + if err != nil { + return diag.FromErr(err) + } + + opts := []func(*esapi.TransformPutTransformRequest){ + esClient.TransformPutTransform.WithContext(ctx), + esClient.TransformPutTransform.WithDeferValidation(params.DeferValidation), + esClient.TransformPutTransform.WithTimeout(params.Timeout), + } + + res, err := esClient.TransformPutTransform(bytes.NewReader(pipelineBytes), transform.Name, opts...) 
+ if err != nil { + return diag.FromErr(err) + } + + defer res.Body.Close() + if diags := utils.CheckError(res, fmt.Sprintf("Unable to create transform: %s", transform.Name)); diags.HasError() { + return diags + } + + return diags +} + +func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) (*models.Transform, diag.Diagnostics) { + fmt.Println("entering GetTransform for ", *name) + var diags diag.Diagnostics + esClient, err := apiClient.GetESClient() + if err != nil { + return nil, diag.FromErr(err) + } + req := esClient.TransformGetTransform.WithTransformID(*name) + res, err := esClient.TransformGetTransform(req, esClient.TransformGetTransform.WithContext(ctx)) + if err != nil { + return nil, diag.FromErr(err) + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return nil, nil + } + if diags := utils.CheckError(res, fmt.Sprintf("Unable to get requested transform: %s", *name)); diags.HasError() { + return nil, diags + } + + transformsResponse := models.GetTransformResponse{} + if err := json.NewDecoder(res.Body).Decode(&transformsResponse); err != nil { + return nil, diag.FromErr(err) + } + + for _, t := range transformsResponse.Transforms { + if t.Id == *name { + t.Name = *name + return &t, diags + } + } + + return nil, diags +} + +func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name string) diag.Diagnostics { + fmt.Println("entering DeleteTransform for ", name) + var diags diag.Diagnostics + + esClient, err := apiClient.GetESClient() + if err != nil { + return diag.FromErr(err) + } + + res, err := esClient.TransformDeleteTransform(name, esClient.TransformDeleteTransform.WithForce(true), esClient.TransformDeleteTransform.WithContext(ctx)) + if err != nil { + return diag.FromErr(err) + } + defer res.Body.Close() + if diags := utils.CheckError(res, fmt.Sprintf("Unable to delete the transform: %s", name)); diags.HasError() { + return diags + } + + return diags +} diff --git 
a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go new file mode 100644 index 000000000..c6ca4ea36 --- /dev/null +++ b/internal/elasticsearch/transform/transform.go @@ -0,0 +1,335 @@ +package transform + +import ( + "context" + "encoding/json" + "fmt" + //"reflect" + "regexp" + //"strconv" + "strings" + "time" + + "github.com/elastic/terraform-provider-elasticstack/internal/clients" + "github.com/elastic/terraform-provider-elasticstack/internal/clients/elasticsearch" + "github.com/elastic/terraform-provider-elasticstack/internal/models" + "github.com/elastic/terraform-provider-elasticstack/internal/utils" + //"github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + //"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceTransform() *schema.Resource { + transformSchema := map[string]*schema.Schema{ + "id": { + Description: "Internal identifier of the resource", + Type: schema.TypeString, + Computed: true, + }, + "name": { + Description: "Name of the transform you wish to create.", + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[a-z0-9_-]+$`), "must contain only lower case alphanumeric characters, hyphens, and underscores"), + validation.StringMatch(regexp.MustCompile(`^[a-z0-9].*[a-z0-9]$`), "must start and end with a lowercase alphanumeric character"), + ), + }, + "description": { + Description: "Free text description of the transform.", + Type: schema.TypeString, + Optional: true, + }, + "source": { + Description: "The source of the data for the transform.", + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "indices": { + Description: "The source indices for the transform.", + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "query": { + Description: "A query clause that retrieves a subset of data from the source index.", + Type: schema.TypeString, + Optional: true, + Default: `{"match_all":{}}`, + DiffSuppressFunc: utils.DiffJsonSuppress, + ValidateFunc: validation.StringIsJSON, + }, + "runtime_mappings": { + Description: "Definitions of search-time runtime fields that can be used by the transform.", + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "destination": { + Description: "The destination for the transform.", + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "index": { + Description: "The destination index for the transform.", + Type: schema.TypeString, + Required: true, + }, + "pipeline": { + Description: "The unique identifier for an ingest pipeline.", + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "pivot": { + Description: "The pivot method transforms the data by aggregating and grouping it.", + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"pivot", "latest"}, + DiffSuppressFunc: utils.DiffJsonSuppress, + ValidateFunc: validation.StringIsJSON, + ForceNew: true, + }, + "latest": { + Description: "The latest method transforms the data by finding the latest document for each unique key.", + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"pivot", "latest"}, + DiffSuppressFunc: utils.DiffJsonSuppress, + ValidateFunc: validation.StringIsJSON, + ForceNew: true, + }, + "frequency": { + Type: schema.TypeString, + Description: "The interval between checks for changes in the source indices when the transform is running continuously. 
Defaults to `1m`.", + Optional: true, + Default: "1m", + ValidateFunc: utils.StringIsDuration, + }, + "metadata": { + Description: "Defines optional transform metadata.", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: utils.DiffJsonSuppress, + }, + "defer_validation": { + Type: schema.TypeBool, + Description: "When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created.", + Optional: true, + Default: false, + }, + "timeout": { + Type: schema.TypeString, + Description: "Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`.", + Optional: true, + Default: "30s", + ValidateFunc: utils.StringIsDuration, + }, + } + + utils.AddConnectionSchema(transformSchema) + + return &schema.Resource{ + Schema: transformSchema, + Description: "Manages Elasticsearch transforms. 
See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html", + + CreateContext: resourceTransformCreate, + ReadContext: resourceTransformRead, + UpdateContext: resourceTransformUpdate, + DeleteContext: resourceTransformDelete, + } +} + +func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformCreate") + + client, diags := clients.NewApiClient(d, meta) + if diags.HasError() { + return diags + } + + transformName := d.Get("name").(string) + id, diags := client.ID(ctx, transformName) + if diags.HasError() { + return diags + } + + transform, err := getTransformFromResourceData(ctx, d, transformName) + if err != nil { + return diag.FromErr(err) + } + + params := models.PutTransformParams{ + DeferValidation: d.Get("defer_validation").(bool), + } + + timeout, err := time.ParseDuration(d.Get("timeout").(string)) + if err != nil { + return diag.FromErr(err) + } + params.Timeout = timeout + + if diags := elasticsearch.PutTransform(ctx, client, transform, ¶ms); diags.HasError() { + return diags + } + + d.SetId(id.String()) + return resourceTransformRead(ctx, d, meta) +} + +func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformRead") + client, diags := clients.NewApiClient(d, meta) + if diags.HasError() { + return diags + } + compId, diags := clients.CompositeIdFromStr(d.Id()) + if diags.HasError() { + return diags + } + + transformName := compId.ResourceId + if err := d.Set("name", transformName); err != nil { + return diag.FromErr(err) + } + + transform, diags := elasticsearch.GetTransform(ctx, client, &transformName) + if transform == nil && diags == nil { + tflog.Warn(ctx, fmt.Sprintf(`Transform "%s" not found, removing from state`, compId.ResourceId)) + d.SetId("") + return diags + } + if diags.HasError() { + return diags + } + + return diags +} + +func 
resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformDelete") + client, diags := clients.NewApiClient(d, meta) + if diags.HasError() { + return diags + } + + id := d.Id() + compId, diags := clients.CompositeIdFromStr(id) + if diags.HasError() { + return diags + } + + if diags := elasticsearch.DeleteTransform(ctx, client, compId.ResourceId); diags.HasError() { + return diags + } + + return diags +} + +func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformUpdate") + // TODO + client, diags := clients.NewApiClient(d, meta) + if diags.HasError() { + return diags + } + + transformName := d.Get("name").(string) + _, diags = client.ID(ctx, transformName) + if diags.HasError() { + return diags + } + + return resourceTransformRead(ctx, d, meta) +} + +func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, name string) (*models.Transform, error) { + + var transform models.Transform + transform.Name = name + + if v, ok := d.GetOk("description"); ok { + transform.Description = v.(string) + } + + if v, ok := d.GetOk("source"); ok { + definedSource := v.([]interface{})[0].(map[string]interface{}) + + indices := make([]string, 0) + for _, i := range definedSource["indices"].([]interface{}) { + indices = append(indices, i.(string)) + } + transform.Source = models.TransformSource{ + Indices: indices, + } + + if v, ok := definedSource["query"]; ok && len(v.(string)) > 0 { + var query interface{} + if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&query); err != nil { + return nil, err + } + transform.Source.Query = query + } + + if v, ok := definedSource["runtime_mappings"]; ok && len(v.(string)) > 0 { + var runtimeMappings interface{} + if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&runtimeMappings); err != nil { + return nil, err + } + 
transform.Source.RuntimeMappings = runtimeMappings + } + } + + if v, ok := d.GetOk("destination"); ok { + definedDestination := v.([]interface{})[0].(map[string]interface{}) + transform.Destination = models.TransformDestination{ + Index: definedDestination["index"].(string), + } + + if pipeline, ok := definedDestination["pipeline"]; ok { + transform.Destination.Pipeline = pipeline.(string) + } + } + + if v, ok := d.GetOk("pivot"); ok { + var pivot interface{} + if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&pivot); err != nil { + return nil, err + } + transform.Pivot = pivot + } + + if v, ok := d.GetOk("latest"); ok { + var latest interface{} + if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&latest); err != nil { + return nil, err + } + transform.Latest = latest + } + + if v, ok := d.GetOk("metadata"); ok { + metadata := make(map[string]interface{}) + if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&metadata); err != nil { + return nil, err + } + transform.Meta = metadata + } + + return &transform, nil +} diff --git a/internal/elasticsearch/transform/transform_test.go b/internal/elasticsearch/transform/transform_test.go new file mode 100644 index 000000000..cac2dab8f --- /dev/null +++ b/internal/elasticsearch/transform/transform_test.go @@ -0,0 +1,115 @@ +package transform_test + +import ( + //"context" + "fmt" + //"regexp" + "testing" + + "github.com/elastic/terraform-provider-elasticstack/internal/acctest" + "github.com/elastic/terraform-provider-elasticstack/internal/clients" + //"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/transform" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccResourceTransform(t *testing.T) { + transformName := sdkacctest.RandStringFromCharSet(18, sdkacctest.CharSetAlphaNum) + + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceTransformDestroy, + ProtoV5ProviderFactories: acctest.Providers, + Steps: []resource.TestStep{ + { + Config: testAccResourceTransformCreate(transformName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "name", transformName), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "description", "test description"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "source.0.indices.0", "source_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "destination.0.index", "dest_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "frequency", "5m"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "pivot.#", "1"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "latest.#", "0"), + ), + }, + // { + // Config: testAccResourceTransformUpdate(transformName), + // Check: resource.ComposeTestCheckFunc(), + // }, + }, + }) +} + +func testAccResourceTransformCreate(name string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} +} + +resource "elasticstack_elasticsearch_transform" "test" { + name = "%s" + description = "test description" + + source { + indices = ["source_index_for_transform"] + } + + destination { + index = "dest_index_for_transform" + } + + pivot = jsonencode({ + "group_by": { + "customer_id": { + "terms": { + "field": "customer_id", + "missing_bucket": true + } + } + }, + "aggregations": { + "max_price": { + "max": { + "field": "taxful_total_price" + } + } + } + }) + frequency = "5m" + + defer_validation = true + timeout = "1m" +} + `, name) +} + +func checkResourceTransformDestroy(s *terraform.State) error { + client, err := 
clients.NewAcceptanceTestingClient() + if err != nil { + return err + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "elasticstack_elasticsearch_transform" { + continue + } + compId, _ := clients.CompositeIdFromStr(rs.Primary.ID) + + esClient, err := client.GetESClient() + if err != nil { + return err + } + req := esClient.TransformGetTransform.WithTransformID(compId.ResourceId) + res, err := esClient.TransformGetTransform(req) + if err != nil { + return err + } + + if res.StatusCode != 404 { + return fmt.Errorf("Transform (%s) still exists", compId.ResourceId) + } + } + return nil +} diff --git a/internal/models/transform.go b/internal/models/transform.go new file mode 100644 index 000000000..11cbce7ab --- /dev/null +++ b/internal/models/transform.go @@ -0,0 +1,39 @@ +package models + +import ( + "encoding/json" + "time" +) + +type Transform struct { + Id string `json:"id,omitempty"` + Name string `json:"-"` + Description string `json:"description,omitempty"` + Source TransformSource `json:"source"` + Destination TransformDestination `json:"dest"` + Pivot interface{} `json:"pivot,omitempty"` + Latest interface{} `json:"latest,omitempty"` + Frequency string `json:"frequency,omitempty"` + Meta map[string]interface{} `json:"_meta,omitempty"` +} + +type TransformSource struct { + Indices []string `json:"index"` + Query interface{} `json:"query,omitempty"` + RuntimeMappings interface{} `json:"runtime_mappings,omitempty"` +} + +type TransformDestination struct { + Index string `json:"index"` + Pipeline string `json:"pipeline,omitempty"` +} + +type PutTransformParams struct { + DeferValidation bool + Timeout time.Duration +} + +type GetTransformResponse struct { + Count json.Number `json:"count,omitempty"` + Transforms []Transform `json:"transforms"` +} diff --git a/provider/provider.go b/provider/provider.go index ea0e7dd49..08b8aa64f 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -7,6 +7,7 @@ import ( 
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/ingest" "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/logstash" "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/security" + "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/transform" "github.com/elastic/terraform-provider-elasticstack/internal/kibana" providerSchema "github.com/elastic/terraform-provider-elasticstack/internal/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -87,6 +88,7 @@ func New(version string) *schema.Provider { "elasticstack_elasticsearch_snapshot_lifecycle": cluster.ResourceSlm(), "elasticstack_elasticsearch_snapshot_repository": cluster.ResourceSnapshotRepository(), "elasticstack_elasticsearch_script": cluster.ResourceScript(), + "elasticstack_elasticsearch_transform": transform.ResourceTransform(), "elasticstack_kibana_space": kibana.ResourceSpace(), }, From 8985ad1a924f94e25658e436ea4425f2cdeb907b Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Mon, 6 Mar 2023 15:08:33 +0200 Subject: [PATCH 02/26] support for Update --- internal/clients/elasticsearch/index.go | 32 +++ internal/elasticsearch/transform/transform.go | 46 +++- .../elasticsearch/transform/transform_test.go | 224 ++++++++++++++++-- internal/models/transform.go | 5 + 4 files changed, 279 insertions(+), 28 deletions(-) diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index 28bfe3075..e3c665812 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -612,6 +612,38 @@ func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *strin return nil, diags } +func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.UpdateTransformParams) diag.Diagnostics { + fmt.Println("entering UpdateTransform") + var diags diag.Diagnostics + pipelineBytes, 
err := json.Marshal(transform) + if err != nil { + return diag.FromErr(err) + } + + esClient, err := apiClient.GetESClient() + if err != nil { + return diag.FromErr(err) + } + + opts := []func(*esapi.TransformUpdateTransformRequest){ + esClient.TransformUpdateTransform.WithContext(ctx), + esClient.TransformUpdateTransform.WithDeferValidation(params.DeferValidation), + esClient.TransformUpdateTransform.WithTimeout(params.Timeout), + } + + res, err := esClient.TransformUpdateTransform(bytes.NewReader(pipelineBytes), transform.Name, opts...) + if err != nil { + return diag.FromErr(err) + } + + defer res.Body.Close() + if diags := utils.CheckError(res, fmt.Sprintf("Unable to update transform: %s", transform.Name)); diags.HasError() { + return diags + } + + return diags +} + func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name string) diag.Diagnostics { fmt.Println("entering DeleteTransform for ", name) var diags diag.Diagnostics diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index c6ca4ea36..017b66c65 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -222,41 +222,63 @@ func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta int return diags } -func resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformDelete") +func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformUpdate") + client, diags := clients.NewApiClient(d, meta) if diags.HasError() { return diags } - id := d.Id() - compId, diags := clients.CompositeIdFromStr(id) + transformName := d.Get("name").(string) + _, diags = client.ID(ctx, transformName) if diags.HasError() { return diags } - if diags := elasticsearch.DeleteTransform(ctx, client, compId.ResourceId); 
diags.HasError() { + updatedTransform, err := getTransformFromResourceData(ctx, d, transformName) + if err != nil { + return diag.FromErr(err) + } + + updatedTransform.Pivot = nil + updatedTransform.Latest = nil + + params := models.UpdateTransformParams{ + DeferValidation: d.Get("defer_validation").(bool), + } + + timeout, err := time.ParseDuration(d.Get("timeout").(string)) + if err != nil { + return diag.FromErr(err) + } + params.Timeout = timeout + + if diags := elasticsearch.UpdateTransform(ctx, client, updatedTransform, ¶ms); diags.HasError() { return diags } - return diags + return resourceTransformRead(ctx, d, meta) } -func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformUpdate") - // TODO +func resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformDelete") client, diags := clients.NewApiClient(d, meta) if diags.HasError() { return diags } - transformName := d.Get("name").(string) - _, diags = client.ID(ctx, transformName) + id := d.Id() + compId, diags := clients.CompositeIdFromStr(id) if diags.HasError() { return diags } - return resourceTransformRead(ctx, d, meta) + if diags := elasticsearch.DeleteTransform(ctx, client, compId.ResourceId); diags.HasError() { + return diags + } + + return diags } func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, name string) (*models.Transform, error) { diff --git a/internal/elasticsearch/transform/transform_test.go b/internal/elasticsearch/transform/transform_test.go index cac2dab8f..703420cc2 100644 --- a/internal/elasticsearch/transform/transform_test.go +++ b/internal/elasticsearch/transform/transform_test.go @@ -14,41 +14,91 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func TestAccResourceTransform(t *testing.T) { - transformName := sdkacctest.RandStringFromCharSet(18, 
sdkacctest.CharSetAlphaNum) +func TestAccResourceTransformWithPivot(t *testing.T) { + + transformNamePivot := sdkacctest.RandStringFromCharSet(18, sdkacctest.CharSetAlphaNum) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceTransformDestroy, + ProtoV5ProviderFactories: acctest.Providers, + Steps: []resource.TestStep{ + { + Config: testAccResourceTransformWithPivotCreate(transformNamePivot), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "name", transformNamePivot), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "description", "test description"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.0", "source_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "destination.0.index", "dest_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "5m"), + ), + }, + { + Config: testAccResourceTransformWithPivotUpdate(transformNamePivot), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "name", transformNamePivot), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "description", "yet another test description"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.0", "source_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.1", "additional_index"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "destination.0.index", "dest_index_for_transform_v2"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "10m"), + ), + }, + }, 
+ }) +} +func TestAccResourceTransformWithLatest(t *testing.T) { + + transformNameLatest := sdkacctest.RandStringFromCharSet(20, sdkacctest.CharSetAlphaNum) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceTransformDestroy, + ProtoV5ProviderFactories: acctest.Providers, + Steps: []resource.TestStep{ + { + Config: testAccResourceTransformWithLatestCreate(transformNameLatest), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "name", transformNameLatest), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "description", "test description (latest)"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "source.0.indices.0", "source_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "destination.0.index", "dest_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "frequency", "2m"), + ), + }, + }, + }) +} + +func TestAccResourceTransformNoDefer(t *testing.T) { + + transformName := sdkacctest.RandStringFromCharSet(18, sdkacctest.CharSetAlphaNum) + indexName := sdkacctest.RandStringFromCharSet(22, sdkacctest.CharSetAlphaNum) resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, CheckDestroy: checkResourceTransformDestroy, ProtoV5ProviderFactories: acctest.Providers, Steps: []resource.TestStep{ { - Config: testAccResourceTransformCreate(transformName), + Config: testAccResourceTransformNoDeferCreate(transformName, indexName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "name", transformName), - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "description", "test description"), - 
resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "source.0.indices.0", "source_index_for_transform"), - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "destination.0.index", "dest_index_for_transform"), - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "frequency", "5m"), - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "pivot.#", "1"), - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "latest.#", "0"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "name", transformName), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "description", "test description"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.0", indexName), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "destination.0.index", "dest_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "5m"), ), }, - // { - // Config: testAccResourceTransformUpdate(transformName), - // Check: resource.ComposeTestCheckFunc(), - // }, }, }) } -func testAccResourceTransformCreate(name string) string { +func testAccResourceTransformWithPivotCreate(name string) string { return fmt.Sprintf(` provider "elasticstack" { elasticsearch {} } -resource "elasticstack_elasticsearch_transform" "test" { +resource "elasticstack_elasticsearch_transform" "test_pivot" { name = "%s" description = "test description" @@ -85,6 +135,148 @@ resource "elasticstack_elasticsearch_transform" "test" { `, name) } +func testAccResourceTransformWithPivotUpdate(name string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} +} + +resource "elasticstack_elasticsearch_transform" "test_pivot" { + name = "%s" + description = "yet another test description" + + source { + 
indices = ["source_index_for_transform", "additional_index"] + } + + destination { + index = "dest_index_for_transform_v2" + } + + pivot = jsonencode({ + "group_by": { + "customer_id": { + "terms": { + "field": "customer_id", + "missing_bucket": true + } + } + }, + "aggregations": { + "max_price": { + "max": { + "field": "taxful_total_price" + } + } + } + }) + frequency = "10m" + + defer_validation = true + timeout = "1m" +} + `, name) +} + +func testAccResourceTransformWithLatestCreate(name string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} +} + +resource "elasticstack_elasticsearch_transform" "test_latest" { + name = "%s" + description = "test description (latest)" + + source { + indices = ["source_index_for_transform"] + } + + destination { + index = "dest_index_for_transform" + } + + latest = jsonencode({ + "unique_key": ["customer_id"], + "sort": "order_date" + }) + frequency = "2m" + + defer_validation = true + timeout = "1m" +} + `, name) +} + +func testAccResourceTransformNoDeferCreate(transformName, indexName string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} +} + +resource "elasticstack_elasticsearch_index" "test_index" { + name = "%s" + + alias { + name = "test_alias_1" + } + + mappings = jsonencode({ + properties = { + field1 = { type = "text" } + } + }) + + settings { + setting { + name = "index.number_of_replicas" + value = "2" + } + } + + deletion_protection = false + wait_for_active_shards = "all" + master_timeout = "1m" + timeout = "1m" +} + +resource "elasticstack_elasticsearch_transform" "test_pivot" { + name = "%s" + description = "test description" + + source { + indices = [elasticstack_elasticsearch_index.test_index.name] + } + + destination { + index = "dest_index_for_transform" + } + + pivot = jsonencode({ + "group_by": { + "customer_id": { + "terms": { + "field": "customer_id", + "missing_bucket": true + } + } + }, + "aggregations": { + "max_price": { + "max": { + 
"field": "taxful_total_price" + } + } + } + }) + frequency = "5m" + + defer_validation = false + timeout = "1m" +} + `, indexName, transformName) +} + func checkResourceTransformDestroy(s *terraform.State) error { client, err := clients.NewAcceptanceTestingClient() if err != nil { diff --git a/internal/models/transform.go b/internal/models/transform.go index 11cbce7ab..ab3d9557c 100644 --- a/internal/models/transform.go +++ b/internal/models/transform.go @@ -33,6 +33,11 @@ type PutTransformParams struct { Timeout time.Duration } +type UpdateTransformParams struct { + DeferValidation bool + Timeout time.Duration +} + type GetTransformResponse struct { Count json.Number `json:"count,omitempty"` Transforms []Transform `json:"transforms"` From 0c72f4a1c9acc09fce73e468552d3029fca4c890 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Mon, 6 Mar 2023 16:15:46 +0200 Subject: [PATCH 03/26] adding missing properties for transform --- internal/elasticsearch/transform/transform.go | 112 ++++++++++++++++++ internal/models/transform.go | 48 ++++++-- 2 files changed, 151 insertions(+), 9 deletions(-) diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index 017b66c65..6c0e921c2 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -128,6 +128,118 @@ func ResourceTransform() *schema.Resource { ValidateFunc: validation.StringIsJSON, DiffSuppressFunc: utils.DiffJsonSuppress, }, + "retention_policy": { + Description: "Defines a retention policy for the transform.", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "time": { + Description: "Specifies that the transform uses a time field to set the retention policy.", + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Description: "The date field that is used to 
calculate the age of the document.", + Type: schema.TypeString, + Required: true, + }, + "max_age": { + Description: "Specifies the maximum age of a document in the destination index.", + Type: schema.TypeString, + Required: true, + ValidateFunc: utils.StringIsDuration, + }, + }, + }, + }, + }, + }, + }, + "sync": { + Description: "Defines the properties transforms require to run continuously.", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "time": { + Description: "Specifies that the transform uses a time field to synchronize the source and destination indices.", + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Description: "The date field that is used to identify new documents in the source.", + Type: schema.TypeString, + Required: true, + }, + "delay": { + Description: "The time delay between the current time and the latest input data time. The default value is 60s.", + Type: schema.TypeString, + Optional: true, + Default: "60s", + ValidateFunc: utils.StringIsDuration, + }, + }, + }, + }, + }, + }, + }, + "settings": { + Description: "Defines optional transform settings.", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "align_checkpoints": { + Description: "Specifies whether the transform checkpoint ranges should be optimized for performance. Default value is true.", + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "dates_as_epoch_millis": { + Description: "Defines if dates in the output should be written as ISO formatted string (default) or as millis since epoch.", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "deduce_mappings": { + Description: "Specifies whether the transform should deduce the destination index mappings from the transform config. 
The default value is true", + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "docs_per_second": { + Description: "Specifies a limit on the number of input documents per second. Default value is null, which disables throttling.", + Type: schema.TypeFloat, + Optional: true, + }, + "max_page_search_size": { + Description: "Defines the initial page size to use for the composite aggregation for each checkpoint. The default value is 500.", + Type: schema.TypeInt, + Optional: true, + }, + "num_failure_retries": { + Description: "Defines the number of retries on a recoverable failure before the transform task is marked as failed. The default value is the cluster-level setting num_transform_failure_retries.", + Type: schema.TypeInt, + Optional: true, + }, + "unattended": { + Description: "In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails. Defaults to false.", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, "defer_validation": { Type: schema.TypeBool, Description: "When true, deferrable validations are not run upon creation, but rather when the transform is started. 
This behavior may be desired if the source index does not exist until after the transform is created.", diff --git a/internal/models/transform.go b/internal/models/transform.go index ab3d9557c..61e74694f 100644 --- a/internal/models/transform.go +++ b/internal/models/transform.go @@ -6,15 +6,17 @@ import ( ) type Transform struct { - Id string `json:"id,omitempty"` - Name string `json:"-"` - Description string `json:"description,omitempty"` - Source TransformSource `json:"source"` - Destination TransformDestination `json:"dest"` - Pivot interface{} `json:"pivot,omitempty"` - Latest interface{} `json:"latest,omitempty"` - Frequency string `json:"frequency,omitempty"` - Meta map[string]interface{} `json:"_meta,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"-"` + Description string `json:"description,omitempty"` + Source TransformSource `json:"source"` + Destination TransformDestination `json:"dest"` + Pivot interface{} `json:"pivot,omitempty"` + Latest interface{} `json:"latest,omitempty"` + Frequency string `json:"frequency,omitempty"` + RetentionPolicy TransformRetentionPolicy `json:"retention_policy,omitempty"` + Sync TransformSync `json:"sync,omitempty"` + Meta map[string]interface{} `json:"_meta,omitempty"` } type TransformSource struct { @@ -28,6 +30,34 @@ type TransformDestination struct { Pipeline string `json:"pipeline,omitempty"` } +type TransformRetentionPolicy struct { + Time TransformRetentionPolicyTime `json:"time"` +} + +type TransformRetentionPolicyTime struct { + Field string `json:"field"` + MaxAge string `json:"max_age"` +} + +type TransformSync struct { + Time TransformSyncTime `json:"time"` +} + +type TransformSyncTime struct { + Field string `json:"field"` + Delay string `json:"delay,omitempty"` +} + +type TransformSettings struct { + AlignCheckpoints *bool `json:"align_checkpoints,omitempty"` + DatesAsEpochMillis *bool `json:"dates_as_epoch_millis,omitempty"` + DeduceMappings *bool `json:"deduce_mappings,omitempty"` + 
DocsPerSecond *float64 `json:"docs_per_second,omitempty"` + MaxPageSearchSize *int `json:"max_page_search_size,omitempty"` + NumFailureRetries *int `json:"num_failure_retries,omitempty"` + Unattended *bool `json:"unattended,omitempty"` +} + type PutTransformParams struct { DeferValidation bool Timeout time.Duration From 91e5a0bb1ccc6e9b76d9f3db22e7960f981691a6 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Mon, 6 Mar 2023 22:35:03 +0200 Subject: [PATCH 04/26] misc corrections --- internal/clients/elasticsearch/index.go | 12 ++- internal/elasticsearch/transform/transform.go | 95 ++++++++++++++++--- internal/models/transform.go | 23 ++--- internal/utils/validation.go | 50 ++++++++++ 4 files changed, 153 insertions(+), 27 deletions(-) diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index e3c665812..4a24a0c6d 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -548,11 +548,13 @@ func DeleteIngestPipeline(ctx context.Context, apiClient *clients.ApiClient, nam func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.PutTransformParams) diag.Diagnostics { fmt.Println("entering PutTransform") var diags diag.Diagnostics - pipelineBytes, err := json.Marshal(transform) + transformBytes, err := json.Marshal(transform) if err != nil { return diag.FromErr(err) } + fmt.Printf("%s\n", transformBytes) + esClient, err := apiClient.GetESClient() if err != nil { return diag.FromErr(err) @@ -564,7 +566,7 @@ func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform * esClient.TransformPutTransform.WithTimeout(params.Timeout), } - res, err := esClient.TransformPutTransform(bytes.NewReader(pipelineBytes), transform.Name, opts...) + res, err := esClient.TransformPutTransform(bytes.NewReader(transformBytes), transform.Name, opts...) 
if err != nil { return diag.FromErr(err) } @@ -615,11 +617,13 @@ func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *strin func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.UpdateTransformParams) diag.Diagnostics { fmt.Println("entering UpdateTransform") var diags diag.Diagnostics - pipelineBytes, err := json.Marshal(transform) + transformBytes, err := json.Marshal(transform) if err != nil { return diag.FromErr(err) } + fmt.Printf("%s\n", transformBytes) + esClient, err := apiClient.GetESClient() if err != nil { return diag.FromErr(err) @@ -631,7 +635,7 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor esClient.TransformUpdateTransform.WithTimeout(params.Timeout), } - res, err := esClient.TransformUpdateTransform(bytes.NewReader(pipelineBytes), transform.Name, opts...) + res, err := esClient.TransformUpdateTransform(bytes.NewReader(transformBytes), transform.Name, opts...) 
if err != nil { return diag.FromErr(err) } diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index 6c0e921c2..843e9c5b0 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -100,7 +100,7 @@ func ResourceTransform() *schema.Resource { Description: "The pivot method transforms the data by aggregating and grouping it.", Type: schema.TypeString, Optional: true, - AtLeastOneOf: []string{"pivot", "latest"}, + ExactlyOneOf: []string{"pivot", "latest"}, DiffSuppressFunc: utils.DiffJsonSuppress, ValidateFunc: validation.StringIsJSON, ForceNew: true, @@ -109,7 +109,7 @@ func ResourceTransform() *schema.Resource { Description: "The latest method transforms the data by finding the latest document for each unique key.", Type: schema.TypeString, Optional: true, - AtLeastOneOf: []string{"pivot", "latest"}, + ExactlyOneOf: []string{"pivot", "latest"}, DiffSuppressFunc: utils.DiffJsonSuppress, ValidateFunc: validation.StringIsJSON, ForceNew: true, @@ -119,7 +119,7 @@ func ResourceTransform() *schema.Resource { Description: "The interval between checks for changes in the source indices when the transform is running continuously. 
Defaults to `1m`.", Optional: true, Default: "1m", - ValidateFunc: utils.StringIsDuration, + ValidateFunc: utils.StringIsElasticDuration, }, "metadata": { Description: "Defines optional transform metadata.", @@ -151,7 +151,7 @@ func ResourceTransform() *schema.Resource { Description: "Specifies the maximum age of a document in the destination index.", Type: schema.TypeString, Required: true, - ValidateFunc: utils.StringIsDuration, + ValidateFunc: utils.StringIsElasticDuration, }, }, }, @@ -183,7 +183,7 @@ func ResourceTransform() *schema.Resource { Type: schema.TypeString, Optional: true, Default: "60s", - ValidateFunc: utils.StringIsDuration, + ValidateFunc: utils.StringIsElasticDuration, }, }, }, @@ -405,13 +405,12 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n if v, ok := d.GetOk("source"); ok { definedSource := v.([]interface{})[0].(map[string]interface{}) + transform.Source = new(models.TransformSource) indices := make([]string, 0) for _, i := range definedSource["indices"].([]interface{}) { indices = append(indices, i.(string)) } - transform.Source = models.TransformSource{ - Indices: indices, - } + transform.Source.Indices = indices if v, ok := definedSource["query"]; ok && len(v.(string)) > 0 { var query interface{} @@ -431,12 +430,13 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n } if v, ok := d.GetOk("destination"); ok { + definedDestination := v.([]interface{})[0].(map[string]interface{}) - transform.Destination = models.TransformDestination{ - Index: definedDestination["index"].(string), - } + transform.Destination = new(models.TransformDestination) - if pipeline, ok := definedDestination["pipeline"]; ok { + transform.Destination.Index = definedDestination["index"].(string) + + if pipeline, ok := definedDestination["pipeline"]; ok && len(pipeline.(string)) > 0 { transform.Destination.Pipeline = pipeline.(string) } } @@ -457,6 +457,10 @@ func getTransformFromResourceData(ctx 
context.Context, d *schema.ResourceData, n transform.Latest = latest } + if v, ok := d.GetOk("frequency"); ok { + transform.Frequency = v.(string) + } + if v, ok := d.GetOk("metadata"); ok { metadata := make(map[string]interface{}) if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&metadata); err != nil { @@ -465,5 +469,72 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n transform.Meta = metadata } + if v, ok := d.GetOk("retention_policy"); ok && v != nil { + definedRetentionPolicy := v.([]interface{})[0].(map[string]interface{}) + retentionTime := models.TransformRetentionPolicyTime{} + if v, ok := definedRetentionPolicy["time"]; ok { + var definedRetentionTime = v.([]interface{})[0].(map[string]interface{}) + if f, ok := definedRetentionTime["field"]; ok { + retentionTime.Field = f.(string) + } + if ma, ok := definedRetentionTime["max_age"]; ok { + retentionTime.MaxAge = ma.(string) + } + transform.RetentionPolicy = new(models.TransformRetentionPolicy) + transform.RetentionPolicy.Time = retentionTime + } + } + + if v, ok := d.GetOk("sync"); ok { + definedRetentionPolicy := v.([]interface{})[0].(map[string]interface{}) + syncTime := models.TransformSyncTime{} + if v, ok := definedRetentionPolicy["time"]; ok { + var definedRetentionTime = v.([]interface{})[0].(map[string]interface{}) + if f, ok := definedRetentionTime["field"]; ok { + syncTime.Field = f.(string) + } + if d, ok := definedRetentionTime["delay"]; ok { + syncTime.Delay = d.(string) + } + transform.Sync = new(models.TransformSync) + transform.Sync.Time = syncTime + } + } + + if v, ok := d.GetOk("settings"); ok { + definedSettings := v.([]interface{})[0].(map[string]interface{}) + settings := models.TransformSettings{} + if v, ok := definedSettings["align_checkpoints"]; ok { + settings.AlignCheckpoints = new(bool) + *settings.AlignCheckpoints = v.(bool) + } + if v, ok := definedSettings["dates_as_epoch_millis"]; ok { + settings.DatesAsEpochMillis = 
new(bool) + *settings.DatesAsEpochMillis = v.(bool) + } + if v, ok := definedSettings["deduce_mappings"]; ok { + settings.DeduceMappings = new(bool) + *settings.DeduceMappings = v.(bool) + } + if v, ok := definedSettings["docs_per_second"]; ok { + settings.DocsPerSecond = new(float64) + *settings.DocsPerSecond = v.(float64) + } + if v, ok := definedSettings["max_page_search_size"]; ok { + settings.MaxPageSearchSize = new(int) + *settings.MaxPageSearchSize = v.(int) + } + if v, ok := definedSettings["num_failure_retries"]; ok { + settings.NumFailureRetries = new(int) + *settings.NumFailureRetries = v.(int) + } + if v, ok := definedSettings["unattended"]; ok { + settings.Unattended = new(bool) + *settings.Unattended = v.(bool) + } + + transform.Settings = &settings + } + return &transform, nil } diff --git a/internal/models/transform.go b/internal/models/transform.go index 61e74694f..945fc631c 100644 --- a/internal/models/transform.go +++ b/internal/models/transform.go @@ -6,17 +6,18 @@ import ( ) type Transform struct { - Id string `json:"id,omitempty"` - Name string `json:"-"` - Description string `json:"description,omitempty"` - Source TransformSource `json:"source"` - Destination TransformDestination `json:"dest"` - Pivot interface{} `json:"pivot,omitempty"` - Latest interface{} `json:"latest,omitempty"` - Frequency string `json:"frequency,omitempty"` - RetentionPolicy TransformRetentionPolicy `json:"retention_policy,omitempty"` - Sync TransformSync `json:"sync,omitempty"` - Meta map[string]interface{} `json:"_meta,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"-"` + Description string `json:"description,omitempty"` + Source *TransformSource `json:"source"` + Destination *TransformDestination `json:"dest"` + Pivot interface{} `json:"pivot,omitempty"` + Latest interface{} `json:"latest,omitempty"` + Frequency string `json:"frequency,omitempty"` + RetentionPolicy *TransformRetentionPolicy `json:"retention_policy,omitempty"` + Sync *TransformSync 
`json:"sync,omitempty"` + Meta map[string]interface{} `json:"_meta,omitempty"` + Settings *TransformSettings `json:"settings,omitempty"` } type TransformSource struct { diff --git a/internal/utils/validation.go b/internal/utils/validation.go index 38e90b6d8..8e78538b5 100644 --- a/internal/utils/validation.go +++ b/internal/utils/validation.go @@ -18,3 +18,53 @@ func StringIsDuration(i interface{}, k string) (warnings []string, errors []erro return nil, nil } + +// StringIsElasticDuration is a SchemaValidateFunc which tests to make sure the supplied string is valid duration using Elastic time units: +// d, h, m, s, ms, micros, nanos. (see https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units) +func StringIsElasticDuration(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be string", k)} + } + + if v == "" { + return nil, []error{fmt.Errorf("%q contains an invalid duration: [empty]", k)} + } + + firstPartCount := 0 + for v != "" { + // first part must contain only characters in range [0-9] and . + if ('0' <= v[0] && v[0] <= '9') || v[0] == '.' 
{ + v = v[1:] + firstPartCount++ + continue + } + + if firstPartCount == 0 { + return nil, []error{fmt.Errorf("%q contains an invalid duration: should start with a numeric value", k)} + } + + if !isValidElasticTimeUnit(v) { + return nil, []error{fmt.Errorf("%q contains an invalid duration: unrecognized time unit [%s]", k, v)} + } + + break + } + + return nil, nil +} + +func isValidElasticTimeUnit(timeUnit string) bool { + switch timeUnit { + case + "d", + "h", + "m", + "s", + "ms", + "micros", + "nanos": + return true + } + return false +} From abb261b239d041a82f1b55f5aeebc2257d1707e9 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Tue, 7 Mar 2023 16:36:01 +0200 Subject: [PATCH 05/26] some documentation --- docs/resources/elasticsearch_transform.md | 185 ++++++++++++++++++ .../resource.tf | 48 +++++ internal/elasticsearch/transform/transform.go | 4 +- 3 files changed, 235 insertions(+), 2 deletions(-) create mode 100644 docs/resources/elasticsearch_transform.md create mode 100644 examples/resources/elasticstack_elasticsearch_transform/resource.tf diff --git a/docs/resources/elasticsearch_transform.md b/docs/resources/elasticsearch_transform.md new file mode 100644 index 000000000..85f9f79cd --- /dev/null +++ b/docs/resources/elasticsearch_transform.md @@ -0,0 +1,185 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "elasticstack_elasticsearch_transform Resource - terraform-provider-elasticstack" +subcategory: "" +description: |- + Manages Elasticsearch transforms. See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html +--- + +# elasticstack_elasticsearch_transform (Resource) + +Manages Elasticsearch transforms. 
See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html + +## Example Usage + +```terraform +resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { + name = "transform-pivot" + description = "A meaningful description" + + source { + indices = ["name_or_pattern_for_input_index"] + } + + destination { + index = "destination_index_for_transform" + } + + pivot = jsonencode({ + "group_by" : { + "customer_id" : { + "terms" : { + "field" : "customer_id", + "missing_bucket" : true + } + } + }, + "aggregations" : { + "max_price" : { + "max" : { + "field" : "taxful_total_price" + } + } + } + }) + + frequency = "5m" + + retention_policy { + time { + field = "order_date" + max_age = "30d" + } + } + + sync { + time { + field = "order_date" + delay = "10s" + } + } + + defer_validation = false +} +``` + + +## Schema + +### Required + +- `destination` (Block List, Min: 1, Max: 1) The destination for the transform. (see [below for nested schema](#nestedblock--destination)) +- `name` (String) Name of the transform you wish to create. +- `source` (Block List, Min: 1, Max: 1) The source of the data for the transform. (see [below for nested schema](#nestedblock--source)) + +### Optional + +- `defer_validation` (Boolean) When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created. +- `description` (String) Free text description of the transform. +- `elasticsearch_connection` (Block List, Max: 1, Deprecated) Elasticsearch connection configuration block. This property will be removed in a future provider version. Configure the Elasticsearch connection via the provider configuration instead. (see [below for nested schema](#nestedblock--elasticsearch_connection)) +- `frequency` (String) The interval between checks for changes in the source indices when the transform is running continuously. 
Defaults to `1m`. +- `latest` (String) The latest method transforms the data by finding the latest document for each unique key. JSON definition expected. Either 'pivot' or 'latest' must be present. +- `metadata` (String) Defines optional transform metadata. +- `pivot` (String) The pivot method transforms the data by aggregating and grouping it. JSON definition expected. Either 'pivot' or 'latest' must be present. +- `retention_policy` (Block List, Max: 1) Defines a retention policy for the transform. (see [below for nested schema](#nestedblock--retention_policy)) +- `settings` (Block List, Max: 1) Defines optional transform settings. (see [below for nested schema](#nestedblock--settings)) +- `sync` (Block List, Max: 1) Defines the properties transforms require to run continuously. (see [below for nested schema](#nestedblock--sync)) +- `timeout` (String) Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`. + +### Read-Only + +- `id` (String) Internal identifier of the resource + + +### Nested Schema for `destination` + +Required: + +- `index` (String) The destination index for the transform. + +Optional: + +- `pipeline` (String) The unique identifier for an ingest pipeline. + + + +### Nested Schema for `source` + +Required: + +- `indices` (List of String) The source indices for the transform. + +Optional: + +- `query` (String) A query clause that retrieves a subset of data from the source index. +- `runtime_mappings` (String) Definitions of search-time runtime fields that can be used by the transform. 
+ + + +### Nested Schema for `elasticsearch_connection` + +Optional: + +- `api_key` (String, Sensitive) API Key to use for authentication to Elasticsearch +- `ca_data` (String) PEM-encoded custom Certificate Authority certificate +- `ca_file` (String) Path to a custom Certificate Authority certificate +- `cert_data` (String) PEM encoded certificate for client auth +- `cert_file` (String) Path to a file containing the PEM encoded certificate for client auth +- `endpoints` (List of String, Sensitive) A list of endpoints where the terraform provider will point to, this must include the http(s) schema and port number. +- `insecure` (Boolean) Disable TLS certificate validation +- `key_data` (String, Sensitive) PEM encoded private key for client auth +- `key_file` (String) Path to a file containing the PEM encoded private key for client auth +- `password` (String, Sensitive) Password to use for API authentication to Elasticsearch. +- `username` (String) Username to use for API authentication to Elasticsearch. + + + +### Nested Schema for `retention_policy` + +Required: + +- `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to set the retention policy. (see [below for nested schema](#nestedblock--retention_policy--time)) + + +### Nested Schema for `retention_policy.time` + +Required: + +- `field` (String) The date field that is used to calculate the age of the document. +- `max_age` (String) Specifies the maximum age of a document in the destination index. + + + + +### Nested Schema for `settings` + +Optional: + +- `align_checkpoints` (Boolean) Specifies whether the transform checkpoint ranges should be optimized for performance. Default value is true. +- `dates_as_epoch_millis` (Boolean) Defines if dates in the output should be written as ISO formatted string (default) or as millis since epoch. +- `deduce_mappings` (Boolean) Specifies whether the transform should deduce the destination index mappings from the transform config. 
The default value is true +- `docs_per_second` (Number) Specifies a limit on the number of input documents per second. Default value is null, which disables throttling. +- `max_page_search_size` (Number) Defines the initial page size to use for the composite aggregation for each checkpoint. The default value is 500. +- `num_failure_retries` (Number) Defines the number of retries on a recoverable failure before the transform task is marked as failed. The default value is the cluster-level setting num_transform_failure_retries. +- `unattended` (Boolean) In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails. Defaults to false. + + + +### Nested Schema for `sync` + +Required: + +- `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to synchronize the source and destination indices. (see [below for nested schema](#nestedblock--sync--time)) + + +### Nested Schema for `sync.time` + +Required: + +- `field` (String) The date field that is used to identify new documents in the source. + +Optional: + +- `delay` (String) The time delay between the current time and the latest input data time. The default value is 60s. 
+ + diff --git a/examples/resources/elasticstack_elasticsearch_transform/resource.tf b/examples/resources/elasticstack_elasticsearch_transform/resource.tf new file mode 100644 index 000000000..ce8a53230 --- /dev/null +++ b/examples/resources/elasticstack_elasticsearch_transform/resource.tf @@ -0,0 +1,48 @@ +resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { + name = "transform-pivot" + description = "A meaningful description" + + source { + indices = ["name_or_pattern_for_input_index"] + } + + destination { + index = "destination_index_for_transform" + } + + pivot = jsonencode({ + "group_by" : { + "customer_id" : { + "terms" : { + "field" : "customer_id", + "missing_bucket" : true + } + } + }, + "aggregations" : { + "max_price" : { + "max" : { + "field" : "taxful_total_price" + } + } + } + }) + + frequency = "5m" + + retention_policy { + time { + field = "order_date" + max_age = "30d" + } + } + + sync { + time { + field = "order_date" + delay = "10s" + } + } + + defer_validation = false +} \ No newline at end of file diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index 843e9c5b0..a80ec43e9 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -97,7 +97,7 @@ func ResourceTransform() *schema.Resource { }, }, "pivot": { - Description: "The pivot method transforms the data by aggregating and grouping it.", + Description: "The pivot method transforms the data by aggregating and grouping it. JSON definition expected. 
Either 'pivot' or 'latest' must be present.", Type: schema.TypeString, Optional: true, ExactlyOneOf: []string{"pivot", "latest"}, @@ -106,7 +106,7 @@ func ResourceTransform() *schema.Resource { ForceNew: true, }, "latest": { - Description: "The latest method transforms the data by finding the latest document for each unique key.", + Description: "The latest method transforms the data by finding the latest document for each unique key. JSON definition expected. Either 'pivot' or 'latest' must be present.", Type: schema.TypeString, Optional: true, ExactlyOneOf: []string{"pivot", "latest"}, From 2f32dc89f4d562ba2ca68a71831819dfa588f327 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Tue, 7 Mar 2023 18:27:16 +0200 Subject: [PATCH 06/26] support for transform start/stop --- docs/resources/elasticsearch_transform.md | 26 ++++++++------- .../resource.tf | 4 ++- internal/clients/elasticsearch/index.go | 33 +++++++++++++++---- internal/elasticsearch/transform/transform.go | 20 +++++++---- .../elasticsearch/transform/transform_test.go | 7 ++-- internal/models/transform.go | 2 ++ 6 files changed, 64 insertions(+), 28 deletions(-) diff --git a/docs/resources/elasticsearch_transform.md b/docs/resources/elasticsearch_transform.md index 85f9f79cd..495191fe1 100644 --- a/docs/resources/elasticsearch_transform.md +++ b/docs/resources/elasticsearch_transform.md @@ -1,7 +1,7 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "elasticstack_elasticsearch_transform Resource - terraform-provider-elasticstack" -subcategory: "" +page_title: 'elasticstack_elasticsearch_transform Resource - terraform-provider-elasticstack' +subcategory: '' description: |- Manages Elasticsearch transforms. 
See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html --- @@ -59,11 +59,14 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { } } + enabled = false + defer_validation = false } ``` + ## Schema ### Required @@ -77,6 +80,7 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { - `defer_validation` (Boolean) When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created. - `description` (String) Free text description of the transform. - `elasticsearch_connection` (Block List, Max: 1, Deprecated) Elasticsearch connection configuration block. This property will be removed in a future provider version. Configure the Elasticsearch connection via the provider configuration instead. (see [below for nested schema](#nestedblock--elasticsearch_connection)) +- `enabled` (Boolean) Controls wether the transform is started or stopped. Default is `false` (stopped). - `frequency` (String) The interval between checks for changes in the source indices when the transform is running continuously. Defaults to `1m`. - `latest` (String) The latest method transforms the data by finding the latest document for each unique key. JSON definition expected. Either 'pivot' or 'latest' must be present. - `metadata` (String) Defines optional transform metadata. @@ -84,13 +88,14 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { - `retention_policy` (Block List, Max: 1) Defines a retention policy for the transform. (see [below for nested schema](#nestedblock--retention_policy)) - `settings` (Block List, Max: 1) Defines optional transform settings. (see [below for nested schema](#nestedblock--settings)) - `sync` (Block List, Max: 1) Defines the properties transforms require to run continuously. 
(see [below for nested schema](#nestedblock--sync)) -- `timeout` (String) Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`. +- `timeout` (String) Period to wait for a response from Elastisearch when performing any management operation. If no response is received before the timeout expires, the operation fails and returns an error. Defaults to `30s`. ### Read-Only - `id` (String) Internal identifier of the resource + ### Nested Schema for `destination` Required: @@ -101,8 +106,8 @@ Optional: - `pipeline` (String) The unique identifier for an ingest pipeline. - + ### Nested Schema for `source` Required: @@ -114,8 +119,8 @@ Optional: - `query` (String) A query clause that retrieves a subset of data from the source index. - `runtime_mappings` (String) Definitions of search-time runtime fields that can be used by the transform. - + ### Nested Schema for `elasticsearch_connection` Optional: @@ -132,8 +137,8 @@ Optional: - `password` (String, Sensitive) Password to use for API authentication to Elasticsearch. - `username` (String) Username to use for API authentication to Elasticsearch. - + ### Nested Schema for `retention_policy` Required: @@ -141,6 +146,7 @@ Required: - `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to set the retention policy. (see [below for nested schema](#nestedblock--retention_policy--time)) + ### Nested Schema for `retention_policy.time` Required: @@ -148,9 +154,8 @@ Required: - `field` (String) The date field that is used to calculate the age of the document. - `max_age` (String) Specifies the maximum age of a document in the destination index. - - + ### Nested Schema for `settings` Optional: @@ -163,8 +168,8 @@ Optional: - `num_failure_retries` (Number) Defines the number of retries on a recoverable failure before the transform task is marked as failed. 
The default value is the cluster-level setting num_transform_failure_retries. - `unattended` (Boolean) In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails. Defaults to false. - + ### Nested Schema for `sync` Required: @@ -172,6 +177,7 @@ Required: - `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to synchronize the source and destination indices. (see [below for nested schema](#nestedblock--sync--time)) + ### Nested Schema for `sync.time` Required: @@ -181,5 +187,3 @@ Required: Optional: - `delay` (String) The time delay between the current time and the latest input data time. The default value is 60s. - - diff --git a/examples/resources/elasticstack_elasticsearch_transform/resource.tf b/examples/resources/elasticstack_elasticsearch_transform/resource.tf index ce8a53230..68e44b8d5 100644 --- a/examples/resources/elasticstack_elasticsearch_transform/resource.tf +++ b/examples/resources/elasticstack_elasticsearch_transform/resource.tf @@ -44,5 +44,7 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { } } + enabled = false + defer_validation = false -} \ No newline at end of file +} diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index 4a24a0c6d..5009fc9b5 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -546,7 +546,7 @@ func DeleteIngestPipeline(ctx context.Context, apiClient *clients.ApiClient, nam } func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.PutTransformParams) diag.Diagnostics { - fmt.Println("entering PutTransform") + fmt.Println("entering PutTransform for", transform.Name) var diags diag.Diagnostics transformBytes, err := json.Marshal(transform) if err != nil { @@ -576,11 +576,18 @@ func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform * return diags } 
+ if params.Enabled { + _, err := esClient.TransformStartTransform(transform.Name) + if err != nil { + return diag.FromErr(err) + } + } + return diags } func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) (*models.Transform, diag.Diagnostics) { - fmt.Println("entering GetTransform for ", *name) + fmt.Println("entering GetTransform for", *name) var diags diag.Diagnostics esClient, err := apiClient.GetESClient() if err != nil { @@ -615,7 +622,7 @@ func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *strin } func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.UpdateTransformParams) diag.Diagnostics { - fmt.Println("entering UpdateTransform") + fmt.Println("entering UpdateTransform with Enabled", params.Enabled) var diags diag.Diagnostics transformBytes, err := json.Marshal(transform) if err != nil { @@ -645,11 +652,23 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor return diags } + if params.Enabled { + _, err := esClient.TransformStartTransform(transform.Name) + if err != nil { + return diag.FromErr(err) + } + } else { + _, err := esClient.TransformStopTransform(transform.Name) + if err != nil { + return diag.FromErr(err) + } + } + return diags } -func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name string) diag.Diagnostics { - fmt.Println("entering DeleteTransform for ", name) +func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) diag.Diagnostics { + fmt.Println("entering DeleteTransform for", *name) var diags diag.Diagnostics esClient, err := apiClient.GetESClient() @@ -657,12 +676,12 @@ func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name str return diag.FromErr(err) } - res, err := esClient.TransformDeleteTransform(name, esClient.TransformDeleteTransform.WithForce(true), esClient.TransformDeleteTransform.WithContext(ctx)) + res, err 
:= esClient.TransformDeleteTransform(*name, esClient.TransformDeleteTransform.WithForce(true), esClient.TransformDeleteTransform.WithContext(ctx)) if err != nil { return diag.FromErr(err) } defer res.Body.Close() - if diags := utils.CheckError(res, fmt.Sprintf("Unable to delete the transform: %s", name)); diags.HasError() { + if diags := utils.CheckError(res, fmt.Sprintf("Unable to delete the transform: %s", *name)); diags.HasError() { return diags } diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index a80ec43e9..532ced977 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -248,11 +248,17 @@ func ResourceTransform() *schema.Resource { }, "timeout": { Type: schema.TypeString, - Description: "Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`.", + Description: "Period to wait for a response from Elastisearch when performing any management operation. If no response is received before the timeout expires, the operation fails and returns an error. Defaults to `30s`.", Optional: true, Default: "30s", ValidateFunc: utils.StringIsDuration, }, + "enabled": { + Type: schema.TypeBool, + Description: "Controls wether the transform is started or stopped. 
Default is `false` (stopped).", + Optional: true, + Default: false, + }, } utils.AddConnectionSchema(transformSchema) @@ -269,7 +275,6 @@ func ResourceTransform() *schema.Resource { } func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformCreate") client, diags := clients.NewApiClient(d, meta) if diags.HasError() { @@ -297,6 +302,8 @@ func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta i } params.Timeout = timeout + params.Enabled = d.Get("enabled").(bool) + if diags := elasticsearch.PutTransform(ctx, client, transform, ¶ms); diags.HasError() { return diags } @@ -306,7 +313,7 @@ func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta i } func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformRead") + client, diags := clients.NewApiClient(d, meta) if diags.HasError() { return diags @@ -335,7 +342,6 @@ func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta int } func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformUpdate") client, diags := clients.NewApiClient(d, meta) if diags.HasError() { @@ -366,6 +372,8 @@ func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta i } params.Timeout = timeout + params.Enabled = d.Get("enabled").(bool) + if diags := elasticsearch.UpdateTransform(ctx, client, updatedTransform, ¶ms); diags.HasError() { return diags } @@ -374,7 +382,7 @@ func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta i } func resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformDelete") + client, diags := clients.NewApiClient(d, meta) if diags.HasError() { return 
diags @@ -386,7 +394,7 @@ func resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } - if diags := elasticsearch.DeleteTransform(ctx, client, compId.ResourceId); diags.HasError() { + if diags := elasticsearch.DeleteTransform(ctx, client, &compId.ResourceId); diags.HasError() { return diags } diff --git a/internal/elasticsearch/transform/transform_test.go b/internal/elasticsearch/transform/transform_test.go index 703420cc2..118e19fcf 100644 --- a/internal/elasticsearch/transform/transform_test.go +++ b/internal/elasticsearch/transform/transform_test.go @@ -1,14 +1,11 @@ package transform_test import ( - //"context" "fmt" - //"regexp" "testing" "github.com/elastic/terraform-provider-elasticstack/internal/acctest" "github.com/elastic/terraform-provider-elasticstack/internal/clients" - //"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/transform" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -128,6 +125,7 @@ resource "elasticstack_elasticsearch_transform" "test_pivot" { } }) frequency = "5m" + enabled = false defer_validation = true timeout = "1m" @@ -171,6 +169,7 @@ resource "elasticstack_elasticsearch_transform" "test_pivot" { } }) frequency = "10m" + enabled = true defer_validation = true timeout = "1m" @@ -201,6 +200,7 @@ resource "elasticstack_elasticsearch_transform" "test_latest" { "sort": "order_date" }) frequency = "2m" + enabled = false defer_validation = true timeout = "1m" @@ -270,6 +270,7 @@ resource "elasticstack_elasticsearch_transform" "test_pivot" { } }) frequency = "5m" + enabled = false defer_validation = false timeout = "1m" diff --git a/internal/models/transform.go b/internal/models/transform.go index 945fc631c..7a13f4a80 100644 --- a/internal/models/transform.go +++ b/internal/models/transform.go @@ -62,11 +62,13 @@ type TransformSettings 
struct { type PutTransformParams struct { DeferValidation bool Timeout time.Duration + Enabled bool } type UpdateTransformParams struct { DeferValidation bool Timeout time.Duration + Enabled bool } type GetTransformResponse struct { From df3201b11c9f77da502c18213df5f54da67a4b68 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Tue, 7 Mar 2023 19:05:37 +0200 Subject: [PATCH 07/26] adding options on the client calls for transform start/stop --- internal/clients/elasticsearch/index.go | 26 ++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index 5009fc9b5..fc5b57176 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -560,13 +560,13 @@ func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform * return diag.FromErr(err) } - opts := []func(*esapi.TransformPutTransformRequest){ + putOptions := []func(*esapi.TransformPutTransformRequest){ esClient.TransformPutTransform.WithContext(ctx), esClient.TransformPutTransform.WithDeferValidation(params.DeferValidation), esClient.TransformPutTransform.WithTimeout(params.Timeout), } - res, err := esClient.TransformPutTransform(bytes.NewReader(transformBytes), transform.Name, opts...) + res, err := esClient.TransformPutTransform(bytes.NewReader(transformBytes), transform.Name, putOptions...) if err != nil { return diag.FromErr(err) } @@ -577,7 +577,11 @@ func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform * } if params.Enabled { - _, err := esClient.TransformStartTransform(transform.Name) + startOptions := []func(*esapi.TransformStartTransformRequest){ + esClient.TransformStartTransform.WithContext(ctx), + esClient.TransformStartTransform.WithTimeout(params.Timeout), + } + _, err := esClient.TransformStartTransform(transform.Name, startOptions...)
if err != nil { return diag.FromErr(err) } @@ -636,13 +640,13 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor return diag.FromErr(err) } - opts := []func(*esapi.TransformUpdateTransformRequest){ + updateOptions := []func(*esapi.TransformUpdateTransformRequest){ esClient.TransformUpdateTransform.WithContext(ctx), esClient.TransformUpdateTransform.WithDeferValidation(params.DeferValidation), esClient.TransformUpdateTransform.WithTimeout(params.Timeout), } - res, err := esClient.TransformUpdateTransform(bytes.NewReader(transformBytes), transform.Name, opts...) + res, err := esClient.TransformUpdateTransform(bytes.NewReader(transformBytes), transform.Name, updateOptions...) if err != nil { return diag.FromErr(err) } @@ -653,12 +657,20 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor } if params.Enabled { - _, err := esClient.TransformStartTransform(transform.Name) + startOptions := []func(*esapi.TransformStartTransformRequest){ + esClient.TransformStartTransform.WithContext(ctx), + esClient.TransformStartTransform.WithTimeout(params.Timeout), + } + _, err := esClient.TransformStartTransform(transform.Name, startOptions...) if err != nil { return diag.FromErr(err) } } else { - _, err := esClient.TransformStopTransform(transform.Name) + stopOptions := []func(*esapi.TransformStopTransformRequest){ + esClient.TransformStopTransform.WithContext(ctx), + esClient.TransformStopTransform.WithTimeout(params.Timeout), + } + _, err := esClient.TransformStopTransform(transform.Name, stopOptions...) 
if err != nil { return diag.FromErr(err) } From 7840c939f1b146964506d6d8467c64e952098bd5 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Mon, 6 Mar 2023 13:15:23 +0200 Subject: [PATCH 08/26] initial commit --- internal/clients/elasticsearch/index.go | 88 +++++ internal/elasticsearch/transform/transform.go | 335 ++++++++++++++++++ .../elasticsearch/transform/transform_test.go | 115 ++++++ internal/models/transform.go | 39 ++ provider/provider.go | 2 + 5 files changed, 579 insertions(+) create mode 100644 internal/elasticsearch/transform/transform.go create mode 100644 internal/elasticsearch/transform/transform_test.go create mode 100644 internal/models/transform.go diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index b9c3054fb..28bfe3075 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -544,3 +544,91 @@ func DeleteIngestPipeline(ctx context.Context, apiClient *clients.ApiClient, nam } return diags } + +func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.PutTransformParams) diag.Diagnostics { + fmt.Println("entering PutTransform") + var diags diag.Diagnostics + pipelineBytes, err := json.Marshal(transform) + if err != nil { + return diag.FromErr(err) + } + + esClient, err := apiClient.GetESClient() + if err != nil { + return diag.FromErr(err) + } + + opts := []func(*esapi.TransformPutTransformRequest){ + esClient.TransformPutTransform.WithContext(ctx), + esClient.TransformPutTransform.WithDeferValidation(params.DeferValidation), + esClient.TransformPutTransform.WithTimeout(params.Timeout), + } + + res, err := esClient.TransformPutTransform(bytes.NewReader(pipelineBytes), transform.Name, opts...) 
+ if err != nil { + return diag.FromErr(err) + } + + defer res.Body.Close() + if diags := utils.CheckError(res, fmt.Sprintf("Unable to create transform: %s", transform.Name)); diags.HasError() { + return diags + } + + return diags +} + +func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) (*models.Transform, diag.Diagnostics) { + fmt.Println("entering GetTransform for ", *name) + var diags diag.Diagnostics + esClient, err := apiClient.GetESClient() + if err != nil { + return nil, diag.FromErr(err) + } + req := esClient.TransformGetTransform.WithTransformID(*name) + res, err := esClient.TransformGetTransform(req, esClient.TransformGetTransform.WithContext(ctx)) + if err != nil { + return nil, diag.FromErr(err) + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return nil, nil + } + if diags := utils.CheckError(res, fmt.Sprintf("Unable to get requested transform: %s", *name)); diags.HasError() { + return nil, diags + } + + transformsResponse := models.GetTransformResponse{} + if err := json.NewDecoder(res.Body).Decode(&transformsResponse); err != nil { + return nil, diag.FromErr(err) + } + + for _, t := range transformsResponse.Transforms { + if t.Id == *name { + t.Name = *name + return &t, diags + } + } + + return nil, diags +} + +func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name string) diag.Diagnostics { + fmt.Println("entering DeleteTransform for ", name) + var diags diag.Diagnostics + + esClient, err := apiClient.GetESClient() + if err != nil { + return diag.FromErr(err) + } + + res, err := esClient.TransformDeleteTransform(name, esClient.TransformDeleteTransform.WithForce(true), esClient.TransformDeleteTransform.WithContext(ctx)) + if err != nil { + return diag.FromErr(err) + } + defer res.Body.Close() + if diags := utils.CheckError(res, fmt.Sprintf("Unable to delete the transform: %s", name)); diags.HasError() { + return diags + } + + return diags +} diff --git 
a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go new file mode 100644 index 000000000..c6ca4ea36 --- /dev/null +++ b/internal/elasticsearch/transform/transform.go @@ -0,0 +1,335 @@ +package transform + +import ( + "context" + "encoding/json" + "fmt" + //"reflect" + "regexp" + //"strconv" + "strings" + "time" + + "github.com/elastic/terraform-provider-elasticstack/internal/clients" + "github.com/elastic/terraform-provider-elasticstack/internal/clients/elasticsearch" + "github.com/elastic/terraform-provider-elasticstack/internal/models" + "github.com/elastic/terraform-provider-elasticstack/internal/utils" + //"github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + //"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceTransform() *schema.Resource { + transformSchema := map[string]*schema.Schema{ + "id": { + Description: "Internal identifier of the resource", + Type: schema.TypeString, + Computed: true, + }, + "name": { + Description: "Name of the transform you wish to create.", + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[a-z0-9_-]+$`), "must contain only lower case alphanumeric characters, hyphens, and underscores"), + validation.StringMatch(regexp.MustCompile(`^[a-z0-9].*[a-z0-9]$`), "must start and end with a lowercase alphanumeric character"), + ), + }, + "description": { + Description: "Free text description of the transform.", + Type: schema.TypeString, + Optional: true, + }, + "source": { + Description: "The source of the data for the transform.", + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "indices": { + Description: "The source indices for the transform.", + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "query": { + Description: "A query clause that retrieves a subset of data from the source index.", + Type: schema.TypeString, + Optional: true, + Default: `{"match_all":{}}}`, + DiffSuppressFunc: utils.DiffJsonSuppress, + ValidateFunc: validation.StringIsJSON, + }, + "runtime_mappings": { + Description: "Definitions of search-time runtime fields that can be used by the transform.", + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "destination": { + Description: "The destination for the transform.", + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "index": { + Description: "The destination index for the transform.", + Type: schema.TypeString, + Required: true, + }, + "pipeline": { + Description: "The unique identifier for an ingest pipeline.", + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "pivot": { + Description: "The pivot method transforms the data by aggregating and grouping it.", + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"pivot", "latest"}, + DiffSuppressFunc: utils.DiffJsonSuppress, + ValidateFunc: validation.StringIsJSON, + ForceNew: true, + }, + "latest": { + Description: "The latest method transforms the data by finding the latest document for each unique key.", + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"pivot", "latest"}, + DiffSuppressFunc: utils.DiffJsonSuppress, + ValidateFunc: validation.StringIsJSON, + ForceNew: true, + }, + "frequency": { + Type: schema.TypeString, + Description: "The interval between checks for changes in the source indices when the transform is running continuously. 
Defaults to `1m`.", + Optional: true, + Default: "1m", + ValidateFunc: utils.StringIsDuration, + }, + "metadata": { + Description: "Defines optional transform metadata.", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: utils.DiffJsonSuppress, + }, + "defer_validation": { + Type: schema.TypeBool, + Description: "When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created.", + Optional: true, + Default: false, + }, + "timeout": { + Type: schema.TypeString, + Description: "Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`.", + Optional: true, + Default: "30s", + ValidateFunc: utils.StringIsDuration, + }, + } + + utils.AddConnectionSchema(transformSchema) + + return &schema.Resource{ + Schema: transformSchema, + Description: "Manages Elasticsearch transforms. 
See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html", + + CreateContext: resourceTransformCreate, + ReadContext: resourceTransformRead, + UpdateContext: resourceTransformUpdate, + DeleteContext: resourceTransformDelete, + } +} + +func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformCreate") + + client, diags := clients.NewApiClient(d, meta) + if diags.HasError() { + return diags + } + + transformName := d.Get("name").(string) + id, diags := client.ID(ctx, transformName) + if diags.HasError() { + return diags + } + + transform, err := getTransformFromResourceData(ctx, d, transformName) + if err != nil { + return diag.FromErr(err) + } + + params := models.PutTransformParams{ + DeferValidation: d.Get("defer_validation").(bool), + } + + timeout, err := time.ParseDuration(d.Get("timeout").(string)) + if err != nil { + return diag.FromErr(err) + } + params.Timeout = timeout + + if diags := elasticsearch.PutTransform(ctx, client, transform, ¶ms); diags.HasError() { + return diags + } + + d.SetId(id.String()) + return resourceTransformRead(ctx, d, meta) +} + +func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformRead") + client, diags := clients.NewApiClient(d, meta) + if diags.HasError() { + return diags + } + compId, diags := clients.CompositeIdFromStr(d.Id()) + if diags.HasError() { + return diags + } + + transformName := compId.ResourceId + if err := d.Set("name", transformName); err != nil { + return diag.FromErr(err) + } + + transform, diags := elasticsearch.GetTransform(ctx, client, &transformName) + if transform == nil && diags == nil { + tflog.Warn(ctx, fmt.Sprintf(`Transform "%s" not found, removing from state`, compId.ResourceId)) + d.SetId("") + return diags + } + if diags.HasError() { + return diags + } + + return diags +} + +func 
resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformDelete") + client, diags := clients.NewApiClient(d, meta) + if diags.HasError() { + return diags + } + + id := d.Id() + compId, diags := clients.CompositeIdFromStr(id) + if diags.HasError() { + return diags + } + + if diags := elasticsearch.DeleteTransform(ctx, client, compId.ResourceId); diags.HasError() { + return diags + } + + return diags +} + +func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformUpdate") + // TODO + client, diags := clients.NewApiClient(d, meta) + if diags.HasError() { + return diags + } + + transformName := d.Get("name").(string) + _, diags = client.ID(ctx, transformName) + if diags.HasError() { + return diags + } + + return resourceTransformRead(ctx, d, meta) +} + +func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, name string) (*models.Transform, error) { + + var transform models.Transform + transform.Name = name + + if v, ok := d.GetOk("description"); ok { + transform.Description = v.(string) + } + + if v, ok := d.GetOk("source"); ok { + definedSource := v.([]interface{})[0].(map[string]interface{}) + + indices := make([]string, 0) + for _, i := range definedSource["indices"].([]interface{}) { + indices = append(indices, i.(string)) + } + transform.Source = models.TransformSource{ + Indices: indices, + } + + if v, ok := definedSource["query"]; ok && len(v.(string)) > 0 { + var query interface{} + if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&query); err != nil { + return nil, err + } + transform.Source.Query = query + } + + if v, ok := definedSource["runtime_mappings"]; ok && len(v.(string)) > 0 { + var runtimeMappings interface{} + if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&runtimeMappings); err != nil { + return nil, err + } + 
transform.Source.RuntimeMappings = runtimeMappings + } + } + + if v, ok := d.GetOk("destination"); ok { + definedDestination := v.([]interface{})[0].(map[string]interface{}) + transform.Destination = models.TransformDestination{ + Index: definedDestination["index"].(string), + } + + if pipeline, ok := definedDestination["pipeline"]; ok { + transform.Destination.Pipeline = pipeline.(string) + } + } + + if v, ok := d.GetOk("pivot"); ok { + var pivot interface{} + if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&pivot); err != nil { + return nil, err + } + transform.Pivot = pivot + } + + if v, ok := d.GetOk("latest"); ok { + var latest interface{} + if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&latest); err != nil { + return nil, err + } + transform.Latest = latest + } + + if v, ok := d.GetOk("metadata"); ok { + metadata := make(map[string]interface{}) + if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&metadata); err != nil { + return nil, err + } + transform.Meta = metadata + } + + return &transform, nil +} diff --git a/internal/elasticsearch/transform/transform_test.go b/internal/elasticsearch/transform/transform_test.go new file mode 100644 index 000000000..cac2dab8f --- /dev/null +++ b/internal/elasticsearch/transform/transform_test.go @@ -0,0 +1,115 @@ +package transform_test + +import ( + //"context" + "fmt" + //"regexp" + "testing" + + "github.com/elastic/terraform-provider-elasticstack/internal/acctest" + "github.com/elastic/terraform-provider-elasticstack/internal/clients" + //"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/transform" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccResourceTransform(t *testing.T) { + transformName := sdkacctest.RandStringFromCharSet(18, sdkacctest.CharSetAlphaNum) + + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceTransformDestroy, + ProtoV5ProviderFactories: acctest.Providers, + Steps: []resource.TestStep{ + { + Config: testAccResourceTransformCreate(transformName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "name", transformName), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "description", "test description"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "source.0.indices.0", "source_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "destination.0.index", "dest_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "frequency", "5m"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "pivot.#", "1"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "latest.#", "0"), + ), + }, + // { + // Config: testAccResourceTransformUpdate(transformName), + // Check: resource.ComposeTestCheckFunc(), + // }, + }, + }) +} + +func testAccResourceTransformCreate(name string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} +} + +resource "elasticstack_elasticsearch_transform" "test" { + name = "%s" + description = "test description" + + source { + indices = ["source_index_for_transform"] + } + + destination { + index = "dest_index_for_transform" + } + + pivot = jsonencode({ + "group_by": { + "customer_id": { + "terms": { + "field": "customer_id", + "missing_bucket": true + } + } + }, + "aggregations": { + "max_price": { + "max": { + "field": "taxful_total_price" + } + } + } + }) + frequency = "5m" + + defer_validation = true + timeout = "1m" +} + `, name) +} + +func checkResourceTransformDestroy(s *terraform.State) error { + client, err := 
clients.NewAcceptanceTestingClient() + if err != nil { + return err + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "elasticstack_elasticsearch_transform" { + continue + } + compId, _ := clients.CompositeIdFromStr(rs.Primary.ID) + + esClient, err := client.GetESClient() + if err != nil { + return err + } + req := esClient.TransformGetTransform.WithTransformID(compId.ResourceId) + res, err := esClient.TransformGetTransform(req) + if err != nil { + return err + } + + if res.StatusCode != 404 { + return fmt.Errorf("Transform (%s) still exists", compId.ResourceId) + } + } + return nil +} diff --git a/internal/models/transform.go b/internal/models/transform.go new file mode 100644 index 000000000..11cbce7ab --- /dev/null +++ b/internal/models/transform.go @@ -0,0 +1,39 @@ +package models + +import ( + "encoding/json" + "time" +) + +type Transform struct { + Id string `json:"id,omitempty"` + Name string `json:"-"` + Description string `json:"description,omitempty"` + Source TransformSource `json:"source"` + Destination TransformDestination `json:"dest"` + Pivot interface{} `json:"pivot,omitempty"` + Latest interface{} `json:"latest,omitempty"` + Frequency string `json:"frequency,omitempty"` + Meta map[string]interface{} `json:"_meta,omitempty"` +} + +type TransformSource struct { + Indices []string `json:"index"` + Query interface{} `json:"query,omitempty"` + RuntimeMappings interface{} `json:"runtime_mappings,omitempty"` +} + +type TransformDestination struct { + Index string `json:"index"` + Pipeline string `json:"pipeline,omitempty"` +} + +type PutTransformParams struct { + DeferValidation bool + Timeout time.Duration +} + +type GetTransformResponse struct { + Count json.Number `json:"count,omitempty"` + Transforms []Transform `json:"transforms"` +} diff --git a/provider/provider.go b/provider/provider.go index ea0e7dd49..08b8aa64f 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -7,6 +7,7 @@ import ( 
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/ingest" "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/logstash" "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/security" + "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/transform" "github.com/elastic/terraform-provider-elasticstack/internal/kibana" providerSchema "github.com/elastic/terraform-provider-elasticstack/internal/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -87,6 +88,7 @@ func New(version string) *schema.Provider { "elasticstack_elasticsearch_snapshot_lifecycle": cluster.ResourceSlm(), "elasticstack_elasticsearch_snapshot_repository": cluster.ResourceSnapshotRepository(), "elasticstack_elasticsearch_script": cluster.ResourceScript(), + "elasticstack_elasticsearch_transform": transform.ResourceTransform(), "elasticstack_kibana_space": kibana.ResourceSpace(), }, From de26951f7775ff3bd4f23301b49930d13792a5b6 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Mon, 6 Mar 2023 15:08:33 +0200 Subject: [PATCH 09/26] support for Update --- internal/clients/elasticsearch/index.go | 32 +++ internal/elasticsearch/transform/transform.go | 46 +++- .../elasticsearch/transform/transform_test.go | 224 ++++++++++++++++-- internal/models/transform.go | 5 + 4 files changed, 279 insertions(+), 28 deletions(-) diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index 28bfe3075..e3c665812 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -612,6 +612,38 @@ func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *strin return nil, diags } +func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.UpdateTransformParams) diag.Diagnostics { + fmt.Println("entering UpdateTransform") + var diags diag.Diagnostics + pipelineBytes, 
err := json.Marshal(transform) + if err != nil { + return diag.FromErr(err) + } + + esClient, err := apiClient.GetESClient() + if err != nil { + return diag.FromErr(err) + } + + opts := []func(*esapi.TransformUpdateTransformRequest){ + esClient.TransformUpdateTransform.WithContext(ctx), + esClient.TransformUpdateTransform.WithDeferValidation(params.DeferValidation), + esClient.TransformUpdateTransform.WithTimeout(params.Timeout), + } + + res, err := esClient.TransformUpdateTransform(bytes.NewReader(pipelineBytes), transform.Name, opts...) + if err != nil { + return diag.FromErr(err) + } + + defer res.Body.Close() + if diags := utils.CheckError(res, fmt.Sprintf("Unable to update transform: %s", transform.Name)); diags.HasError() { + return diags + } + + return diags +} + func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name string) diag.Diagnostics { fmt.Println("entering DeleteTransform for ", name) var diags diag.Diagnostics diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index c6ca4ea36..017b66c65 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -222,41 +222,63 @@ func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta int return diags } -func resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformDelete") +func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformUpdate") + client, diags := clients.NewApiClient(d, meta) if diags.HasError() { return diags } - id := d.Id() - compId, diags := clients.CompositeIdFromStr(id) + transformName := d.Get("name").(string) + _, diags = client.ID(ctx, transformName) if diags.HasError() { return diags } - if diags := elasticsearch.DeleteTransform(ctx, client, compId.ResourceId); 
diags.HasError() { + updatedTransform, err := getTransformFromResourceData(ctx, d, transformName) + if err != nil { + return diag.FromErr(err) + } + + updatedTransform.Pivot = nil + updatedTransform.Latest = nil + + params := models.UpdateTransformParams{ + DeferValidation: d.Get("defer_validation").(bool), + } + + timeout, err := time.ParseDuration(d.Get("timeout").(string)) + if err != nil { + return diag.FromErr(err) + } + params.Timeout = timeout + + if diags := elasticsearch.UpdateTransform(ctx, client, updatedTransform, ¶ms); diags.HasError() { return diags } - return diags + return resourceTransformRead(ctx, d, meta) } -func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformUpdate") - // TODO +func resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + fmt.Println("entering resourceTransformDelete") client, diags := clients.NewApiClient(d, meta) if diags.HasError() { return diags } - transformName := d.Get("name").(string) - _, diags = client.ID(ctx, transformName) + id := d.Id() + compId, diags := clients.CompositeIdFromStr(id) if diags.HasError() { return diags } - return resourceTransformRead(ctx, d, meta) + if diags := elasticsearch.DeleteTransform(ctx, client, compId.ResourceId); diags.HasError() { + return diags + } + + return diags } func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, name string) (*models.Transform, error) { diff --git a/internal/elasticsearch/transform/transform_test.go b/internal/elasticsearch/transform/transform_test.go index cac2dab8f..703420cc2 100644 --- a/internal/elasticsearch/transform/transform_test.go +++ b/internal/elasticsearch/transform/transform_test.go @@ -14,41 +14,91 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) -func TestAccResourceTransform(t *testing.T) { - transformName := sdkacctest.RandStringFromCharSet(18, 
sdkacctest.CharSetAlphaNum) +func TestAccResourceTransformWithPivot(t *testing.T) { + + transformNamePivot := sdkacctest.RandStringFromCharSet(18, sdkacctest.CharSetAlphaNum) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceTransformDestroy, + ProtoV5ProviderFactories: acctest.Providers, + Steps: []resource.TestStep{ + { + Config: testAccResourceTransformWithPivotCreate(transformNamePivot), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "name", transformNamePivot), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "description", "test description"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.0", "source_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "destination.0.index", "dest_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "5m"), + ), + }, + { + Config: testAccResourceTransformWithPivotUpdate(transformNamePivot), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "name", transformNamePivot), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "description", "yet another test description"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.0", "source_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.1", "additional_index"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "destination.0.index", "dest_index_for_transform_v2"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "10m"), + ), + }, + }, 
+ }) +} +func TestAccResourceTransformWithLatest(t *testing.T) { + + transformNameLatest := sdkacctest.RandStringFromCharSet(20, sdkacctest.CharSetAlphaNum) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceTransformDestroy, + ProtoV5ProviderFactories: acctest.Providers, + Steps: []resource.TestStep{ + { + Config: testAccResourceTransformWithLatestCreate(transformNameLatest), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "name", transformNameLatest), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "description", "test description (latest)"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "source.0.indices.0", "source_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "destination.0.index", "dest_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "frequency", "2m"), + ), + }, + }, + }) +} + +func TestAccResourceTransformNoDefer(t *testing.T) { + + transformName := sdkacctest.RandStringFromCharSet(18, sdkacctest.CharSetAlphaNum) + indexName := sdkacctest.RandStringFromCharSet(22, sdkacctest.CharSetAlphaNum) resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, CheckDestroy: checkResourceTransformDestroy, ProtoV5ProviderFactories: acctest.Providers, Steps: []resource.TestStep{ { - Config: testAccResourceTransformCreate(transformName), + Config: testAccResourceTransformNoDeferCreate(transformName, indexName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "name", transformName), - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "description", "test description"), - 
resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "source.0.indices.0", "source_index_for_transform"), - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "destination.0.index", "dest_index_for_transform"), - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "frequency", "5m"), - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "pivot.#", "1"), - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test", "latest.#", "0"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "name", transformName), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "description", "test description"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.0", indexName), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "destination.0.index", "dest_index_for_transform"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "5m"), ), }, - // { - // Config: testAccResourceTransformUpdate(transformName), - // Check: resource.ComposeTestCheckFunc(), - // }, }, }) } -func testAccResourceTransformCreate(name string) string { +func testAccResourceTransformWithPivotCreate(name string) string { return fmt.Sprintf(` provider "elasticstack" { elasticsearch {} } -resource "elasticstack_elasticsearch_transform" "test" { +resource "elasticstack_elasticsearch_transform" "test_pivot" { name = "%s" description = "test description" @@ -85,6 +135,148 @@ resource "elasticstack_elasticsearch_transform" "test" { `, name) } +func testAccResourceTransformWithPivotUpdate(name string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} +} + +resource "elasticstack_elasticsearch_transform" "test_pivot" { + name = "%s" + description = "yet another test description" + + source { + 
indices = ["source_index_for_transform", "additional_index"] + } + + destination { + index = "dest_index_for_transform_v2" + } + + pivot = jsonencode({ + "group_by": { + "customer_id": { + "terms": { + "field": "customer_id", + "missing_bucket": true + } + } + }, + "aggregations": { + "max_price": { + "max": { + "field": "taxful_total_price" + } + } + } + }) + frequency = "10m" + + defer_validation = true + timeout = "1m" +} + `, name) +} + +func testAccResourceTransformWithLatestCreate(name string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} +} + +resource "elasticstack_elasticsearch_transform" "test_latest" { + name = "%s" + description = "test description (latest)" + + source { + indices = ["source_index_for_transform"] + } + + destination { + index = "dest_index_for_transform" + } + + latest = jsonencode({ + "unique_key": ["customer_id"], + "sort": "order_date" + }) + frequency = "2m" + + defer_validation = true + timeout = "1m" +} + `, name) +} + +func testAccResourceTransformNoDeferCreate(transformName, indexName string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} +} + +resource "elasticstack_elasticsearch_index" "test_index" { + name = "%s" + + alias { + name = "test_alias_1" + } + + mappings = jsonencode({ + properties = { + field1 = { type = "text" } + } + }) + + settings { + setting { + name = "index.number_of_replicas" + value = "2" + } + } + + deletion_protection = false + wait_for_active_shards = "all" + master_timeout = "1m" + timeout = "1m" +} + +resource "elasticstack_elasticsearch_transform" "test_pivot" { + name = "%s" + description = "test description" + + source { + indices = [elasticstack_elasticsearch_index.test_index.name] + } + + destination { + index = "dest_index_for_transform" + } + + pivot = jsonencode({ + "group_by": { + "customer_id": { + "terms": { + "field": "customer_id", + "missing_bucket": true + } + } + }, + "aggregations": { + "max_price": { + "max": { + 
"field": "taxful_total_price" + } + } + } + }) + frequency = "5m" + + defer_validation = false + timeout = "1m" +} + `, indexName, transformName) +} + func checkResourceTransformDestroy(s *terraform.State) error { client, err := clients.NewAcceptanceTestingClient() if err != nil { diff --git a/internal/models/transform.go b/internal/models/transform.go index 11cbce7ab..ab3d9557c 100644 --- a/internal/models/transform.go +++ b/internal/models/transform.go @@ -33,6 +33,11 @@ type PutTransformParams struct { Timeout time.Duration } +type UpdateTransformParams struct { + DeferValidation bool + Timeout time.Duration +} + type GetTransformResponse struct { Count json.Number `json:"count,omitempty"` Transforms []Transform `json:"transforms"` From db040527af8dddf937c6154bd8aea5c6af3e5a41 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Mon, 6 Mar 2023 16:15:46 +0200 Subject: [PATCH 10/26] adding missing properties for transform --- internal/elasticsearch/transform/transform.go | 112 ++++++++++++++++++ internal/models/transform.go | 48 ++++++-- 2 files changed, 151 insertions(+), 9 deletions(-) diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index 017b66c65..6c0e921c2 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -128,6 +128,118 @@ func ResourceTransform() *schema.Resource { ValidateFunc: validation.StringIsJSON, DiffSuppressFunc: utils.DiffJsonSuppress, }, + "retention_policy": { + Description: "Defines a retention policy for the transform.", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "time": { + Description: "Specifies that the transform uses a time field to set the retention policy.", + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Description: "The date field that is used to 
calculate the age of the document.", + Type: schema.TypeString, + Required: true, + }, + "max_age": { + Description: "Specifies the maximum age of a document in the destination index.", + Type: schema.TypeString, + Required: true, + ValidateFunc: utils.StringIsDuration, + }, + }, + }, + }, + }, + }, + }, + "sync": { + Description: "Defines the properties transforms require to run continuously.", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "time": { + Description: "Specifies that the transform uses a time field to synchronize the source and destination indices.", + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Description: "The date field that is used to identify new documents in the source.", + Type: schema.TypeString, + Required: true, + }, + "delay": { + Description: "The time delay between the current time and the latest input data time. The default value is 60s.", + Type: schema.TypeString, + Optional: true, + Default: "60s", + ValidateFunc: utils.StringIsDuration, + }, + }, + }, + }, + }, + }, + }, + "settings": { + Description: "Defines optional transform settings.", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "align_checkpoints": { + Description: "Specifies whether the transform checkpoint ranges should be optimized for performance. Default value is true.", + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "dates_as_epoch_millis": { + Description: "Defines if dates in the output should be written as ISO formatted string (default) or as millis since epoch.", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "deduce_mappings": { + Description: "Specifies whether the transform should deduce the destination index mappings from the transform config. 
The default value is true", + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "docs_per_second": { + Description: "Specifies a limit on the number of input documents per second. Default value is null, which disables throttling.", + Type: schema.TypeFloat, + Optional: true, + }, + "max_page_search_size": { + Description: "Defines the initial page size to use for the composite aggregation for each checkpoint. The default value is 500.", + Type: schema.TypeInt, + Optional: true, + }, + "num_failure_retries": { + Description: "Defines the number of retries on a recoverable failure before the transform task is marked as failed. The default value is the cluster-level setting num_transform_failure_retries.", + Type: schema.TypeInt, + Optional: true, + }, + "unattended": { + Description: "In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails. Defaults to false.", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, "defer_validation": { Type: schema.TypeBool, Description: "When true, deferrable validations are not run upon creation, but rather when the transform is started. 
This behavior may be desired if the source index does not exist until after the transform is created.", diff --git a/internal/models/transform.go b/internal/models/transform.go index ab3d9557c..61e74694f 100644 --- a/internal/models/transform.go +++ b/internal/models/transform.go @@ -6,15 +6,17 @@ import ( ) type Transform struct { - Id string `json:"id,omitempty"` - Name string `json:"-"` - Description string `json:"description,omitempty"` - Source TransformSource `json:"source"` - Destination TransformDestination `json:"dest"` - Pivot interface{} `json:"pivot,omitempty"` - Latest interface{} `json:"latest,omitempty"` - Frequency string `json:"frequency,omitempty"` - Meta map[string]interface{} `json:"_meta,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"-"` + Description string `json:"description,omitempty"` + Source TransformSource `json:"source"` + Destination TransformDestination `json:"dest"` + Pivot interface{} `json:"pivot,omitempty"` + Latest interface{} `json:"latest,omitempty"` + Frequency string `json:"frequency,omitempty"` + RetentionPolicy TransformRetentionPolicy `json:"retention_policy,omitempty"` + Sync TransformSync `json:"sync,omitempty"` + Meta map[string]interface{} `json:"_meta,omitempty"` } type TransformSource struct { @@ -28,6 +30,34 @@ type TransformDestination struct { Pipeline string `json:"pipeline,omitempty"` } +type TransformRetentionPolicy struct { + Time TransformRetentionPolicyTime `json:"time"` +} + +type TransformRetentionPolicyTime struct { + Field string `json:"field"` + MaxAge string `json:"max_age"` +} + +type TransformSync struct { + Time TransformSyncTime `json:"time"` +} + +type TransformSyncTime struct { + Field string `json:"field"` + Delay string `json:"delay,omitempty"` +} + +type TransformSettings struct { + AlignCheckpoints *bool `json:"align_checkpoints,omitempty"` + DatesAsEpochMillis *bool `json:"dates_as_epoch_millis,omitempty"` + DeduceMappings *bool `json:"deduce_mappings,omitempty"` + 
DocsPerSecond *float64 `json:"docs_per_second,omitempty"` + MaxPageSearchSize *int `json:"max_page_search_size,omitempty"` + NumFailureRetries *int `json:"num_failure_retries,omitempty"` + Unattended *bool `json:"unattended,omitempty"` +} + type PutTransformParams struct { DeferValidation bool Timeout time.Duration From b9d0a87ca0b750c045cb21ee7d3036d47c2be623 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Mon, 6 Mar 2023 22:35:03 +0200 Subject: [PATCH 11/26] misc corrections --- internal/clients/elasticsearch/index.go | 12 ++- internal/elasticsearch/transform/transform.go | 95 ++++++++++++++++--- internal/models/transform.go | 23 ++--- internal/utils/validation.go | 50 ++++++++++ 4 files changed, 153 insertions(+), 27 deletions(-) diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index e3c665812..4a24a0c6d 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -548,11 +548,13 @@ func DeleteIngestPipeline(ctx context.Context, apiClient *clients.ApiClient, nam func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.PutTransformParams) diag.Diagnostics { fmt.Println("entering PutTransform") var diags diag.Diagnostics - pipelineBytes, err := json.Marshal(transform) + transformBytes, err := json.Marshal(transform) if err != nil { return diag.FromErr(err) } + fmt.Printf("%s\n", transformBytes) + esClient, err := apiClient.GetESClient() if err != nil { return diag.FromErr(err) @@ -564,7 +566,7 @@ func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform * esClient.TransformPutTransform.WithTimeout(params.Timeout), } - res, err := esClient.TransformPutTransform(bytes.NewReader(pipelineBytes), transform.Name, opts...) + res, err := esClient.TransformPutTransform(bytes.NewReader(transformBytes), transform.Name, opts...) 
if err != nil { return diag.FromErr(err) } @@ -615,11 +617,13 @@ func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *strin func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.UpdateTransformParams) diag.Diagnostics { fmt.Println("entering UpdateTransform") var diags diag.Diagnostics - pipelineBytes, err := json.Marshal(transform) + transformBytes, err := json.Marshal(transform) if err != nil { return diag.FromErr(err) } + fmt.Printf("%s\n", transformBytes) + esClient, err := apiClient.GetESClient() if err != nil { return diag.FromErr(err) @@ -631,7 +635,7 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor esClient.TransformUpdateTransform.WithTimeout(params.Timeout), } - res, err := esClient.TransformUpdateTransform(bytes.NewReader(pipelineBytes), transform.Name, opts...) + res, err := esClient.TransformUpdateTransform(bytes.NewReader(transformBytes), transform.Name, opts...) 
if err != nil { return diag.FromErr(err) } diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index 6c0e921c2..843e9c5b0 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -100,7 +100,7 @@ func ResourceTransform() *schema.Resource { Description: "The pivot method transforms the data by aggregating and grouping it.", Type: schema.TypeString, Optional: true, - AtLeastOneOf: []string{"pivot", "latest"}, + ExactlyOneOf: []string{"pivot", "latest"}, DiffSuppressFunc: utils.DiffJsonSuppress, ValidateFunc: validation.StringIsJSON, ForceNew: true, @@ -109,7 +109,7 @@ func ResourceTransform() *schema.Resource { Description: "The latest method transforms the data by finding the latest document for each unique key.", Type: schema.TypeString, Optional: true, - AtLeastOneOf: []string{"pivot", "latest"}, + ExactlyOneOf: []string{"pivot", "latest"}, DiffSuppressFunc: utils.DiffJsonSuppress, ValidateFunc: validation.StringIsJSON, ForceNew: true, @@ -119,7 +119,7 @@ func ResourceTransform() *schema.Resource { Description: "The interval between checks for changes in the source indices when the transform is running continuously. 
Defaults to `1m`.", Optional: true, Default: "1m", - ValidateFunc: utils.StringIsDuration, + ValidateFunc: utils.StringIsElasticDuration, }, "metadata": { Description: "Defines optional transform metadata.", @@ -151,7 +151,7 @@ func ResourceTransform() *schema.Resource { Description: "Specifies the maximum age of a document in the destination index.", Type: schema.TypeString, Required: true, - ValidateFunc: utils.StringIsDuration, + ValidateFunc: utils.StringIsElasticDuration, }, }, }, @@ -183,7 +183,7 @@ func ResourceTransform() *schema.Resource { Type: schema.TypeString, Optional: true, Default: "60s", - ValidateFunc: utils.StringIsDuration, + ValidateFunc: utils.StringIsElasticDuration, }, }, }, @@ -405,13 +405,12 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n if v, ok := d.GetOk("source"); ok { definedSource := v.([]interface{})[0].(map[string]interface{}) + transform.Source = new(models.TransformSource) indices := make([]string, 0) for _, i := range definedSource["indices"].([]interface{}) { indices = append(indices, i.(string)) } - transform.Source = models.TransformSource{ - Indices: indices, - } + transform.Source.Indices = indices if v, ok := definedSource["query"]; ok && len(v.(string)) > 0 { var query interface{} @@ -431,12 +430,13 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n } if v, ok := d.GetOk("destination"); ok { + definedDestination := v.([]interface{})[0].(map[string]interface{}) - transform.Destination = models.TransformDestination{ - Index: definedDestination["index"].(string), - } + transform.Destination = new(models.TransformDestination) - if pipeline, ok := definedDestination["pipeline"]; ok { + transform.Destination.Index = definedDestination["index"].(string) + + if pipeline, ok := definedDestination["pipeline"]; ok && len(pipeline.(string)) > 0 { transform.Destination.Pipeline = pipeline.(string) } } @@ -457,6 +457,10 @@ func getTransformFromResourceData(ctx 
context.Context, d *schema.ResourceData, n transform.Latest = latest } + if v, ok := d.GetOk("frequency"); ok { + transform.Frequency = v.(string) + } + if v, ok := d.GetOk("metadata"); ok { metadata := make(map[string]interface{}) if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&metadata); err != nil { @@ -465,5 +469,72 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n transform.Meta = metadata } + if v, ok := d.GetOk("retention_policy"); ok && v != nil { + definedRetentionPolicy := v.([]interface{})[0].(map[string]interface{}) + retentionTime := models.TransformRetentionPolicyTime{} + if v, ok := definedRetentionPolicy["time"]; ok { + var definedRetentionTime = v.([]interface{})[0].(map[string]interface{}) + if f, ok := definedRetentionTime["field"]; ok { + retentionTime.Field = f.(string) + } + if ma, ok := definedRetentionTime["max_age"]; ok { + retentionTime.MaxAge = ma.(string) + } + transform.RetentionPolicy = new(models.TransformRetentionPolicy) + transform.RetentionPolicy.Time = retentionTime + } + } + + if v, ok := d.GetOk("sync"); ok { + definedRetentionPolicy := v.([]interface{})[0].(map[string]interface{}) + syncTime := models.TransformSyncTime{} + if v, ok := definedRetentionPolicy["time"]; ok { + var definedRetentionTime = v.([]interface{})[0].(map[string]interface{}) + if f, ok := definedRetentionTime["field"]; ok { + syncTime.Field = f.(string) + } + if d, ok := definedRetentionTime["delay"]; ok { + syncTime.Delay = d.(string) + } + transform.Sync = new(models.TransformSync) + transform.Sync.Time = syncTime + } + } + + if v, ok := d.GetOk("settings"); ok { + definedSettings := v.([]interface{})[0].(map[string]interface{}) + settings := models.TransformSettings{} + if v, ok := definedSettings["align_checkpoints"]; ok { + settings.AlignCheckpoints = new(bool) + *settings.AlignCheckpoints = v.(bool) + } + if v, ok := definedSettings["dates_as_epoch_millis"]; ok { + settings.DatesAsEpochMillis = 
new(bool) + *settings.DatesAsEpochMillis = v.(bool) + } + if v, ok := definedSettings["deduce_mappings"]; ok { + settings.DeduceMappings = new(bool) + *settings.DeduceMappings = v.(bool) + } + if v, ok := definedSettings["docs_per_second"]; ok { + settings.DocsPerSecond = new(float64) + *settings.DocsPerSecond = v.(float64) + } + if v, ok := definedSettings["max_page_search_size"]; ok { + settings.MaxPageSearchSize = new(int) + *settings.MaxPageSearchSize = v.(int) + } + if v, ok := definedSettings["num_failure_retries"]; ok { + settings.NumFailureRetries = new(int) + *settings.NumFailureRetries = v.(int) + } + if v, ok := definedSettings["unattended"]; ok { + settings.Unattended = new(bool) + *settings.Unattended = v.(bool) + } + + transform.Settings = &settings + } + return &transform, nil } diff --git a/internal/models/transform.go b/internal/models/transform.go index 61e74694f..945fc631c 100644 --- a/internal/models/transform.go +++ b/internal/models/transform.go @@ -6,17 +6,18 @@ import ( ) type Transform struct { - Id string `json:"id,omitempty"` - Name string `json:"-"` - Description string `json:"description,omitempty"` - Source TransformSource `json:"source"` - Destination TransformDestination `json:"dest"` - Pivot interface{} `json:"pivot,omitempty"` - Latest interface{} `json:"latest,omitempty"` - Frequency string `json:"frequency,omitempty"` - RetentionPolicy TransformRetentionPolicy `json:"retention_policy,omitempty"` - Sync TransformSync `json:"sync,omitempty"` - Meta map[string]interface{} `json:"_meta,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"-"` + Description string `json:"description,omitempty"` + Source *TransformSource `json:"source"` + Destination *TransformDestination `json:"dest"` + Pivot interface{} `json:"pivot,omitempty"` + Latest interface{} `json:"latest,omitempty"` + Frequency string `json:"frequency,omitempty"` + RetentionPolicy *TransformRetentionPolicy `json:"retention_policy,omitempty"` + Sync *TransformSync 
`json:"sync,omitempty"` + Meta map[string]interface{} `json:"_meta,omitempty"` + Settings *TransformSettings `json:"settings,omitempty"` } type TransformSource struct { diff --git a/internal/utils/validation.go b/internal/utils/validation.go index 38e90b6d8..8e78538b5 100644 --- a/internal/utils/validation.go +++ b/internal/utils/validation.go @@ -18,3 +18,53 @@ func StringIsDuration(i interface{}, k string) (warnings []string, errors []erro return nil, nil } + +// StringIsElasticDuration is a SchemaValidateFunc which tests to make sure the supplied string is valid duration using Elastic time units: +// d, h, m, s, ms, micros, nanos. (see https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units) +func StringIsElasticDuration(i interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be string", k)} + } + + if v == "" { + return nil, []error{fmt.Errorf("%q contains an invalid duration: [empty]", k)} + } + + firstPartCount := 0 + for v != "" { + // first part must contain only characters in range [0-9] and . + if ('0' <= v[0] && v[0] <= '9') || v[0] == '.' 
{ + v = v[1:] + firstPartCount++ + continue + } + + if firstPartCount == 0 { + return nil, []error{fmt.Errorf("%q contains an invalid duration: should start with a numeric value", k)} + } + + if !isValidElasticTimeUnit(v) { + return nil, []error{fmt.Errorf("%q contains an invalid duration: unrecognized time unit [%s]", k, v)} + } + + break + } + + return nil, nil +} + +func isValidElasticTimeUnit(timeUnit string) bool { + switch timeUnit { + case + "d", + "h", + "m", + "s", + "ms", + "micros", + "nanos": + return true + } + return false +} From a19faaf832eb31b78fe473203f19c1bb5c1c2a35 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Tue, 7 Mar 2023 16:36:01 +0200 Subject: [PATCH 12/26] some documentation --- docs/resources/elasticsearch_transform.md | 185 ++++++++++++++++++ .../resource.tf | 48 +++++ internal/elasticsearch/transform/transform.go | 4 +- 3 files changed, 235 insertions(+), 2 deletions(-) create mode 100644 docs/resources/elasticsearch_transform.md create mode 100644 examples/resources/elasticstack_elasticsearch_transform/resource.tf diff --git a/docs/resources/elasticsearch_transform.md b/docs/resources/elasticsearch_transform.md new file mode 100644 index 000000000..85f9f79cd --- /dev/null +++ b/docs/resources/elasticsearch_transform.md @@ -0,0 +1,185 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "elasticstack_elasticsearch_transform Resource - terraform-provider-elasticstack" +subcategory: "" +description: |- + Manages Elasticsearch transforms. See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html +--- + +# elasticstack_elasticsearch_transform (Resource) + +Manages Elasticsearch transforms. 
See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html + +## Example Usage + +```terraform +resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { + name = "transform-pivot" + description = "A meaningful description" + + source { + indices = ["name_or_pattern_for_input_index"] + } + + destination { + index = "destination_index_for_transform" + } + + pivot = jsonencode({ + "group_by" : { + "customer_id" : { + "terms" : { + "field" : "customer_id", + "missing_bucket" : true + } + } + }, + "aggregations" : { + "max_price" : { + "max" : { + "field" : "taxful_total_price" + } + } + } + }) + + frequency = "5m" + + retention_policy { + time { + field = "order_date" + max_age = "30d" + } + } + + sync { + time { + field = "order_date" + delay = "10s" + } + } + + defer_validation = false +} +``` + + +## Schema + +### Required + +- `destination` (Block List, Min: 1, Max: 1) The destination for the transform. (see [below for nested schema](#nestedblock--destination)) +- `name` (String) Name of the transform you wish to create. +- `source` (Block List, Min: 1, Max: 1) The source of the data for the transform. (see [below for nested schema](#nestedblock--source)) + +### Optional + +- `defer_validation` (Boolean) When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created. +- `description` (String) Free text description of the transform. +- `elasticsearch_connection` (Block List, Max: 1, Deprecated) Elasticsearch connection configuration block. This property will be removed in a future provider version. Configure the Elasticsearch connection via the provider configuration instead. (see [below for nested schema](#nestedblock--elasticsearch_connection)) +- `frequency` (String) The interval between checks for changes in the source indices when the transform is running continuously. 
Defaults to `1m`. +- `latest` (String) The latest method transforms the data by finding the latest document for each unique key. JSON definition expected. Either 'pivot' or 'latest' must be present. +- `metadata` (String) Defines optional transform metadata. +- `pivot` (String) The pivot method transforms the data by aggregating and grouping it. JSON definition expected. Either 'pivot' or 'latest' must be present. +- `retention_policy` (Block List, Max: 1) Defines a retention policy for the transform. (see [below for nested schema](#nestedblock--retention_policy)) +- `settings` (Block List, Max: 1) Defines optional transform settings. (see [below for nested schema](#nestedblock--settings)) +- `sync` (Block List, Max: 1) Defines the properties transforms require to run continuously. (see [below for nested schema](#nestedblock--sync)) +- `timeout` (String) Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`. + +### Read-Only + +- `id` (String) Internal identifier of the resource + + +### Nested Schema for `destination` + +Required: + +- `index` (String) The destination index for the transform. + +Optional: + +- `pipeline` (String) The unique identifier for an ingest pipeline. + + + +### Nested Schema for `source` + +Required: + +- `indices` (List of String) The source indices for the transform. + +Optional: + +- `query` (String) A query clause that retrieves a subset of data from the source index. +- `runtime_mappings` (String) Definitions of search-time runtime fields that can be used by the transform. 
+ + + +### Nested Schema for `elasticsearch_connection` + +Optional: + +- `api_key` (String, Sensitive) API Key to use for authentication to Elasticsearch +- `ca_data` (String) PEM-encoded custom Certificate Authority certificate +- `ca_file` (String) Path to a custom Certificate Authority certificate +- `cert_data` (String) PEM encoded certificate for client auth +- `cert_file` (String) Path to a file containing the PEM encoded certificate for client auth +- `endpoints` (List of String, Sensitive) A list of endpoints where the terraform provider will point to, this must include the http(s) schema and port number. +- `insecure` (Boolean) Disable TLS certificate validation +- `key_data` (String, Sensitive) PEM encoded private key for client auth +- `key_file` (String) Path to a file containing the PEM encoded private key for client auth +- `password` (String, Sensitive) Password to use for API authentication to Elasticsearch. +- `username` (String) Username to use for API authentication to Elasticsearch. + + + +### Nested Schema for `retention_policy` + +Required: + +- `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to set the retention policy. (see [below for nested schema](#nestedblock--retention_policy--time)) + + +### Nested Schema for `retention_policy.time` + +Required: + +- `field` (String) The date field that is used to calculate the age of the document. +- `max_age` (String) Specifies the maximum age of a document in the destination index. + + + + +### Nested Schema for `settings` + +Optional: + +- `align_checkpoints` (Boolean) Specifies whether the transform checkpoint ranges should be optimized for performance. Default value is true. +- `dates_as_epoch_millis` (Boolean) Defines if dates in the output should be written as ISO formatted string (default) or as millis since epoch. +- `deduce_mappings` (Boolean) Specifies whether the transform should deduce the destination index mappings from the transform config. 
The default value is true +- `docs_per_second` (Number) Specifies a limit on the number of input documents per second. Default value is null, which disables throttling. +- `max_page_search_size` (Number) Defines the initial page size to use for the composite aggregation for each checkpoint. The default value is 500. +- `num_failure_retries` (Number) Defines the number of retries on a recoverable failure before the transform task is marked as failed. The default value is the cluster-level setting num_transform_failure_retries. +- `unattended` (Boolean) In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails. Defaults to false. + + + +### Nested Schema for `sync` + +Required: + +- `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to synchronize the source and destination indices. (see [below for nested schema](#nestedblock--sync--time)) + + +### Nested Schema for `sync.time` + +Required: + +- `field` (String) The date field that is used to identify new documents in the source. + +Optional: + +- `delay` (String) The time delay between the current time and the latest input data time. The default value is 60s. 
+ + diff --git a/examples/resources/elasticstack_elasticsearch_transform/resource.tf b/examples/resources/elasticstack_elasticsearch_transform/resource.tf new file mode 100644 index 000000000..ce8a53230 --- /dev/null +++ b/examples/resources/elasticstack_elasticsearch_transform/resource.tf @@ -0,0 +1,48 @@ +resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { + name = "transform-pivot" + description = "A meaningful description" + + source { + indices = ["name_or_pattern_for_input_index"] + } + + destination { + index = "destination_index_for_transform" + } + + pivot = jsonencode({ + "group_by" : { + "customer_id" : { + "terms" : { + "field" : "customer_id", + "missing_bucket" : true + } + } + }, + "aggregations" : { + "max_price" : { + "max" : { + "field" : "taxful_total_price" + } + } + } + }) + + frequency = "5m" + + retention_policy { + time { + field = "order_date" + max_age = "30d" + } + } + + sync { + time { + field = "order_date" + delay = "10s" + } + } + + defer_validation = false +} \ No newline at end of file diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index 843e9c5b0..a80ec43e9 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -97,7 +97,7 @@ func ResourceTransform() *schema.Resource { }, }, "pivot": { - Description: "The pivot method transforms the data by aggregating and grouping it.", + Description: "The pivot method transforms the data by aggregating and grouping it. JSON definition expected. 
Either 'pivot' or 'latest' must be present.", Type: schema.TypeString, Optional: true, ExactlyOneOf: []string{"pivot", "latest"}, @@ -106,7 +106,7 @@ func ResourceTransform() *schema.Resource { ForceNew: true, }, "latest": { - Description: "The latest method transforms the data by finding the latest document for each unique key.", + Description: "The latest method transforms the data by finding the latest document for each unique key. JSON definition expected. Either 'pivot' or 'latest' must be present.", Type: schema.TypeString, Optional: true, ExactlyOneOf: []string{"pivot", "latest"}, From 8c2fb9a38a6c0c34b379f437c318db4e7c588d98 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Tue, 7 Mar 2023 18:27:16 +0200 Subject: [PATCH 13/26] support for transform start/stop --- docs/resources/elasticsearch_transform.md | 26 ++++++++------- .../resource.tf | 4 ++- internal/clients/elasticsearch/index.go | 33 +++++++++++++++---- internal/elasticsearch/transform/transform.go | 20 +++++++---- .../elasticsearch/transform/transform_test.go | 7 ++-- internal/models/transform.go | 2 ++ 6 files changed, 64 insertions(+), 28 deletions(-) diff --git a/docs/resources/elasticsearch_transform.md b/docs/resources/elasticsearch_transform.md index 85f9f79cd..495191fe1 100644 --- a/docs/resources/elasticsearch_transform.md +++ b/docs/resources/elasticsearch_transform.md @@ -1,7 +1,7 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "elasticstack_elasticsearch_transform Resource - terraform-provider-elasticstack" -subcategory: "" +page_title: 'elasticstack_elasticsearch_transform Resource - terraform-provider-elasticstack' +subcategory: '' description: |- Manages Elasticsearch transforms. 
See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html --- @@ -59,11 +59,14 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { } } + enabled = false + defer_validation = false } ``` + ## Schema ### Required @@ -77,6 +80,7 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { - `defer_validation` (Boolean) When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created. - `description` (String) Free text description of the transform. - `elasticsearch_connection` (Block List, Max: 1, Deprecated) Elasticsearch connection configuration block. This property will be removed in a future provider version. Configure the Elasticsearch connection via the provider configuration instead. (see [below for nested schema](#nestedblock--elasticsearch_connection)) +- `enabled` (Boolean) Controls wether the transform is started or stopped. Default is `false` (stopped). - `frequency` (String) The interval between checks for changes in the source indices when the transform is running continuously. Defaults to `1m`. - `latest` (String) The latest method transforms the data by finding the latest document for each unique key. JSON definition expected. Either 'pivot' or 'latest' must be present. - `metadata` (String) Defines optional transform metadata. @@ -84,13 +88,14 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { - `retention_policy` (Block List, Max: 1) Defines a retention policy for the transform. (see [below for nested schema](#nestedblock--retention_policy)) - `settings` (Block List, Max: 1) Defines optional transform settings. (see [below for nested schema](#nestedblock--settings)) - `sync` (Block List, Max: 1) Defines the properties transforms require to run continuously. 
(see [below for nested schema](#nestedblock--sync)) -- `timeout` (String) Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`. +- `timeout` (String) Period to wait for a response from Elastisearch when performing any management operation. If no response is received before the timeout expires, the operation fails and returns an error. Defaults to `30s`. ### Read-Only - `id` (String) Internal identifier of the resource + ### Nested Schema for `destination` Required: @@ -101,8 +106,8 @@ Optional: - `pipeline` (String) The unique identifier for an ingest pipeline. - + ### Nested Schema for `source` Required: @@ -114,8 +119,8 @@ Optional: - `query` (String) A query clause that retrieves a subset of data from the source index. - `runtime_mappings` (String) Definitions of search-time runtime fields that can be used by the transform. - + ### Nested Schema for `elasticsearch_connection` Optional: @@ -132,8 +137,8 @@ Optional: - `password` (String, Sensitive) Password to use for API authentication to Elasticsearch. - `username` (String) Username to use for API authentication to Elasticsearch. - + ### Nested Schema for `retention_policy` Required: @@ -141,6 +146,7 @@ Required: - `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to set the retention policy. (see [below for nested schema](#nestedblock--retention_policy--time)) + ### Nested Schema for `retention_policy.time` Required: @@ -148,9 +154,8 @@ Required: - `field` (String) The date field that is used to calculate the age of the document. - `max_age` (String) Specifies the maximum age of a document in the destination index. - - + ### Nested Schema for `settings` Optional: @@ -163,8 +168,8 @@ Optional: - `num_failure_retries` (Number) Defines the number of retries on a recoverable failure before the transform task is marked as failed. 
The default value is the cluster-level setting num_transform_failure_retries. - `unattended` (Boolean) In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails. Defaults to false. - + ### Nested Schema for `sync` Required: @@ -172,6 +177,7 @@ Required: - `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to synchronize the source and destination indices. (see [below for nested schema](#nestedblock--sync--time)) + ### Nested Schema for `sync.time` Required: @@ -181,5 +187,3 @@ Required: Optional: - `delay` (String) The time delay between the current time and the latest input data time. The default value is 60s. - - diff --git a/examples/resources/elasticstack_elasticsearch_transform/resource.tf b/examples/resources/elasticstack_elasticsearch_transform/resource.tf index ce8a53230..68e44b8d5 100644 --- a/examples/resources/elasticstack_elasticsearch_transform/resource.tf +++ b/examples/resources/elasticstack_elasticsearch_transform/resource.tf @@ -44,5 +44,7 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { } } + enabled = false + defer_validation = false -} \ No newline at end of file +} diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index 4a24a0c6d..5009fc9b5 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -546,7 +546,7 @@ func DeleteIngestPipeline(ctx context.Context, apiClient *clients.ApiClient, nam } func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.PutTransformParams) diag.Diagnostics { - fmt.Println("entering PutTransform") + fmt.Println("entering PutTransform for", transform.Name) var diags diag.Diagnostics transformBytes, err := json.Marshal(transform) if err != nil { @@ -576,11 +576,18 @@ func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform * return diags } 
+ if params.Enabled { + _, err := esClient.TransformStartTransform(transform.Name) + if err != nil { + return diag.FromErr(err) + } + } + return diags } func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) (*models.Transform, diag.Diagnostics) { - fmt.Println("entering GetTransform for ", *name) + fmt.Println("entering GetTransform for", *name) var diags diag.Diagnostics esClient, err := apiClient.GetESClient() if err != nil { @@ -615,7 +622,7 @@ func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *strin } func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.UpdateTransformParams) diag.Diagnostics { - fmt.Println("entering UpdateTransform") + fmt.Println("entering UpdateTransform with Enabled", params.Enabled) var diags diag.Diagnostics transformBytes, err := json.Marshal(transform) if err != nil { @@ -645,11 +652,23 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor return diags } + if params.Enabled { + _, err := esClient.TransformStartTransform(transform.Name) + if err != nil { + return diag.FromErr(err) + } + } else { + _, err := esClient.TransformStopTransform(transform.Name) + if err != nil { + return diag.FromErr(err) + } + } + return diags } -func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name string) diag.Diagnostics { - fmt.Println("entering DeleteTransform for ", name) +func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) diag.Diagnostics { + fmt.Println("entering DeleteTransform for", *name) var diags diag.Diagnostics esClient, err := apiClient.GetESClient() @@ -657,12 +676,12 @@ func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name str return diag.FromErr(err) } - res, err := esClient.TransformDeleteTransform(name, esClient.TransformDeleteTransform.WithForce(true), esClient.TransformDeleteTransform.WithContext(ctx)) + res, err 
:= esClient.TransformDeleteTransform(*name, esClient.TransformDeleteTransform.WithForce(true), esClient.TransformDeleteTransform.WithContext(ctx)) if err != nil { return diag.FromErr(err) } defer res.Body.Close() - if diags := utils.CheckError(res, fmt.Sprintf("Unable to delete the transform: %s", name)); diags.HasError() { + if diags := utils.CheckError(res, fmt.Sprintf("Unable to delete the transform: %s", *name)); diags.HasError() { return diags } diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index a80ec43e9..532ced977 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -248,11 +248,17 @@ func ResourceTransform() *schema.Resource { }, "timeout": { Type: schema.TypeString, - Description: "Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`.", + Description: "Period to wait for a response from Elasticsearch when performing any management operation. If no response is received before the timeout expires, the operation fails and returns an error. Defaults to `30s`.", Optional: true, Default: "30s", ValidateFunc: utils.StringIsDuration, }, + "enabled": { + Type: schema.TypeBool, + Description: "Controls whether the transform is started or stopped. 
Default is `false` (stopped).", + Optional: true, + Default: false, + }, } utils.AddConnectionSchema(transformSchema) @@ -269,7 +275,6 @@ func ResourceTransform() *schema.Resource { } func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformCreate") client, diags := clients.NewApiClient(d, meta) if diags.HasError() { @@ -297,6 +302,8 @@ func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta i } params.Timeout = timeout + params.Enabled = d.Get("enabled").(bool) + if diags := elasticsearch.PutTransform(ctx, client, transform, ¶ms); diags.HasError() { return diags } @@ -306,7 +313,7 @@ func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta i } func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformRead") + client, diags := clients.NewApiClient(d, meta) if diags.HasError() { return diags @@ -335,7 +342,6 @@ func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta int } func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformUpdate") client, diags := clients.NewApiClient(d, meta) if diags.HasError() { @@ -366,6 +372,8 @@ func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta i } params.Timeout = timeout + params.Enabled = d.Get("enabled").(bool) + if diags := elasticsearch.UpdateTransform(ctx, client, updatedTransform, ¶ms); diags.HasError() { return diags } @@ -374,7 +382,7 @@ func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta i } func resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - fmt.Println("entering resourceTransformDelete") + client, diags := clients.NewApiClient(d, meta) if diags.HasError() { return 
diags @@ -386,7 +394,7 @@ func resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } - if diags := elasticsearch.DeleteTransform(ctx, client, compId.ResourceId); diags.HasError() { + if diags := elasticsearch.DeleteTransform(ctx, client, &compId.ResourceId); diags.HasError() { return diags } diff --git a/internal/elasticsearch/transform/transform_test.go b/internal/elasticsearch/transform/transform_test.go index 703420cc2..118e19fcf 100644 --- a/internal/elasticsearch/transform/transform_test.go +++ b/internal/elasticsearch/transform/transform_test.go @@ -1,14 +1,11 @@ package transform_test import ( - //"context" "fmt" - //"regexp" "testing" "github.com/elastic/terraform-provider-elasticstack/internal/acctest" "github.com/elastic/terraform-provider-elasticstack/internal/clients" - //"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/transform" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -128,6 +125,7 @@ resource "elasticstack_elasticsearch_transform" "test_pivot" { } }) frequency = "5m" + enabled = false defer_validation = true timeout = "1m" @@ -171,6 +169,7 @@ resource "elasticstack_elasticsearch_transform" "test_pivot" { } }) frequency = "10m" + enabled = true defer_validation = true timeout = "1m" @@ -201,6 +200,7 @@ resource "elasticstack_elasticsearch_transform" "test_latest" { "sort": "order_date" }) frequency = "2m" + enabled = false defer_validation = true timeout = "1m" @@ -270,6 +270,7 @@ resource "elasticstack_elasticsearch_transform" "test_pivot" { } }) frequency = "5m" + enabled = false defer_validation = false timeout = "1m" diff --git a/internal/models/transform.go b/internal/models/transform.go index 945fc631c..7a13f4a80 100644 --- a/internal/models/transform.go +++ b/internal/models/transform.go @@ -62,11 +62,13 @@ type TransformSettings 
struct { type PutTransformParams struct { DeferValidation bool Timeout time.Duration + Enabled bool } type UpdateTransformParams struct { DeferValidation bool Timeout time.Duration + Enabled bool } type GetTransformResponse struct { From 603e586254ce8e8ce5341f12ba150981bb7cd802 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Tue, 7 Mar 2023 19:05:37 +0200 Subject: [PATCH 14/26] adding options on the client calls for transform start/stop --- internal/clients/elasticsearch/index.go | 26 ++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index 5009fc9b5..fc5b57176 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -560,13 +560,13 @@ func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform * return diag.FromErr(err) } - opts := []func(*esapi.TransformPutTransformRequest){ + putOptions := []func(*esapi.TransformPutTransformRequest){ esClient.TransformPutTransform.WithContext(ctx), esClient.TransformPutTransform.WithDeferValidation(params.DeferValidation), esClient.TransformPutTransform.WithTimeout(params.Timeout), } - res, err := esClient.TransformPutTransform(bytes.NewReader(transformBytes), transform.Name, opts...) + res, err := esClient.TransformPutTransform(bytes.NewReader(transformBytes), transform.Name, putOptions...) if err != nil { return diag.FromErr(err) } @@ -577,7 +577,11 @@ func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform * } if params.Enabled { - _, err := esClient.TransformStartTransform(transform.Name) + startOptions := []func(*esapi.TransformStartTransformRequest){ + esClient.TransformStartTransform.WithContext(ctx), + esClient.TransformStartTransform.WithTimeout(params.Timeout), + } + _, err := esClient.TransformStartTransform(transform.Name, startOptions...) 
if err != nil { return diag.FromErr(err) } @@ -636,13 +640,13 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor return diag.FromErr(err) } - opts := []func(*esapi.TransformUpdateTransformRequest){ + updateOptions := []func(*esapi.TransformUpdateTransformRequest){ esClient.TransformUpdateTransform.WithContext(ctx), esClient.TransformUpdateTransform.WithDeferValidation(params.DeferValidation), esClient.TransformUpdateTransform.WithTimeout(params.Timeout), } - res, err := esClient.TransformUpdateTransform(bytes.NewReader(transformBytes), transform.Name, opts...) + res, err := esClient.TransformUpdateTransform(bytes.NewReader(transformBytes), transform.Name, updateOptions...) if err != nil { return diag.FromErr(err) } @@ -653,12 +657,20 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor } if params.Enabled { - _, err := esClient.TransformStartTransform(transform.Name) + startOptions := []func(*esapi.TransformStartTransformRequest){ + esClient.TransformStartTransform.WithContext(ctx), + esClient.TransformStartTransform.WithTimeout(params.Timeout), + } + _, err := esClient.TransformStartTransform(transform.Name, startOptions...) if err != nil { return diag.FromErr(err) } } else { - _, err := esClient.TransformStopTransform(transform.Name) + stopOptions := []func(*esapi.TransformStopTransformRequest){ + esClient.TransformStopTransform.WithContext(ctx), + esClient.TransformStopTransform.WithTimeout(params.Timeout), + } + _, err := esClient.TransformStopTransform(transform.Name, stopOptions...) 
if err != nil { return diag.FromErr(err) } From 1d9aa97a7be7ff744fd2810235cec7cfa873a5e2 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Thu, 9 Mar 2023 18:45:37 +0200 Subject: [PATCH 15/26] code updates based on feedback --- internal/clients/elasticsearch/index.go | 37 ++++++--- internal/elasticsearch/transform/transform.go | 79 +++++++++---------- .../elasticsearch/transform/transform_test.go | 62 ++++++++++++++- internal/utils/validation.go | 36 +-------- internal/utils/validation_test.go | 47 +++++++++++ 5 files changed, 175 insertions(+), 86 deletions(-) diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index fc5b57176..4f8f30a3c 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -546,15 +546,13 @@ func DeleteIngestPipeline(ctx context.Context, apiClient *clients.ApiClient, nam } func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.PutTransformParams) diag.Diagnostics { - fmt.Println("entering PutTransform for", transform.Name) + var diags diag.Diagnostics transformBytes, err := json.Marshal(transform) if err != nil { return diag.FromErr(err) } - fmt.Printf("%s\n", transformBytes) - esClient, err := apiClient.GetESClient() if err != nil { return diag.FromErr(err) @@ -581,27 +579,34 @@ func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform * esClient.TransformStartTransform.WithContext(ctx), esClient.TransformStartTransform.WithTimeout(params.Timeout), } - _, err := esClient.TransformStartTransform(transform.Name, startOptions...) + startRes, err := esClient.TransformStartTransform(transform.Name, startOptions...) 
if err != nil { return diag.FromErr(err) } + + defer startRes.Body.Close() + if diags := utils.CheckError(startRes, fmt.Sprintf("Unable to start transform: %s", transform.Name)); diags.HasError() { + return diags + } } return diags } func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) (*models.Transform, diag.Diagnostics) { - fmt.Println("entering GetTransform for", *name) + var diags diag.Diagnostics esClient, err := apiClient.GetESClient() if err != nil { return nil, diag.FromErr(err) } + req := esClient.TransformGetTransform.WithTransformID(*name) res, err := esClient.TransformGetTransform(req, esClient.TransformGetTransform.WithContext(ctx)) if err != nil { return nil, diag.FromErr(err) } + defer res.Body.Close() if res.StatusCode == http.StatusNotFound { return nil, nil @@ -626,15 +631,13 @@ func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *strin } func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.UpdateTransformParams) diag.Diagnostics { - fmt.Println("entering UpdateTransform with Enabled", params.Enabled) + var diags diag.Diagnostics transformBytes, err := json.Marshal(transform) if err != nil { return diag.FromErr(err) } - fmt.Printf("%s\n", transformBytes) - esClient, err := apiClient.GetESClient() if err != nil { return diag.FromErr(err) @@ -661,28 +664,36 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor esClient.TransformStartTransform.WithContext(ctx), esClient.TransformStartTransform.WithTimeout(params.Timeout), } - _, err := esClient.TransformStartTransform(transform.Name, startOptions...) + startRes, err := esClient.TransformStartTransform(transform.Name, startOptions...) 
if err != nil { return diag.FromErr(err) } + + defer startRes.Body.Close() + if diags := utils.CheckError(startRes, fmt.Sprintf("Unable to start transform: %s", transform.Name)); diags.HasError() { + return diags + } } else { stopOptions := []func(*esapi.TransformStopTransformRequest){ esClient.TransformStopTransform.WithContext(ctx), esClient.TransformStopTransform.WithTimeout(params.Timeout), } - _, err := esClient.TransformStopTransform(transform.Name, stopOptions...) + stopRes, err := esClient.TransformStopTransform(transform.Name, stopOptions...) if err != nil { return diag.FromErr(err) } + defer stopRes.Body.Close() + if diags := utils.CheckError(stopRes, fmt.Sprintf("Unable to stop transform: %s", transform.Name)); diags.HasError() { + return diags + } } return diags } func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) diag.Diagnostics { - fmt.Println("entering DeleteTransform for", *name) - var diags diag.Diagnostics + var diags diag.Diagnostics esClient, err := apiClient.GetESClient() if err != nil { return diag.FromErr(err) @@ -693,7 +704,7 @@ func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name *st return diag.FromErr(err) } defer res.Body.Close() - if diags := utils.CheckError(res, fmt.Sprintf("Unable to delete the transform: %s", *name)); diags.HasError() { + if diags := utils.CheckError(res, fmt.Sprintf("Unable to delete transform: %s", *name)); diags.HasError() { return diags } diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index 532ced977..4be7ab730 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -4,9 +4,7 @@ import ( "context" "encoding/json" "fmt" - //"reflect" "regexp" - //"strconv" "strings" "time" @@ -14,10 +12,8 @@ import ( "github.com/elastic/terraform-provider-elasticstack/internal/clients/elasticsearch" 
"github.com/elastic/terraform-provider-elasticstack/internal/models" "github.com/elastic/terraform-provider-elasticstack/internal/utils" - //"github.com/hashicorp/go-version" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - //"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) @@ -261,8 +257,6 @@ func ResourceTransform() *schema.Resource { }, } - utils.AddConnectionSchema(transformSchema) - return &schema.Resource{ Schema: transformSchema, Description: "Manages Elasticsearch transforms. See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html", @@ -292,17 +286,16 @@ func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta i return diag.FromErr(err) } - params := models.PutTransformParams{ - DeferValidation: d.Get("defer_validation").(bool), - } - timeout, err := time.ParseDuration(d.Get("timeout").(string)) if err != nil { return diag.FromErr(err) } - params.Timeout = timeout - params.Enabled = d.Get("enabled").(bool) + params := models.PutTransformParams{ + DeferValidation: d.Get("defer_validation").(bool), + Enabled: d.Get("enabled").(bool), + Timeout: timeout, + } if diags := elasticsearch.PutTransform(ctx, client, transform, ¶ms); diags.HasError() { return diags @@ -440,9 +433,10 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n if v, ok := d.GetOk("destination"); ok { definedDestination := v.([]interface{})[0].(map[string]interface{}) - transform.Destination = new(models.TransformDestination) - transform.Destination.Index = definedDestination["index"].(string) + transform.Destination = &models.TransformDestination{ + Index: definedDestination["index"].(string), + } if pipeline, ok := definedDestination["pipeline"]; ok && len(pipeline.(string)) > 0 { transform.Destination.Pipeline = 
pipeline.(string) @@ -470,7 +464,7 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n } if v, ok := d.GetOk("metadata"); ok { - metadata := make(map[string]interface{}) + var metadata map[string]interface{} if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&metadata); err != nil { return nil, err } @@ -479,8 +473,9 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n if v, ok := d.GetOk("retention_policy"); ok && v != nil { definedRetentionPolicy := v.([]interface{})[0].(map[string]interface{}) - retentionTime := models.TransformRetentionPolicyTime{} + if v, ok := definedRetentionPolicy["time"]; ok { + retentionTime := models.TransformRetentionPolicyTime{} var definedRetentionTime = v.([]interface{})[0].(map[string]interface{}) if f, ok := definedRetentionTime["field"]; ok { retentionTime.Field = f.(string) @@ -488,57 +483,61 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n if ma, ok := definedRetentionTime["max_age"]; ok { retentionTime.MaxAge = ma.(string) } - transform.RetentionPolicy = new(models.TransformRetentionPolicy) - transform.RetentionPolicy.Time = retentionTime + transform.RetentionPolicy = &models.TransformRetentionPolicy{ + Time: retentionTime, + } } } if v, ok := d.GetOk("sync"); ok { - definedRetentionPolicy := v.([]interface{})[0].(map[string]interface{}) - syncTime := models.TransformSyncTime{} - if v, ok := definedRetentionPolicy["time"]; ok { - var definedRetentionTime = v.([]interface{})[0].(map[string]interface{}) - if f, ok := definedRetentionTime["field"]; ok { + definedSync := v.([]interface{})[0].(map[string]interface{}) + + if v, ok := definedSync["time"]; ok { + syncTime := models.TransformSyncTime{} + var definedSyncTime = v.([]interface{})[0].(map[string]interface{}) + if f, ok := definedSyncTime["field"]; ok { syncTime.Field = f.(string) } - if d, ok := definedRetentionTime["delay"]; ok { + if d, ok := 
definedSyncTime["delay"]; ok { syncTime.Delay = d.(string) } - transform.Sync = new(models.TransformSync) - transform.Sync.Time = syncTime + transform.Sync = &models.TransformSync{ + Time: syncTime, + } } } if v, ok := d.GetOk("settings"); ok { definedSettings := v.([]interface{})[0].(map[string]interface{}) + settings := models.TransformSettings{} if v, ok := definedSettings["align_checkpoints"]; ok { - settings.AlignCheckpoints = new(bool) - *settings.AlignCheckpoints = v.(bool) + ac := v.(bool) + settings.AlignCheckpoints = &ac } if v, ok := definedSettings["dates_as_epoch_millis"]; ok { - settings.DatesAsEpochMillis = new(bool) - *settings.DatesAsEpochMillis = v.(bool) + dem := v.(bool) + settings.DatesAsEpochMillis = &dem } if v, ok := definedSettings["deduce_mappings"]; ok { - settings.DeduceMappings = new(bool) - *settings.DeduceMappings = v.(bool) + dm := v.(bool) + settings.DeduceMappings = &dm } if v, ok := definedSettings["docs_per_second"]; ok { - settings.DocsPerSecond = new(float64) - *settings.DocsPerSecond = v.(float64) + dps := v.(float64) + settings.DocsPerSecond = &dps } if v, ok := definedSettings["max_page_search_size"]; ok { - settings.MaxPageSearchSize = new(int) - *settings.MaxPageSearchSize = v.(int) + mpss := v.(int) + settings.MaxPageSearchSize = &mpss } if v, ok := definedSettings["num_failure_retries"]; ok { - settings.NumFailureRetries = new(int) - *settings.NumFailureRetries = v.(int) + nfr := v.(int) + settings.NumFailureRetries = &nfr } if v, ok := definedSettings["unattended"]; ok { - settings.Unattended = new(bool) - *settings.Unattended = v.(bool) + u := v.(bool) + settings.Unattended = &u } transform.Settings = &settings diff --git a/internal/elasticsearch/transform/transform_test.go b/internal/elasticsearch/transform/transform_test.go index 118e19fcf..788df9d21 100644 --- a/internal/elasticsearch/transform/transform_test.go +++ b/internal/elasticsearch/transform/transform_test.go @@ -89,6 +89,8 @@ func 
TestAccResourceTransformNoDefer(t *testing.T) { }) } +// create a transform referencing a non-existing source index; +// because validations are deferred, this should pass func testAccResourceTransformWithPivotCreate(name string) string { return fmt.Sprintf(` provider "elasticstack" { @@ -133,18 +135,76 @@ resource "elasticstack_elasticsearch_transform" "test_pivot" { `, name) } +// update the existing transform, add another source index and start it (enabled = true) +// validations are now unavoidable (at start), so make sure to create the indices _before_ updating the transform +// the tf script below uses implicit dependency, but `depends_on` is also an option func testAccResourceTransformWithPivotUpdate(name string) string { return fmt.Sprintf(` provider "elasticstack" { elasticsearch {} } +resource "elasticstack_elasticsearch_index" "test_source_index_1" { + name = "source_index_for_transform" + + alias { + name = "test_alias_1" + } + + mappings = jsonencode({ + properties = { + field1 = { type = "text" } + } + }) + + settings { + setting { + name = "index.number_of_replicas" + value = "2" + } + } + + deletion_protection = false + wait_for_active_shards = "all" + master_timeout = "1m" + timeout = "1m" +} + +resource "elasticstack_elasticsearch_index" "test_source_index_2" { + name = "additional_index" + + alias { + name = "test_alias_2" + } + + mappings = jsonencode({ + properties = { + field1 = { type = "text" } + } + }) + + settings { + setting { + name = "index.number_of_replicas" + value = "2" + } + } + + deletion_protection = false + wait_for_active_shards = "all" + master_timeout = "1m" + timeout = "1m" +} + resource "elasticstack_elasticsearch_transform" "test_pivot" { name = "%s" description = "yet another test description" source { - indices = ["source_index_for_transform", "additional_index"] + indices = [ + elasticstack_elasticsearch_index.test_source_index_1.name, + elasticstack_elasticsearch_index.test_source_index_2.name + ] } destination { diff 
--git a/internal/utils/validation.go b/internal/utils/validation.go index 8e78538b5..04a367482 100644 --- a/internal/utils/validation.go +++ b/internal/utils/validation.go @@ -2,6 +2,7 @@ package utils import ( "fmt" + "regexp" "time" ) @@ -31,40 +32,11 @@ func StringIsElasticDuration(i interface{}, k string) (warnings []string, errors return nil, []error{fmt.Errorf("%q contains an invalid duration: [empty]", k)} } - firstPartCount := 0 - for v != "" { - // first part must contain only characters in range [0-9] and . - if ('0' <= v[0] && v[0] <= '9') || v[0] == '.' { - v = v[1:] - firstPartCount++ - continue - } + r := regexp.MustCompile(`^[0-9]+(?:\.[0-9]+)?(?:d|h|m|s|ms|micros|nanos)$`) - if firstPartCount == 0 { - return nil, []error{fmt.Errorf("%q contains an invalid duration: should start with a numeric value", k)} - } - - if !isValidElasticTimeUnit(v) { - return nil, []error{fmt.Errorf("%q contains an invalid duration: unrecognized time unit [%s]", k, v)} - } - - break + if !r.MatchString(v) { + return nil, []error{fmt.Errorf("%q contains an invalid duration: not conforming to Elastic time-units format", k)} } return nil, nil } - -func isValidElasticTimeUnit(timeUnit string) bool { - switch timeUnit { - case - "d", - "h", - "m", - "s", - "ms", - "micros", - "nanos": - return true - } - return false -} diff --git a/internal/utils/validation_test.go b/internal/utils/validation_test.go index 3a8b8c1d0..ef06bed5a 100644 --- a/internal/utils/validation_test.go +++ b/internal/utils/validation_test.go @@ -46,3 +46,50 @@ func TestStringIsDuration(t *testing.T) { }) } } + +func TestStringIsElasticDuration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + i interface{} + k string + wantWarnings []string + wantErrors []error + }{ + { + name: "valid Elastic duration string", + i: "30d", + k: "delay", + }, + { + name: "invalid Elastic duration unit", + i: "12w", + k: "delay", + wantErrors: []error{errors.New(`"delay" contains an invalid duration: not 
conforming to Elastic time-units format`)}, + }, + { + name: "invalid Elastic duration value", + i: ".12s", + k: "delay", + wantErrors: []error{errors.New(`"delay" contains an invalid duration: not conforming to Elastic time-units format`)}, + }, + { + name: "invalid data type", + i: 30, + k: "delay", + wantErrors: []error{errors.New("expected type of delay to be string")}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotWarnings, gotErrors := StringIsElasticDuration(tt.i, tt.k) + if !reflect.DeepEqual(gotWarnings, tt.wantWarnings) { + t.Errorf("StringIsElasticDuration() gotWarnings = %v, want %v", gotWarnings, tt.wantWarnings) + } + if !reflect.DeepEqual(gotErrors, tt.wantErrors) { + t.Errorf("StringIsElasticDuration() gotErrors = %v, want %v", gotErrors, tt.wantErrors) + } + }) + } +} From 2f3b5e8ed8045dc70fbc307e88134220411a4c95 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Thu, 9 Mar 2023 19:11:19 +0200 Subject: [PATCH 16/26] updated documentation (added md template) --- docs/resources/elasticsearch_transform.md | 35 ++++++------------- .../resources/elasticsearch_transform.md.tmpl | 21 +++++++++++ 2 files changed, 31 insertions(+), 25 deletions(-) create mode 100644 templates/resources/elasticsearch_transform.md.tmpl diff --git a/docs/resources/elasticsearch_transform.md b/docs/resources/elasticsearch_transform.md index 495191fe1..5936e3178 100644 --- a/docs/resources/elasticsearch_transform.md +++ b/docs/resources/elasticsearch_transform.md @@ -1,14 +1,14 @@ --- -# generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: 'elasticstack_elasticsearch_transform Resource - terraform-provider-elasticstack' -subcategory: '' +subcategory: 'Transform' +layout: '' +page_title: 'Elasticstack: elasticstack_elasticsearch_transform Resource' description: |- - Manages Elasticsearch transforms. See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html + Manages transforms. 
Transforms enable you to convert existing Elasticsearch indices into summarized indices. --- -# elasticstack_elasticsearch_transform (Resource) +# Resource: elasticstack_elasticsearch_transform -Manages Elasticsearch transforms. See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html +Creates, updates, starts and stops a transform. See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html ## Example Usage @@ -79,7 +79,6 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { - `defer_validation` (Boolean) When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created. - `description` (String) Free text description of the transform. -- `elasticsearch_connection` (Block List, Max: 1, Deprecated) Elasticsearch connection configuration block. This property will be removed in a future provider version. Configure the Elasticsearch connection via the provider configuration instead. (see [below for nested schema](#nestedblock--elasticsearch_connection)) - `enabled` (Boolean) Controls wether the transform is started or stopped. Default is `false` (stopped). - `frequency` (String) The interval between checks for changes in the source indices when the transform is running continuously. Defaults to `1m`. - `latest` (String) The latest method transforms the data by finding the latest document for each unique key. JSON definition expected. Either 'pivot' or 'latest' must be present. @@ -119,24 +118,6 @@ Optional: - `query` (String) A query clause that retrieves a subset of data from the source index. - `runtime_mappings` (String) Definitions of search-time runtime fields that can be used by the transform. 
- - -### Nested Schema for `elasticsearch_connection` - -Optional: - -- `api_key` (String, Sensitive) API Key to use for authentication to Elasticsearch -- `ca_data` (String) PEM-encoded custom Certificate Authority certificate -- `ca_file` (String) Path to a custom Certificate Authority certificate -- `cert_data` (String) PEM encoded certificate for client auth -- `cert_file` (String) Path to a file containing the PEM encoded certificate for client auth -- `endpoints` (List of String, Sensitive) A list of endpoints where the terraform provider will point to, this must include the http(s) schema and port number. -- `insecure` (Boolean) Disable TLS certificate validation -- `key_data` (String, Sensitive) PEM encoded private key for client auth -- `key_file` (String) Path to a file containing the PEM encoded private key for client auth -- `password` (String, Sensitive) Password to use for API authentication to Elasticsearch. -- `username` (String) Username to use for API authentication to Elasticsearch. - ### Nested Schema for `retention_policy` @@ -187,3 +168,7 @@ Required: Optional: - `delay` (String) The time delay between the current time and the latest input data time. The default value is 60s. + +## Import + +Not implemented yet. diff --git a/templates/resources/elasticsearch_transform.md.tmpl b/templates/resources/elasticsearch_transform.md.tmpl new file mode 100644 index 000000000..be274e4f0 --- /dev/null +++ b/templates/resources/elasticsearch_transform.md.tmpl @@ -0,0 +1,21 @@ +--- +subcategory: "Transform" +layout: "" +page_title: "Elasticstack: elasticstack_elasticsearch_transform Resource" +description: |- + Manages transforms. Transforms enable you to convert existing Elasticsearch indices into summarized indices. +--- + +# Resource: elasticstack_elasticsearch_transform + +Creates, updates, starts and stops a transform. 
See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html + +## Example Usage + +{{ tffile "examples/resources/elasticstack_elasticsearch_transform/resource.tf" }} + +{{ .SchemaMarkdown | trimspace }} + +## Import + +Not implemented yet. From e497cd9f60365b48c30bca20bc3e843ddfd02c51 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Thu, 9 Mar 2023 21:46:55 +0200 Subject: [PATCH 17/26] timeout query param is only available from v7.17.0 --- CHANGELOG.md | 1 + internal/clients/elasticsearch/index.go | 166 -------------- internal/clients/elasticsearch/transform.go | 240 ++++++++++++++++++++ 3 files changed, 241 insertions(+), 166 deletions(-) create mode 100644 internal/clients/elasticsearch/transform.go diff --git a/CHANGELOG.md b/CHANGELOG.md index ee10cc58c..d1f9d7b9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ } ``` - Add support for managing Kibana spaces ([#272](https://github.com/elastic/terraform-provider-elasticstack/pull/272)) +- Add support for managing Elasticsearch transforms ([#284](https://github.com/elastic/terraform-provider-elasticstack/pull/284)) ### Fixed - Respect `ignore_unavailable` and `include_global_state` values when configuring SLM policies ([#224](https://github.com/elastic/terraform-provider-elasticstack/pull/224)) diff --git a/internal/clients/elasticsearch/index.go b/internal/clients/elasticsearch/index.go index 4f8f30a3c..b9c3054fb 100644 --- a/internal/clients/elasticsearch/index.go +++ b/internal/clients/elasticsearch/index.go @@ -544,169 +544,3 @@ func DeleteIngestPipeline(ctx context.Context, apiClient *clients.ApiClient, nam } return diags } - -func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.PutTransformParams) diag.Diagnostics { - - var diags diag.Diagnostics - transformBytes, err := json.Marshal(transform) - if err != nil { - return diag.FromErr(err) - } - - esClient, err := apiClient.GetESClient() - if err != nil { 
- return diag.FromErr(err) - } - - putOptions := []func(*esapi.TransformPutTransformRequest){ - esClient.TransformPutTransform.WithContext(ctx), - esClient.TransformPutTransform.WithDeferValidation(params.DeferValidation), - esClient.TransformPutTransform.WithTimeout(params.Timeout), - } - - res, err := esClient.TransformPutTransform(bytes.NewReader(transformBytes), transform.Name, putOptions...) - if err != nil { - return diag.FromErr(err) - } - - defer res.Body.Close() - if diags := utils.CheckError(res, fmt.Sprintf("Unable to create transform: %s", transform.Name)); diags.HasError() { - return diags - } - - if params.Enabled { - startOptions := []func(*esapi.TransformStartTransformRequest){ - esClient.TransformStartTransform.WithContext(ctx), - esClient.TransformStartTransform.WithTimeout(params.Timeout), - } - startRes, err := esClient.TransformStartTransform(transform.Name, startOptions...) - if err != nil { - return diag.FromErr(err) - } - - defer startRes.Body.Close() - if diags := utils.CheckError(startRes, fmt.Sprintf("Unable to start transform: %s", transform.Name)); diags.HasError() { - return diags - } - } - - return diags -} - -func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) (*models.Transform, diag.Diagnostics) { - - var diags diag.Diagnostics - esClient, err := apiClient.GetESClient() - if err != nil { - return nil, diag.FromErr(err) - } - - req := esClient.TransformGetTransform.WithTransformID(*name) - res, err := esClient.TransformGetTransform(req, esClient.TransformGetTransform.WithContext(ctx)) - if err != nil { - return nil, diag.FromErr(err) - } - - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return nil, nil - } - if diags := utils.CheckError(res, fmt.Sprintf("Unable to get requested transform: %s", *name)); diags.HasError() { - return nil, diags - } - - transformsResponse := models.GetTransformResponse{} - if err := json.NewDecoder(res.Body).Decode(&transformsResponse); err != nil { 
- return nil, diag.FromErr(err) - } - - for _, t := range transformsResponse.Transforms { - if t.Id == *name { - t.Name = *name - return &t, diags - } - } - - return nil, diags -} - -func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.UpdateTransformParams) diag.Diagnostics { - - var diags diag.Diagnostics - transformBytes, err := json.Marshal(transform) - if err != nil { - return diag.FromErr(err) - } - - esClient, err := apiClient.GetESClient() - if err != nil { - return diag.FromErr(err) - } - - updateOptions := []func(*esapi.TransformUpdateTransformRequest){ - esClient.TransformUpdateTransform.WithContext(ctx), - esClient.TransformUpdateTransform.WithDeferValidation(params.DeferValidation), - esClient.TransformUpdateTransform.WithTimeout(params.Timeout), - } - - res, err := esClient.TransformUpdateTransform(bytes.NewReader(transformBytes), transform.Name, updateOptions...) - if err != nil { - return diag.FromErr(err) - } - - defer res.Body.Close() - if diags := utils.CheckError(res, fmt.Sprintf("Unable to update transform: %s", transform.Name)); diags.HasError() { - return diags - } - - if params.Enabled { - startOptions := []func(*esapi.TransformStartTransformRequest){ - esClient.TransformStartTransform.WithContext(ctx), - esClient.TransformStartTransform.WithTimeout(params.Timeout), - } - startRes, err := esClient.TransformStartTransform(transform.Name, startOptions...) - if err != nil { - return diag.FromErr(err) - } - - defer startRes.Body.Close() - if diags := utils.CheckError(startRes, fmt.Sprintf("Unable to start transform: %s", transform.Name)); diags.HasError() { - return diags - } - } else { - stopOptions := []func(*esapi.TransformStopTransformRequest){ - esClient.TransformStopTransform.WithContext(ctx), - esClient.TransformStopTransform.WithTimeout(params.Timeout), - } - stopRes, err := esClient.TransformStopTransform(transform.Name, stopOptions...) 
- if err != nil { - return diag.FromErr(err) - } - defer stopRes.Body.Close() - if diags := utils.CheckError(stopRes, fmt.Sprintf("Unable to stop transform: %s", transform.Name)); diags.HasError() { - return diags - } - } - - return diags -} - -func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) diag.Diagnostics { - - var diags diag.Diagnostics - esClient, err := apiClient.GetESClient() - if err != nil { - return diag.FromErr(err) - } - - res, err := esClient.TransformDeleteTransform(*name, esClient.TransformDeleteTransform.WithForce(true), esClient.TransformDeleteTransform.WithContext(ctx)) - if err != nil { - return diag.FromErr(err) - } - defer res.Body.Close() - if diags := utils.CheckError(res, fmt.Sprintf("Unable to delete transform: %s", *name)); diags.HasError() { - return diags - } - - return diags -} diff --git a/internal/clients/elasticsearch/transform.go b/internal/clients/elasticsearch/transform.go new file mode 100644 index 000000000..6fc138511 --- /dev/null +++ b/internal/clients/elasticsearch/transform.go @@ -0,0 +1,240 @@ +package elasticsearch + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/elastic/terraform-provider-elasticstack/internal/clients" + "github.com/elastic/terraform-provider-elasticstack/internal/models" + "github.com/elastic/terraform-provider-elasticstack/internal/utils" + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" +) + +var apiOperationTimeoutParamMinSupportedVersion = version.Must(version.NewVersion("7.17.0")) + +func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.PutTransformParams) diag.Diagnostics { + + var diags diag.Diagnostics + transformBytes, err := json.Marshal(transform) + if err != nil { + return diag.FromErr(err) + } + + esClient, err := 
apiClient.GetESClient() + if err != nil { + return diag.FromErr(err) + } + + serverVersion, diags := apiClient.ServerVersion(ctx) + if diags.HasError() { + return diags + } + + withTimeout := serverVersion.GreaterThanOrEqual(apiOperationTimeoutParamMinSupportedVersion) + + putOptions := []func(*esapi.TransformPutTransformRequest){ + esClient.TransformPutTransform.WithContext(ctx), + esClient.TransformPutTransform.WithDeferValidation(params.DeferValidation), + } + + if withTimeout { + putOptions = append(putOptions, esClient.TransformPutTransform.WithTimeout(params.Timeout)) + } + + res, err := esClient.TransformPutTransform(bytes.NewReader(transformBytes), transform.Name, putOptions...) + if err != nil { + return diag.FromErr(err) + } + + defer res.Body.Close() + if diags := utils.CheckError(res, fmt.Sprintf("Unable to create transform: %s", transform.Name)); diags.HasError() { + return diags + } + + if params.Enabled { + + var timeout time.Duration + if withTimeout { + timeout = params.Timeout + } else { + timeout = 0 + } + + if diags := startTransform(ctx, esClient, transform.Name, timeout); diags.HasError() { + return diags + } + } + + return diags +} + +func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) (*models.Transform, diag.Diagnostics) { + + var diags diag.Diagnostics + esClient, err := apiClient.GetESClient() + if err != nil { + return nil, diag.FromErr(err) + } + + req := esClient.TransformGetTransform.WithTransformID(*name) + res, err := esClient.TransformGetTransform(req, esClient.TransformGetTransform.WithContext(ctx)) + if err != nil { + return nil, diag.FromErr(err) + } + + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return nil, nil + } + if diags := utils.CheckError(res, fmt.Sprintf("Unable to get requested transform: %s", *name)); diags.HasError() { + return nil, diags + } + + transformsResponse := models.GetTransformResponse{} + if err := 
json.NewDecoder(res.Body).Decode(&transformsResponse); err != nil { + return nil, diag.FromErr(err) + } + + for _, t := range transformsResponse.Transforms { + if t.Id == *name { + t.Name = *name + return &t, diags + } + } + + return nil, diags +} + +func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.UpdateTransformParams) diag.Diagnostics { + + var diags diag.Diagnostics + transformBytes, err := json.Marshal(transform) + if err != nil { + return diag.FromErr(err) + } + + esClient, err := apiClient.GetESClient() + if err != nil { + return diag.FromErr(err) + } + + serverVersion, diags := apiClient.ServerVersion(ctx) + if diags.HasError() { + return diags + } + + withTimeout := serverVersion.GreaterThanOrEqual(apiOperationTimeoutParamMinSupportedVersion) + + updateOptions := []func(*esapi.TransformUpdateTransformRequest){ + esClient.TransformUpdateTransform.WithContext(ctx), + esClient.TransformUpdateTransform.WithDeferValidation(params.DeferValidation), + } + + if withTimeout { + updateOptions = append(updateOptions, esClient.TransformUpdateTransform.WithTimeout(params.Timeout)) + } + + res, err := esClient.TransformUpdateTransform(bytes.NewReader(transformBytes), transform.Name, updateOptions...) 
+ if err != nil { + return diag.FromErr(err) + } + + defer res.Body.Close() + if diags := utils.CheckError(res, fmt.Sprintf("Unable to update transform: %s", transform.Name)); diags.HasError() { + return diags + } + + var timeout time.Duration + if withTimeout { + timeout = params.Timeout + } else { + timeout = 0 + } + + if params.Enabled { + if diags := startTransform(ctx, esClient, transform.Name, timeout); diags.HasError() { + return diags + } + } else { + if diags := stopTransform(ctx, esClient, transform.Name, timeout); diags.HasError() { + return diags + } + } + + return diags +} + +func DeleteTransform(ctx context.Context, apiClient *clients.ApiClient, name *string) diag.Diagnostics { + + var diags diag.Diagnostics + esClient, err := apiClient.GetESClient() + if err != nil { + return diag.FromErr(err) + } + + res, err := esClient.TransformDeleteTransform(*name, esClient.TransformDeleteTransform.WithForce(true), esClient.TransformDeleteTransform.WithContext(ctx)) + if err != nil { + return diag.FromErr(err) + } + defer res.Body.Close() + if diags := utils.CheckError(res, fmt.Sprintf("Unable to delete transform: %s", *name)); diags.HasError() { + return diags + } + + return diags +} + +func startTransform(ctx context.Context, esClient *elasticsearch.Client, transformName string, timeout time.Duration) diag.Diagnostics { + var diags diag.Diagnostics + + startOptions := []func(*esapi.TransformStartTransformRequest){ + esClient.TransformStartTransform.WithContext(ctx), + } + + if timeout > 0 { + startOptions = append(startOptions, esClient.TransformStartTransform.WithTimeout(timeout)) + } + + startRes, err := esClient.TransformStartTransform(transformName, startOptions...) 
+ if err != nil { + return diag.FromErr(err) + } + + defer startRes.Body.Close() + if diags := utils.CheckError(startRes, fmt.Sprintf("Unable to start transform: %s", transformName)); diags.HasError() { + return diags + } + + return diags +} + +func stopTransform(ctx context.Context, esClient *elasticsearch.Client, transformName string, timeout time.Duration) diag.Diagnostics { + var diags diag.Diagnostics + + stopOptions := []func(*esapi.TransformStopTransformRequest){ + esClient.TransformStopTransform.WithContext(ctx), + } + + if timeout > 0 { + stopOptions = append(stopOptions, esClient.TransformStopTransform.WithTimeout(timeout)) + } + + startRes, err := esClient.TransformStopTransform(transformName, stopOptions...) + if err != nil { + return diag.FromErr(err) + } + + defer startRes.Body.Close() + if diags := utils.CheckError(startRes, fmt.Sprintf("Unable to stop transform: %s", transformName)); diags.HasError() { + return diags + } + + return diags +} From c040977d79389ff9c9e43239a7157926c137acf7 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Thu, 9 Mar 2023 22:30:49 +0200 Subject: [PATCH 18/26] fixed the messed up md file --- docs/resources/elasticsearch_transform.md | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/docs/resources/elasticsearch_transform.md b/docs/resources/elasticsearch_transform.md index 5936e3178..5d43647f8 100644 --- a/docs/resources/elasticsearch_transform.md +++ b/docs/resources/elasticsearch_transform.md @@ -1,7 +1,7 @@ --- -subcategory: 'Transform' -layout: '' -page_title: 'Elasticstack: elasticstack_elasticsearch_transform Resource' +subcategory: "Transform" +layout: "" +page_title: "Elasticstack: elasticstack_elasticsearch_transform Resource" description: |- Manages transforms. Transforms enable you to convert existing Elasticsearch indices into summarized indices. 
--- @@ -66,7 +66,6 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { ``` - ## Schema ### Required @@ -94,7 +93,6 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { - `id` (String) Internal identifier of the resource - ### Nested Schema for `destination` Required: @@ -105,8 +103,8 @@ Optional: - `pipeline` (String) The unique identifier for an ingest pipeline. - + ### Nested Schema for `source` Required: @@ -118,8 +116,8 @@ Optional: - `query` (String) A query clause that retrieves a subset of data from the source index. - `runtime_mappings` (String) Definitions of search-time runtime fields that can be used by the transform. - + ### Nested Schema for `retention_policy` Required: @@ -127,7 +125,6 @@ Required: - `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to set the retention policy. (see [below for nested schema](#nestedblock--retention_policy--time)) - ### Nested Schema for `retention_policy.time` Required: @@ -135,8 +132,9 @@ Required: - `field` (String) The date field that is used to calculate the age of the document. - `max_age` (String) Specifies the maximum age of a document in the destination index. - + + ### Nested Schema for `settings` Optional: @@ -149,8 +147,8 @@ Optional: - `num_failure_retries` (Number) Defines the number of retries on a recoverable failure before the transform task is marked as failed. The default value is the cluster-level setting num_transform_failure_retries. - `unattended` (Boolean) In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails. Defaults to false. - + ### Nested Schema for `sync` Required: @@ -158,7 +156,6 @@ Required: - `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to synchronize the source and destination indices. 
(see [below for nested schema](#nestedblock--sync--time)) - ### Nested Schema for `sync.time` Required: From 8fc867cbd745d72eca254809c2e76e418ebed177 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Sat, 11 Mar 2023 21:18:11 +0200 Subject: [PATCH 19/26] settings promoted to individual arguments; drift detection for updatable props --- internal/clients/elasticsearch/transform.go | 72 +++- internal/elasticsearch/transform/transform.go | 382 ++++++++++++++---- internal/models/transform.go | 19 +- 3 files changed, 386 insertions(+), 87 deletions(-) diff --git a/internal/clients/elasticsearch/transform.go b/internal/clients/elasticsearch/transform.go index 6fc138511..0f2d4d4eb 100644 --- a/internal/clients/elasticsearch/transform.go +++ b/internal/clients/elasticsearch/transform.go @@ -97,19 +97,77 @@ func GetTransform(ctx context.Context, apiClient *clients.ApiClient, name *strin return nil, diags } - transformsResponse := models.GetTransformResponse{} + var transformsResponse models.GetTransformResponse if err := json.NewDecoder(res.Body).Decode(&transformsResponse); err != nil { return nil, diag.FromErr(err) } + var foundTransform *models.Transform = nil for _, t := range transformsResponse.Transforms { if t.Id == *name { - t.Name = *name - return &t, diags + foundTransform = &t + break } } - return nil, diags + if foundTransform == nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Unable to find the transform in the cluster", + Detail: fmt.Sprintf(`Unable to find "%s" transform in the cluster`, *name), + }) + + return nil, diags + } + + foundTransform.Name = *name + return foundTransform, diags +} + +func GetTransformStats(ctx context.Context, apiClient *clients.ApiClient, name *string) (*models.TransformStats, diag.Diagnostics) { + var diags diag.Diagnostics + esClient, err := apiClient.GetESClient() + if err != nil { + return nil, diag.FromErr(err) + } + + getStatsOptions := []func(*esapi.TransformGetTransformStatsRequest){ + 
esClient.TransformGetTransformStats.WithContext(ctx), + } + + statsRes, err := esClient.TransformGetTransformStats(*name, getStatsOptions...) + if err != nil { + return nil, diag.FromErr(err) + } + + defer statsRes.Body.Close() + if diags := utils.CheckError(statsRes, fmt.Sprintf("Unable to get transform stats: %s", *name)); diags.HasError() { + return nil, diags + } + + var transformsStatsResponse models.GetTransformStatsResponse + if err := json.NewDecoder(statsRes.Body).Decode(&transformsStatsResponse); err != nil { + return nil, diag.FromErr(err) + } + + var foundTransformStats *models.TransformStats = nil + for _, ts := range transformsStatsResponse.TransformStats { + if ts.Id == *name { + foundTransformStats = &ts + break + } + } + + if foundTransformStats == nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Unable to find the transform stats in the cluster", + Detail: fmt.Sprintf(`Unable to find "%s" transform stats in the cluster`, *name), + }) + return nil, diags + } + + return foundTransformStats, diags } func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.UpdateTransformParams) diag.Diagnostics { @@ -158,11 +216,13 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor timeout = 0 } - if params.Enabled { + if params.Enabled && !params.WasEnabled { if diags := startTransform(ctx, esClient, transform.Name, timeout); diags.HasError() { return diags } - } else { + } + + if !params.Enabled && params.WasEnabled { if diags := stopTransform(ctx, esClient, transform.Name, timeout); diags.HasError() { return diags } diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index 4be7ab730..16a28d280 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -60,14 +60,16 @@ func ResourceTransform() *schema.Resource { Description: "A query 
clause that retrieves a subset of data from the source index.", Type: schema.TypeString, Optional: true, - Default: `{"match_all":{}}}`, + Default: `{"match_all":{}}`, DiffSuppressFunc: utils.DiffJsonSuppress, ValidateFunc: validation.StringIsJSON, }, "runtime_mappings": { - Description: "Definitions of search-time runtime fields that can be used by the transform.", - Type: schema.TypeString, - Optional: true, + Description: "Definitions of search-time runtime fields that can be used by the transform.", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: utils.DiffJsonSuppress, + ValidateFunc: validation.StringIsJSON, }, }, }, @@ -132,7 +134,7 @@ func ResourceTransform() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "time": { - Description: "Specifies that the transform uses a time field to set the retention policy.", + Description: "Specifies that the transform uses a time field to set the retention policy. This is currently the only supported option.", Type: schema.TypeList, Required: true, MaxItems: 1, @@ -163,7 +165,7 @@ func ResourceTransform() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "time": { - Description: "Specifies that the transform uses a time field to synchronize the source and destination indices.", + Description: "Specifies that the transform uses a time field to synchronize the source and destination indices. 
This is currently the only supported option.", Type: schema.TypeList, Required: true, MaxItems: 1, @@ -187,54 +189,50 @@ func ResourceTransform() *schema.Resource { }, }, }, - "settings": { - Description: "Defines optional transform settings.", - Type: schema.TypeList, + "align_checkpoints": { + Description: "Specifies whether the transform checkpoint ranges should be optimized for performance.", + Type: schema.TypeBool, Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "align_checkpoints": { - Description: "Specifies whether the transform checkpoint ranges should be optimized for performance. Default value is true.", - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "dates_as_epoch_millis": { - Description: "Defines if dates in the output should be written as ISO formatted string (default) or as millis since epoch.", - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "deduce_mappings": { - Description: "Specifies whether the transform should deduce the destination index mappings from the transform config. The default value is true", - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "docs_per_second": { - Description: "Specifies a limit on the number of input documents per second. Default value is null, which disables throttling.", - Type: schema.TypeFloat, - Optional: true, - }, - "max_page_search_size": { - Description: "Defines the initial page size to use for the composite aggregation for each checkpoint. The default value is 500.", - Type: schema.TypeInt, - Optional: true, - }, - "num_failure_retries": { - Description: "Defines the number of retries on a recoverable failure before the transform task is marked as failed. 
The default value is the cluster-level setting num_transform_failure_retries.", - Type: schema.TypeInt, - Optional: true, - }, - "unattended": { - Description: "In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails. Defaults to false.", - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - }, - }, + Default: true, + }, + "dates_as_epoch_millis": { + Description: "Defines if dates in the output should be written as ISO formatted string (default) or as millis since epoch.", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "deduce_mappings": { + Description: "Specifies whether the transform should deduce the destination index mappings from the transform config.", + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "docs_per_second": { + Description: "Specifies a limit on the number of input documents per second. Default (unset) value disables throttling.", + Type: schema.TypeFloat, + Optional: true, + Default: -1, + ValidateFunc: validation.FloatAtLeast(0), + }, + "max_page_search_size": { + Description: "Defines the initial page size to use for the composite aggregation for each checkpoint. Default is 500.", + Type: schema.TypeInt, + Optional: true, + Default: -1, + ValidateFunc: validation.IntBetween(10, 65536), + }, + "num_failure_retries": { + Description: "Defines the number of retries on a recoverable failure before the transform task is marked as failed. 
The default value is the cluster-level setting num_transform_failure_retries.", + Type: schema.TypeInt, + Optional: true, + Default: -2, + ValidateFunc: validation.IntBetween(-1, 100), + }, + "unattended": { + Description: "In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails.", + Type: schema.TypeBool, + Optional: true, + Default: false, }, "defer_validation": { Type: schema.TypeBool, @@ -321,6 +319,8 @@ func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta int return diag.FromErr(err) } + // actual resource state is established from two sources: the transform definition (model) and the transform stats + // 1. read transform definition transform, diags := elasticsearch.GetTransform(ctx, client, &transformName) if transform == nil && diags == nil { tflog.Warn(ctx, fmt.Sprintf(`Transform "%s" not found, removing from state`, compId.ResourceId)) @@ -331,6 +331,20 @@ func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta int return diags } + if err := updateResourceDataFromModel(ctx, d, transform); err != nil { + return diag.FromErr(err) + } + + // 2. 
read transform stats + transformStats, diags := elasticsearch.GetTransformStats(ctx, client, &transformName) + if diags.HasError() { + return diags + } + + if err := updateResourceDataFromStats(ctx, d, transformStats); err != nil { + return diag.FromErr(err) + } + return diags } @@ -367,6 +381,12 @@ func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta i params.Enabled = d.Get("enabled").(bool) + transformStats, diags := elasticsearch.GetTransformStats(ctx, client, &transformName) + if diags.HasError() { + return diags + } + params.WasEnabled = transformStats.IsStarted() + if diags := elasticsearch.UpdateTransform(ctx, client, updatedTransform, ¶ms); diags.HasError() { return diags } @@ -507,41 +527,245 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n } } - if v, ok := d.GetOk("settings"); ok { - definedSettings := v.([]interface{})[0].(map[string]interface{}) + // settings + settings := models.TransformSettings{} + setSettings := false + + if v, ok := d.GetOk("align_checkpoints"); ok { + setSettings = true + ac := v.(bool) + settings.AlignCheckpoints = &ac + } + if v, ok := d.GetOk("dates_as_epoch_millis"); ok { + setSettings = true + dem := v.(bool) + settings.DatesAsEpochMillis = &dem + } + if v, ok := d.GetOk("deduce_mappings"); ok { + setSettings = true + dm := v.(bool) + settings.DeduceMappings = &dm + } + if v, ok := d.GetOk("docs_per_second"); ok && v.(float64) >= 0 { + setSettings = true + dps := v.(float64) + settings.DocsPerSecond = &dps + } + if v, ok := d.GetOk("max_page_search_size"); ok && v.(int) > 10 { + setSettings = true + mpss := v.(int) + settings.MaxPageSearchSize = &mpss + } + if v, ok := d.GetOk("num_failure_retries"); ok && v.(int) >= -1 { + setSettings = true + nfr := v.(int) + settings.NumFailureRetries = &nfr + } + if v, ok := d.GetOk("unattended"); ok { + setSettings = true + u := v.(bool) + settings.Unattended = &u + } + + if setSettings { + transform.Settings = &settings + } 
+ + return &transform, nil +} + +func updateResourceDataFromModel(ctx context.Context, d *schema.ResourceData, transform *models.Transform) error { + + // transform.Description + if err := d.Set("description", transform.Description); err != nil { + return err + } + + // transform.Source + if err := d.Set("source", flattenSource(transform.Source)); err != nil { + return err + } + + // transform.Destination + if err := d.Set("destination", flattenDestination(transform.Destination)); err != nil { + return err + } + + // transform.Frequency + if err := d.Set("frequency", transform.Frequency); err != nil { + return err + } + + // transform.Sync + if err := d.Set("sync", flattenSync(transform.Sync)); err != nil { + return err + } + + // transform.RetentionPolicy + if err := d.Set("retention_policy", flattenRetentionPolicy(transform.RetentionPolicy)); err != nil { + return err + } + + // transform.Settings + if transform.Settings != nil && transform.Settings.AlignCheckpoints != nil { + if err := d.Set("align_checkpoints", *(transform.Settings.AlignCheckpoints)); err != nil { + return err + } + } + + if transform.Settings != nil && transform.Settings.DatesAsEpochMillis != nil { + if err := d.Set("dates_as_epoch_millis", *(transform.Settings.DatesAsEpochMillis)); err != nil { + return err + } + } - settings := models.TransformSettings{} - if v, ok := definedSettings["align_checkpoints"]; ok { - ac := v.(bool) - settings.AlignCheckpoints = &ac + if transform.Settings != nil && transform.Settings.DeduceMappings != nil { + if err := d.Set("deduce_mappings", *(transform.Settings.DeduceMappings)); err != nil { + return err } - if v, ok := definedSettings["dates_as_epoch_millis"]; ok { - dem := v.(bool) - settings.DatesAsEpochMillis = &dem + } + + if transform.Settings != nil && transform.Settings.DocsPerSecond != nil { + if err := d.Set("docs_per_second", *(transform.Settings.DocsPerSecond)); err != nil { + return err + } + } + + if transform.Settings != nil && 
transform.Settings.MaxPageSearchSize != nil { + if err := d.Set("max_page_search_size", *(transform.Settings.MaxPageSearchSize)); err != nil { + return err } - if v, ok := definedSettings["deduce_mappings"]; ok { - dm := v.(bool) - settings.DeduceMappings = &dm + } + + if transform.Settings != nil && transform.Settings.NumFailureRetries != nil { + if err := d.Set("num_failure_retries", *(transform.Settings.NumFailureRetries)); err != nil { + return err } - if v, ok := definedSettings["docs_per_second"]; ok { - dps := v.(float64) - settings.DocsPerSecond = &dps + } + + if transform.Settings != nil && transform.Settings.Unattended != nil { + if err := d.Set("unattended", *(transform.Settings.Unattended)); err != nil { + return err } - if v, ok := definedSettings["max_page_search_size"]; ok { - mpss := v.(int) - settings.MaxPageSearchSize = &mpss + } + + // transform.Meta + if transform.Meta == nil { + if err := d.Set("metadata", nil); err != nil { + return err } - if v, ok := definedSettings["num_failure_retries"]; ok { - nfr := v.(int) - settings.NumFailureRetries = &nfr + } else { + meta, err := json.Marshal(transform.Meta) + if err != nil { + return err } - if v, ok := definedSettings["unattended"]; ok { - u := v.(bool) - settings.Unattended = &u + + if err := d.Set("metadata", string(meta)); err != nil { + return err } + } - transform.Settings = &settings + return nil +} + +func updateResourceDataFromStats(ctx context.Context, d *schema.ResourceData, transformStats *models.TransformStats) error { + + // transform.Enabled + if err := d.Set("enabled", transformStats.IsStarted()); err != nil { + return err } - return &transform, nil + return nil +} + +func flattenSource(source *models.TransformSource) []interface{} { + if source == nil { + return []interface{}{} + } + + s := make(map[string]interface{}) + + if source.Indices != nil { + s["indices"] = source.Indices + } + + if source.Query != nil { + query, err := json.Marshal(source.Query) + if err != nil { + return 
[]interface{}{} + } + if len(query) > 0 { + s["query"] = string(query) + } + } + + if source.RuntimeMappings != nil { + rm, err := json.Marshal(source.RuntimeMappings) + if err != nil { + return []interface{}{} + } + if len(rm) > 0 { + s["runtime_mappings"] = string(rm) + } + } + + return []interface{}{s} +} + +func flattenDestination(dest *models.TransformDestination) []interface{} { + if dest == nil { + return []interface{}{} + } + + d := make(map[string]interface{}) + + d["index"] = dest.Index + + if dest.Pipeline != "" { + d["pipeline"] = dest.Pipeline + } + + return []interface{}{d} +} + +func flattenSync(sync *models.TransformSync) []interface{} { + if sync == nil { + return nil + } + + time := make(map[string]interface{}) + + if sync.Time.Delay != "" { + time["delay"] = sync.Time.Delay + } + + if sync.Time.Field != "" { + time["field"] = sync.Time.Field + } + + s := make(map[string]interface{}) + s["time"] = []interface{}{time} + + return []interface{}{s} +} + +func flattenRetentionPolicy(retention *models.TransformRetentionPolicy) []interface{} { + if retention == nil { + return []interface{}{} + } + + time := make(map[string]interface{}) + + if retention.Time.MaxAge != "" { + time["max_age"] = retention.Time.MaxAge + } + + if retention.Time.Field != "" { + time["field"] = retention.Time.Field + } + + r := make(map[string]interface{}) + r["time"] = []interface{}{time} + + return []interface{}{r} } diff --git a/internal/models/transform.go b/internal/models/transform.go index 7a13f4a80..f1a3438f4 100644 --- a/internal/models/transform.go +++ b/internal/models/transform.go @@ -16,7 +16,7 @@ type Transform struct { Frequency string `json:"frequency,omitempty"` RetentionPolicy *TransformRetentionPolicy `json:"retention_policy,omitempty"` Sync *TransformSync `json:"sync,omitempty"` - Meta map[string]interface{} `json:"_meta,omitempty"` + Meta interface{} `json:"_meta,omitempty"` Settings *TransformSettings `json:"settings,omitempty"` } @@ -69,9 +69,24 @@ type 
UpdateTransformParams struct { DeferValidation bool Timeout time.Duration Enabled bool + WasEnabled bool } type GetTransformResponse struct { - Count json.Number `json:"count,omitempty"` + Count json.Number `json:"count"` Transforms []Transform `json:"transforms"` } + +type TransformStats struct { + Id string `json:"id"` + State string `json:"state"` +} + +type GetTransformStatsResponse struct { + Count json.Number `json:"count"` + TransformStats []TransformStats `json:"transforms"` +} + +func (ts *TransformStats) IsStarted() bool { + return ts.State == "started" || ts.State == "indexing" +} From 1b5db028fde79c36918e4fb65ec42a0aff588503 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Sun, 12 Mar 2023 12:44:50 +0200 Subject: [PATCH 20/26] check versions before using features --- docs/resources/elasticsearch_transform.md | 26 ++---- .../resource.tf | 5 +- internal/clients/elasticsearch/transform.go | 38 ++++++-- internal/elasticsearch/transform/transform.go | 90 +++++++++++++------ .../elasticsearch/transform/transform_test.go | 50 ++++++----- internal/models/transform.go | 2 +- 6 files changed, 135 insertions(+), 76 deletions(-) diff --git a/docs/resources/elasticsearch_transform.md b/docs/resources/elasticsearch_transform.md index 5d43647f8..2b057dc75 100644 --- a/docs/resources/elasticsearch_transform.md +++ b/docs/resources/elasticsearch_transform.md @@ -76,17 +76,23 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { ### Optional +- `align_checkpoints` (Boolean) Specifies whether the transform checkpoint ranges should be optimized for performance. +- `dates_as_epoch_millis` (Boolean) Defines if dates in the output should be written as ISO formatted string (default) or as millis since epoch. +- `deduce_mappings` (Boolean) Specifies whether the transform should deduce the destination index mappings from the transform config. 
- `defer_validation` (Boolean) When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created. - `description` (String) Free text description of the transform. +- `docs_per_second` (Number) Specifies a limit on the number of input documents per second. Default (unset) value disables throttling. - `enabled` (Boolean) Controls wether the transform is started or stopped. Default is `false` (stopped). - `frequency` (String) The interval between checks for changes in the source indices when the transform is running continuously. Defaults to `1m`. - `latest` (String) The latest method transforms the data by finding the latest document for each unique key. JSON definition expected. Either 'pivot' or 'latest' must be present. +- `max_page_search_size` (Number) Defines the initial page size to use for the composite aggregation for each checkpoint. Default is 500. - `metadata` (String) Defines optional transform metadata. +- `num_failure_retries` (Number) Defines the number of retries on a recoverable failure before the transform task is marked as failed. The default value is the cluster-level setting num_transform_failure_retries. - `pivot` (String) The pivot method transforms the data by aggregating and grouping it. JSON definition expected. Either 'pivot' or 'latest' must be present. - `retention_policy` (Block List, Max: 1) Defines a retention policy for the transform. (see [below for nested schema](#nestedblock--retention_policy)) -- `settings` (Block List, Max: 1) Defines optional transform settings. (see [below for nested schema](#nestedblock--settings)) - `sync` (Block List, Max: 1) Defines the properties transforms require to run continuously. (see [below for nested schema](#nestedblock--sync)) - `timeout` (String) Period to wait for a response from Elastisearch when performing any management operation. 
If no response is received before the timeout expires, the operation fails and returns an error. Defaults to `30s`. +- `unattended` (Boolean) In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails. ### Read-Only @@ -122,7 +128,7 @@ Optional: Required: -- `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to set the retention policy. (see [below for nested schema](#nestedblock--retention_policy--time)) +- `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to set the retention policy. This is currently the only supported option. (see [below for nested schema](#nestedblock--retention_policy--time)) ### Nested Schema for `retention_policy.time` @@ -134,26 +140,12 @@ Required: - -### Nested Schema for `settings` - -Optional: - -- `align_checkpoints` (Boolean) Specifies whether the transform checkpoint ranges should be optimized for performance. Default value is true. -- `dates_as_epoch_millis` (Boolean) Defines if dates in the output should be written as ISO formatted string (default) or as millis since epoch. -- `deduce_mappings` (Boolean) Specifies whether the transform should deduce the destination index mappings from the transform config. The default value is true -- `docs_per_second` (Number) Specifies a limit on the number of input documents per second. Default value is null, which disables throttling. -- `max_page_search_size` (Number) Defines the initial page size to use for the composite aggregation for each checkpoint. The default value is 500. -- `num_failure_retries` (Number) Defines the number of retries on a recoverable failure before the transform task is marked as failed. The default value is the cluster-level setting num_transform_failure_retries. -- `unattended` (Boolean) In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails. Defaults to false. 
- - ### Nested Schema for `sync` Required: -- `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to synchronize the source and destination indices. (see [below for nested schema](#nestedblock--sync--time)) +- `time` (Block List, Min: 1, Max: 1) Specifies that the transform uses a time field to synchronize the source and destination indices. This is currently the only supported option. (see [below for nested schema](#nestedblock--sync--time)) ### Nested Schema for `sync.time` diff --git a/examples/resources/elasticstack_elasticsearch_transform/resource.tf b/examples/resources/elasticstack_elasticsearch_transform/resource.tf index 68e44b8d5..4baab47ad 100644 --- a/examples/resources/elasticstack_elasticsearch_transform/resource.tf +++ b/examples/resources/elasticstack_elasticsearch_transform/resource.tf @@ -3,7 +3,7 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { description = "A meaningful description" source { - indices = ["name_or_pattern_for_input_index"] + indices = ["names_or_patterns_for_input_index"] } destination { @@ -44,7 +44,8 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { } } - enabled = false + max_page_search_size = 2000 + enabled = false defer_validation = false } diff --git a/internal/clients/elasticsearch/transform.go b/internal/clients/elasticsearch/transform.go index 0f2d4d4eb..f50417e6b 100644 --- a/internal/clients/elasticsearch/transform.go +++ b/internal/clients/elasticsearch/transform.go @@ -17,6 +17,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" ) +var transformFeatureMinSupportedVersion = version.Must(version.NewVersion("7.2.0")) + var apiOperationTimeoutParamMinSupportedVersion = version.Must(version.NewVersion("7.17.0")) func PutTransform(ctx context.Context, apiClient *clients.ApiClient, transform *models.Transform, params *models.PutTransformParams) diag.Diagnostics { @@ -37,6 +39,15 @@ func PutTransform(ctx context.Context, 
apiClient *clients.ApiClient, transform * return diags } + if serverVersion.LessThan(transformFeatureMinSupportedVersion) { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Transforms not supported", + Detail: fmt.Sprintf(`Transform feature requires a minimum Elasticsearch version of "%s"`, transformFeatureMinSupportedVersion), + }) + return diags + } + withTimeout := serverVersion.GreaterThanOrEqual(apiOperationTimeoutParamMinSupportedVersion) putOptions := []func(*esapi.TransformPutTransformRequest){ @@ -188,6 +199,15 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor return diags } + if serverVersion.LessThan(transformFeatureMinSupportedVersion) { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Transforms not supported", + Detail: fmt.Sprintf(`Transform feature requires a minimum Elasticsearch version of "%s"`, transformFeatureMinSupportedVersion), + }) + return diags + } + withTimeout := serverVersion.GreaterThanOrEqual(apiOperationTimeoutParamMinSupportedVersion) updateOptions := []func(*esapi.TransformUpdateTransformRequest){ @@ -216,15 +236,15 @@ func UpdateTransform(ctx context.Context, apiClient *clients.ApiClient, transfor timeout = 0 } - if params.Enabled && !params.WasEnabled { - if diags := startTransform(ctx, esClient, transform.Name, timeout); diags.HasError() { - return diags - } - } - - if !params.Enabled && params.WasEnabled { - if diags := stopTransform(ctx, esClient, transform.Name, timeout); diags.HasError() { - return diags + if params.ApplyEnabled { + if params.Enabled { + if diags := startTransform(ctx, esClient, transform.Name, timeout); diags.HasError() { + return diags + } + } else { + if diags := stopTransform(ctx, esClient, transform.Name, timeout); diags.HasError() { + return diags + } } } diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index 16a28d280..4356bbd1c 100644 --- 
a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -12,12 +12,35 @@ import ( "github.com/elastic/terraform-provider-elasticstack/internal/clients/elasticsearch" "github.com/elastic/terraform-provider-elasticstack/internal/models" "github.com/elastic/terraform-provider-elasticstack/internal/utils" + "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) +var settingsRequiredVersions map[string]*version.Version + +func init() { + settingsRequiredVersions = make(map[string]*version.Version) + + // capabilities + settingsRequiredVersions["frequency"], _ = version.NewVersion("7.3.0") + settingsRequiredVersions["latest"], _ = version.NewVersion("7.11.0") + settingsRequiredVersions["retention_policy"], _ = version.NewVersion("7.12.0") + settingsRequiredVersions["source.runtime_mappings"], _ = version.NewVersion("7.12.0") + settingsRequiredVersions["metadata"], _ = version.NewVersion("7.16.0") + + // settings + settingsRequiredVersions["docs_per_second"], _ = version.NewVersion("7.8.0") + settingsRequiredVersions["max_page_search_size"], _ = version.NewVersion("7.8.0") + settingsRequiredVersions["dates_as_epoch_millis"], _ = version.NewVersion("7.11.0") + settingsRequiredVersions["align_checkpoints"], _ = version.NewVersion("7.11.0") + settingsRequiredVersions["deduce_mappings"], _ = version.NewVersion("8.1.0") + settingsRequiredVersions["num_failure_retries"], _ = version.NewVersion("8.4.0") + settingsRequiredVersions["unattended"], _ = version.NewVersion("8.5.0") +} + func ResourceTransform() *schema.Resource { transformSchema := map[string]*schema.Schema{ "id": { @@ -279,7 +302,12 @@ func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta i return diags } - transform, err := 
getTransformFromResourceData(ctx, d, transformName) + serverVersion, diags := client.ServerVersion(ctx) + if diags.HasError() { + return diags + } + + transform, err := getTransformFromResourceData(ctx, d, transformName, serverVersion) if err != nil { return diag.FromErr(err) } @@ -361,7 +389,12 @@ func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta i return diags } - updatedTransform, err := getTransformFromResourceData(ctx, d, transformName) + serverVersion, diags := client.ServerVersion(ctx) + if diags.HasError() { + return diags + } + + updatedTransform, err := getTransformFromResourceData(ctx, d, transformName, serverVersion) if err != nil { return diag.FromErr(err) } @@ -369,23 +402,17 @@ func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta i updatedTransform.Pivot = nil updatedTransform.Latest = nil - params := models.UpdateTransformParams{ - DeferValidation: d.Get("defer_validation").(bool), - } - timeout, err := time.ParseDuration(d.Get("timeout").(string)) if err != nil { return diag.FromErr(err) } - params.Timeout = timeout - params.Enabled = d.Get("enabled").(bool) - - transformStats, diags := elasticsearch.GetTransformStats(ctx, client, &transformName) - if diags.HasError() { - return diags + params := models.UpdateTransformParams{ + DeferValidation: d.Get("defer_validation").(bool), + Timeout: timeout, + Enabled: d.Get("enabled").(bool), + ApplyEnabled: d.HasChange("enabled"), } - params.WasEnabled = transformStats.IsStarted() if diags := elasticsearch.UpdateTransform(ctx, client, updatedTransform, ¶ms); diags.HasError() { return diags @@ -414,7 +441,7 @@ func resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } -func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, name string) (*models.Transform, error) { +func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, name string, serverVersion *version.Version) 
(*models.Transform, error) { var transform models.Transform transform.Name = name @@ -441,7 +468,7 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n transform.Source.Query = query } - if v, ok := definedSource["runtime_mappings"]; ok && len(v.(string)) > 0 { + if v, ok := definedSource["runtime_mappings"]; ok && len(v.(string)) > 0 && isSettingAllowed(ctx, "source.runtime_mappings", serverVersion) { var runtimeMappings interface{} if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&runtimeMappings); err != nil { return nil, err @@ -471,7 +498,7 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n transform.Pivot = pivot } - if v, ok := d.GetOk("latest"); ok { + if v, ok := d.GetOk("latest"); ok && isSettingAllowed(ctx, "latest", serverVersion) { var latest interface{} if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&latest); err != nil { return nil, err @@ -479,11 +506,11 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n transform.Latest = latest } - if v, ok := d.GetOk("frequency"); ok { + if v, ok := d.GetOk("frequency"); ok && isSettingAllowed(ctx, "frequency", serverVersion) { transform.Frequency = v.(string) } - if v, ok := d.GetOk("metadata"); ok { + if v, ok := d.GetOk("metadata"); ok && isSettingAllowed(ctx, "metadata", serverVersion) { var metadata map[string]interface{} if err := json.NewDecoder(strings.NewReader(v.(string))).Decode(&metadata); err != nil { return nil, err @@ -491,7 +518,7 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n transform.Meta = metadata } - if v, ok := d.GetOk("retention_policy"); ok && v != nil { + if v, ok := d.GetOk("retention_policy"); ok && v != nil && isSettingAllowed(ctx, "retention_policy", serverVersion) { definedRetentionPolicy := v.([]interface{})[0].(map[string]interface{}) if v, ok := definedRetentionPolicy["time"]; ok { @@ -531,37 +558,37 @@ func 
getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n settings := models.TransformSettings{} setSettings := false - if v, ok := d.GetOk("align_checkpoints"); ok { + if v, ok := d.GetOk("align_checkpoints"); ok && isSettingAllowed(ctx, "align_checkpoints", serverVersion) { setSettings = true ac := v.(bool) settings.AlignCheckpoints = &ac } - if v, ok := d.GetOk("dates_as_epoch_millis"); ok { + if v, ok := d.GetOk("dates_as_epoch_millis"); ok && isSettingAllowed(ctx, "dates_as_epoch_millis", serverVersion) { setSettings = true dem := v.(bool) settings.DatesAsEpochMillis = &dem } - if v, ok := d.GetOk("deduce_mappings"); ok { + if v, ok := d.GetOk("deduce_mappings"); ok && isSettingAllowed(ctx, "deduce_mappings", serverVersion) { setSettings = true dm := v.(bool) settings.DeduceMappings = &dm } - if v, ok := d.GetOk("docs_per_second"); ok && v.(float64) >= 0 { + if v, ok := d.GetOk("docs_per_second"); ok && v.(float64) >= 0 && isSettingAllowed(ctx, "docs_per_second", serverVersion) { setSettings = true dps := v.(float64) settings.DocsPerSecond = &dps } - if v, ok := d.GetOk("max_page_search_size"); ok && v.(int) > 10 { + if v, ok := d.GetOk("max_page_search_size"); ok && v.(int) > 10 && isSettingAllowed(ctx, "max_page_search_size", serverVersion) { setSettings = true mpss := v.(int) settings.MaxPageSearchSize = &mpss } - if v, ok := d.GetOk("num_failure_retries"); ok && v.(int) >= -1 { + if v, ok := d.GetOk("num_failure_retries"); ok && v.(int) >= -1 && isSettingAllowed(ctx, "num_failure_retries", serverVersion) { setSettings = true nfr := v.(int) settings.NumFailureRetries = &nfr } - if v, ok := d.GetOk("unattended"); ok { + if v, ok := d.GetOk("unattended"); ok && isSettingAllowed(ctx, "unattended", serverVersion) { setSettings = true u := v.(bool) settings.Unattended = &u @@ -769,3 +796,14 @@ func flattenRetentionPolicy(retention *models.TransformRetentionPolicy) []interf return []interface{}{r} } + +func isSettingAllowed(ctx context.Context, 
settingName string, serverVersion *version.Version) bool { + if minVersion, ok := settingsRequiredVersions[settingName]; ok { + if serverVersion.LessThan(minVersion) { + tflog.Warn(ctx, fmt.Sprintf("Setting [%s] not allowed for Elasticsearch server version %v; min required is %v", settingName, *serverVersion, *minVersion)) + return false + } + } + + return true +} diff --git a/internal/elasticsearch/transform/transform_test.go b/internal/elasticsearch/transform/transform_test.go index 788df9d21..d4ce3c9d2 100644 --- a/internal/elasticsearch/transform/transform_test.go +++ b/internal/elasticsearch/transform/transform_test.go @@ -27,6 +27,8 @@ func TestAccResourceTransformWithPivot(t *testing.T) { resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.0", "source_index_for_transform"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "destination.0.index", "dest_index_for_transform"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "5m"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "max_page_search_size", "2000"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "enabled", "false"), ), }, { @@ -38,6 +40,8 @@ func TestAccResourceTransformWithPivot(t *testing.T) { resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.1", "additional_index"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "destination.0.index", "dest_index_for_transform_v2"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "10m"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "max_page_search_size", "1000"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "enabled", "true"), ), }, }, @@ -126,6 +130,15 @@ 
resource "elasticstack_elasticsearch_transform" "test_pivot" { } } }) + + sync { + time { + field = "order_date" + delay = "20s" + } + } + + max_page_search_size = 2000 frequency = "5m" enabled = false @@ -157,13 +170,6 @@ resource "elasticstack_elasticsearch_index" "test_source_index_1" { } }) - settings { - setting { - name = "index.number_of_replicas" - value = "2" - } - } - deletion_protection = false wait_for_active_shards = "all" master_timeout = "1m" @@ -183,13 +189,6 @@ resource "elasticstack_elasticsearch_index" "test_source_index_2" { } }) - settings { - setting { - name = "index.number_of_replicas" - value = "2" - } - } - deletion_protection = false wait_for_active_shards = "all" master_timeout = "1m" @@ -228,6 +227,22 @@ resource "elasticstack_elasticsearch_transform" "test_pivot" { } } }) + + sync { + time { + field = "order_date" + delay = "20s" + } + } + + retention_policy { + time { + field = "order_date" + max_age = "7d" + } + } + + max_page_search_size = 1000 frequency = "10m" enabled = true @@ -287,13 +302,6 @@ resource "elasticstack_elasticsearch_index" "test_index" { } }) - settings { - setting { - name = "index.number_of_replicas" - value = "2" - } - } - deletion_protection = false wait_for_active_shards = "all" master_timeout = "1m" diff --git a/internal/models/transform.go b/internal/models/transform.go index f1a3438f4..2973219e8 100644 --- a/internal/models/transform.go +++ b/internal/models/transform.go @@ -69,7 +69,7 @@ type UpdateTransformParams struct { DeferValidation bool Timeout time.Duration Enabled bool - WasEnabled bool + ApplyEnabled bool } type GetTransformResponse struct { From a00822ce27a42e68d93f964aa0eb998f394ce2bf Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Sun, 12 Mar 2023 12:50:05 +0200 Subject: [PATCH 21/26] updated doc --- docs/resources/elasticsearch_transform.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/resources/elasticsearch_transform.md 
b/docs/resources/elasticsearch_transform.md index 2b057dc75..9f97ec4db 100644 --- a/docs/resources/elasticsearch_transform.md +++ b/docs/resources/elasticsearch_transform.md @@ -18,7 +18,7 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { description = "A meaningful description" source { - indices = ["name_or_pattern_for_input_index"] + indices = ["names_or_patterns_for_input_index"] } destination { @@ -59,8 +59,9 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { } } - enabled = false + max_page_search_size = 2000 + enabled = false defer_validation = false } ``` From 4f4eb568c757538c7153e8f0058d9fba8c18ea9d Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Sun, 12 Mar 2023 14:58:01 +0200 Subject: [PATCH 22/26] support for terraform import --- docs/resources/elasticsearch_transform.md | 6 ++++- .../import.sh | 1 + internal/elasticsearch/transform/transform.go | 26 +++++++++++++++++++ .../resources/elasticsearch_transform.md.tmpl | 4 ++- 4 files changed, 35 insertions(+), 2 deletions(-) create mode 100644 examples/resources/elasticstack_elasticsearch_transform/import.sh diff --git a/docs/resources/elasticsearch_transform.md b/docs/resources/elasticsearch_transform.md index 9f97ec4db..e893dbdc3 100644 --- a/docs/resources/elasticsearch_transform.md +++ b/docs/resources/elasticsearch_transform.md @@ -161,4 +161,8 @@ Optional: ## Import -Not implemented yet. 
+Import is supported using the following syntax: + +```shell +terraform import elasticstack_elasticsearch_transform.my_new_transform <cluster_uuid>/<transform_name> +``` diff --git a/examples/resources/elasticstack_elasticsearch_transform/import.sh new file mode 100644 index 000000000..01a277ec7 --- /dev/null +++ b/examples/resources/elasticstack_elasticsearch_transform/import.sh @@ -0,0 +1 @@ +terraform import elasticstack_elasticsearch_transform.my_new_transform <cluster_uuid>/<transform_name> diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index 4356bbd1c..bcba9996d 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -286,6 +286,10 @@ func ResourceTransform() *schema.Resource { ReadContext: resourceTransformRead, UpdateContext: resourceTransformUpdate, DeleteContext: resourceTransformDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, } } @@ -618,6 +622,28 @@ func updateResourceDataFromModel(ctx context.Context, d *schema.ResourceData, tr return err } + // transform.Pivot + if transform.Pivot != nil { + pivot, err := json.Marshal(transform.Pivot) + if err != nil { + return err + } + if err := d.Set("pivot", string(pivot)); err != nil { + return err + } + } + + // transform.Latest + if transform.Latest != nil { + latest, err := json.Marshal(transform.Latest) + if err != nil { + return err + } + if err := d.Set("latest", string(latest)); err != nil { + return err + } + } + // transform.Frequency + if err := d.Set("frequency", transform.Frequency); err != nil { return err diff --git a/templates/resources/elasticsearch_transform.md.tmpl b/templates/resources/elasticsearch_transform.md.tmpl index be274e4f0..f30e89140 100644 --- a/templates/resources/elasticsearch_transform.md.tmpl +++ b/templates/resources/elasticsearch_transform.md.tmpl @@ -18,4 +18,6 @@ Creates, updates, starts and stops
a transform. See: https://www.elastic.co/guid ## Import -Not implemented yet. +Import is supported using the following syntax: + +{{ codefile "shell" "examples/resources/elasticstack_elasticsearch_transform/import.sh" }} From e2ebb7927f8b693bbc31de8c500fb45a6b09edf7 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Tue, 14 Mar 2023 21:09:34 +0200 Subject: [PATCH 23/26] minor corrections/updates --- docs/resources/elasticsearch_transform.md | 2 +- internal/elasticsearch/transform/transform.go | 36 +++++++++++-------- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/docs/resources/elasticsearch_transform.md b/docs/resources/elasticsearch_transform.md index e893dbdc3..7d21c464a 100644 --- a/docs/resources/elasticsearch_transform.md +++ b/docs/resources/elasticsearch_transform.md @@ -83,7 +83,7 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { - `defer_validation` (Boolean) When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created. - `description` (String) Free text description of the transform. - `docs_per_second` (Number) Specifies a limit on the number of input documents per second. Default (unset) value disables throttling. -- `enabled` (Boolean) Controls wether the transform is started or stopped. Default is `false` (stopped). +- `enabled` (Boolean) Controls wether the transform should be started or stopped. Default is `false` (stopped). - `frequency` (String) The interval between checks for changes in the source indices when the transform is running continuously. Defaults to `1m`. - `latest` (String) The latest method transforms the data by finding the latest document for each unique key. JSON definition expected. Either 'pivot' or 'latest' must be present. 
- `max_page_search_size` (Number) Defines the initial page size to use for the composite aggregation for each checkpoint. Default is 500. diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index bcba9996d..db69494e7 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -25,20 +25,21 @@ func init() { settingsRequiredVersions = make(map[string]*version.Version) // capabilities - settingsRequiredVersions["frequency"], _ = version.NewVersion("7.3.0") - settingsRequiredVersions["latest"], _ = version.NewVersion("7.11.0") - settingsRequiredVersions["retention_policy"], _ = version.NewVersion("7.12.0") - settingsRequiredVersions["source.runtime_mappings"], _ = version.NewVersion("7.12.0") - settingsRequiredVersions["metadata"], _ = version.NewVersion("7.16.0") + settingsRequiredVersions["destination.pipeline"] = version.Must(version.NewVersion("7.3.0")) + settingsRequiredVersions["frequency"] = version.Must(version.NewVersion("7.3.0")) + settingsRequiredVersions["latest"] = version.Must(version.NewVersion("7.11.0")) + settingsRequiredVersions["retention_policy"] = version.Must(version.NewVersion("7.12.0")) + settingsRequiredVersions["source.runtime_mappings"] = version.Must(version.NewVersion("7.12.0")) + settingsRequiredVersions["metadata"] = version.Must(version.NewVersion("7.16.0")) // settings - settingsRequiredVersions["docs_per_second"], _ = version.NewVersion("7.8.0") - settingsRequiredVersions["max_page_search_size"], _ = version.NewVersion("7.8.0") - settingsRequiredVersions["dates_as_epoch_millis"], _ = version.NewVersion("7.11.0") - settingsRequiredVersions["align_checkpoints"], _ = version.NewVersion("7.11.0") - settingsRequiredVersions["deduce_mappings"], _ = version.NewVersion("8.1.0") - settingsRequiredVersions["num_failure_retries"], _ = version.NewVersion("8.4.0") - settingsRequiredVersions["unattended"], _ = version.NewVersion("8.5.0") + 
settingsRequiredVersions["docs_per_second"] = version.Must(version.NewVersion("7.8.0")) + settingsRequiredVersions["max_page_search_size"] = version.Must(version.NewVersion("7.8.0")) + settingsRequiredVersions["dates_as_epoch_millis"] = version.Must(version.NewVersion("7.11.0")) + settingsRequiredVersions["align_checkpoints"] = version.Must(version.NewVersion("7.11.0")) + settingsRequiredVersions["deduce_mappings"] = version.Must(version.NewVersion("8.1.0")) + settingsRequiredVersions["num_failure_retries"] = version.Must(version.NewVersion("8.4.0")) + settingsRequiredVersions["unattended"] = version.Must(version.NewVersion("8.5.0")) } func ResourceTransform() *schema.Resource { @@ -108,6 +109,12 @@ func ResourceTransform() *schema.Resource { Description: "The destination index for the transform.", Type: schema.TypeString, Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 255), + validation.StringNotInSlice([]string{".", ".."}, true), + validation.StringMatch(regexp.MustCompile(`^[^-_+]`), "cannot start with -, _, +"), + validation.StringMatch(regexp.MustCompile(`^[a-z0-9!$%&'()+.;=@[\]^{}~_-]+$`), "must contain lower case alphanumeric characters and selected punctuation, see: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html#indices-create-api-path-params"), + ), }, "pipeline": { Description: "The unique identifier for an ingest pipeline.", @@ -272,7 +279,7 @@ func ResourceTransform() *schema.Resource { }, "enabled": { Type: schema.TypeBool, - Description: "Controls wether the transform is started or stopped. Default is `false` (stopped).", + Description: "Controls wether the transform should be started or stopped. 
Default is `false` (stopped).", Optional: true, Default: false, }, @@ -403,6 +410,7 @@ func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta i return diag.FromErr(err) } + // pivot and latest cannot be updated; sending them to the API for an update operation would result in an error updatedTransform.Pivot = nil updatedTransform.Latest = nil @@ -489,7 +497,7 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n Index: definedDestination["index"].(string), } - if pipeline, ok := definedDestination["pipeline"]; ok && len(pipeline.(string)) > 0 { + if pipeline, ok := definedDestination["pipeline"]; ok && isSettingAllowed(ctx, "destination.pipeline", serverVersion) { transform.Destination.Pipeline = pipeline.(string) } } From b900984b6cfbf0aa0f9149f011593697ccf24cf0 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Tue, 14 Mar 2023 21:17:29 +0200 Subject: [PATCH 24/26] corrected min version for align_checkpoints --- internal/elasticsearch/transform/transform.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index db69494e7..ba7fe9c68 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -36,7 +36,7 @@ func init() { settingsRequiredVersions["docs_per_second"] = version.Must(version.NewVersion("7.8.0")) settingsRequiredVersions["max_page_search_size"] = version.Must(version.NewVersion("7.8.0")) settingsRequiredVersions["dates_as_epoch_millis"] = version.Must(version.NewVersion("7.11.0")) - settingsRequiredVersions["align_checkpoints"] = version.Must(version.NewVersion("7.11.0")) + settingsRequiredVersions["align_checkpoints"] = version.Must(version.NewVersion("7.15.0")) settingsRequiredVersions["deduce_mappings"] = version.Must(version.NewVersion("8.1.0")) settingsRequiredVersions["num_failure_retries"] = 
version.Must(version.NewVersion("8.4.0")) settingsRequiredVersions["unattended"] = version.Must(version.NewVersion("8.5.0")) From 0bedc945dd66e2453735a8488f0bcfe9476c373c Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Tue, 14 Mar 2023 22:14:20 +0200 Subject: [PATCH 25/26] updates on acc tests --- internal/elasticsearch/transform/transform_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/internal/elasticsearch/transform/transform_test.go b/internal/elasticsearch/transform/transform_test.go index d4ce3c9d2..556e2b5c6 100644 --- a/internal/elasticsearch/transform/transform_test.go +++ b/internal/elasticsearch/transform/transform_test.go @@ -29,6 +29,10 @@ func TestAccResourceTransformWithPivot(t *testing.T) { resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "5m"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "max_page_search_size", "2000"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "enabled", "false"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "sync.0.time.0.field", "order_date"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "sync.0.time.0.delay", "20s"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "defer_validation", "true"), + resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "latest"), ), }, { @@ -42,6 +46,10 @@ func TestAccResourceTransformWithPivot(t *testing.T) { resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "10m"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "max_page_search_size", "1000"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "enabled", "true"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", 
"retention_policy.0.time.0.field", "order_date"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "retention_policy.0.time.0.max_age", "7d"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "defer_validation", "true"), + resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "latest"), ), }, }, @@ -64,6 +72,8 @@ func TestAccResourceTransformWithLatest(t *testing.T) { resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "source.0.indices.0", "source_index_for_transform"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "destination.0.index", "dest_index_for_transform"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "frequency", "2m"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_latest", "defer_validation", "true"), + resource.TestCheckNoResourceAttr("elasticstack_elasticsearch_transform.test_latest", "pivot"), ), }, }, @@ -87,6 +97,7 @@ func TestAccResourceTransformNoDefer(t *testing.T) { resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.0", indexName), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "destination.0.index", "dest_index_for_transform"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "5m"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "defer_validation", "false"), ), }, }, From 3f447eab056bc9176cfa89fbeed8af5e9ecfcf80 Mon Sep 17 00:00:00 2001 From: Cristian Georgiu Date: Sat, 18 Mar 2023 15:50:49 +0200 Subject: [PATCH 26/26] removed some defaults; updated docs; updated acc test --- docs/resources/elasticsearch_transform.md | 4 +- internal/elasticsearch/transform/transform.go | 31 ++-- .../elasticsearch/transform/transform_test.go | 138 +++++++++--------- 
.../resources/elasticsearch_transform.md.tmpl | 2 + 4 files changed, 87 insertions(+), 88 deletions(-) diff --git a/docs/resources/elasticsearch_transform.md b/docs/resources/elasticsearch_transform.md index 7d21c464a..e54449ca2 100644 --- a/docs/resources/elasticsearch_transform.md +++ b/docs/resources/elasticsearch_transform.md @@ -10,6 +10,8 @@ description: |- Creates, updates, starts and stops a transform. See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html +**NOTE:** Some transform settings require a minimum Elasticsearch version. Such settings will be ignored when applied to versions below the required one (a warning will be issued in the logs). + ## Example Usage ```terraform @@ -80,7 +82,7 @@ resource "elasticstack_elasticsearch_transform" "transform_with_pivot" { - `align_checkpoints` (Boolean) Specifies whether the transform checkpoint ranges should be optimized for performance. - `dates_as_epoch_millis` (Boolean) Defines if dates in the output should be written as ISO formatted string (default) or as millis since epoch. - `deduce_mappings` (Boolean) Specifies whether the transform should deduce the destination index mappings from the transform config. -- `defer_validation` (Boolean) When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created. +- `defer_validation` (Boolean) When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created. Default is `false` - `description` (String) Free text description of the transform. - `docs_per_second` (Number) Specifies a limit on the number of input documents per second. Default (unset) value disables throttling. - `enabled` (Boolean) Controls wether the transform should be started or stopped. 
Default is `false` (stopped). diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go index ba7fe9c68..ff376c97c 100644 --- a/internal/elasticsearch/transform/transform.go +++ b/internal/elasticsearch/transform/transform.go @@ -171,9 +171,10 @@ func ResourceTransform() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "field": { - Description: "The date field that is used to calculate the age of the document.", - Type: schema.TypeString, - Required: true, + Description: "The date field that is used to calculate the age of the document.", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotWhiteSpace, }, "max_age": { Description: "Specifies the maximum age of a document in the destination index.", @@ -202,9 +203,10 @@ func ResourceTransform() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "field": { - Description: "The date field that is used to identify new documents in the source.", - Type: schema.TypeString, - Required: true, + Description: "The date field that is used to identify new documents in the source.", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotWhiteSpace, }, "delay": { Description: "The time delay between the current time and the latest input data time. 
The default value is 60s.", @@ -223,50 +225,43 @@ func ResourceTransform() *schema.Resource { Description: "Specifies whether the transform checkpoint ranges should be optimized for performance.", Type: schema.TypeBool, Optional: true, - Default: true, }, "dates_as_epoch_millis": { Description: "Defines if dates in the output should be written as ISO formatted string (default) or as millis since epoch.", Type: schema.TypeBool, Optional: true, - Default: false, }, "deduce_mappings": { Description: "Specifies whether the transform should deduce the destination index mappings from the transform config.", Type: schema.TypeBool, Optional: true, - Default: true, }, "docs_per_second": { Description: "Specifies a limit on the number of input documents per second. Default (unset) value disables throttling.", Type: schema.TypeFloat, Optional: true, - Default: -1, ValidateFunc: validation.FloatAtLeast(0), }, "max_page_search_size": { Description: "Defines the initial page size to use for the composite aggregation for each checkpoint. Default is 500.", Type: schema.TypeInt, Optional: true, - Default: -1, ValidateFunc: validation.IntBetween(10, 65536), }, "num_failure_retries": { Description: "Defines the number of retries on a recoverable failure before the transform task is marked as failed. The default value is the cluster-level setting num_transform_failure_retries.", Type: schema.TypeInt, Optional: true, - Default: -2, ValidateFunc: validation.IntBetween(-1, 100), }, "unattended": { Description: "In unattended mode, the transform retries indefinitely in case of an error which means the transform never fails.", Type: schema.TypeBool, Optional: true, - Default: false, }, "defer_validation": { Type: schema.TypeBool, - Description: "When true, deferrable validations are not run upon creation, but rather when the transform is started. 
This behavior may be desired if the source index does not exist until after the transform is created.", + Description: "When true, deferrable validations are not run upon creation, but rather when the transform is started. This behavior may be desired if the source index does not exist until after the transform is created. Default is `false`", Optional: true, Default: false, }, @@ -530,7 +525,7 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n transform.Meta = metadata } - if v, ok := d.GetOk("retention_policy"); ok && v != nil && isSettingAllowed(ctx, "retention_policy", serverVersion) { + if v, ok := d.GetOk("retention_policy"); ok && isSettingAllowed(ctx, "retention_policy", serverVersion) { definedRetentionPolicy := v.([]interface{})[0].(map[string]interface{}) if v, ok := definedRetentionPolicy["time"]; ok { @@ -585,17 +580,17 @@ func getTransformFromResourceData(ctx context.Context, d *schema.ResourceData, n dm := v.(bool) settings.DeduceMappings = &dm } - if v, ok := d.GetOk("docs_per_second"); ok && v.(float64) >= 0 && isSettingAllowed(ctx, "docs_per_second", serverVersion) { + if v, ok := d.GetOk("docs_per_second"); ok && isSettingAllowed(ctx, "docs_per_second", serverVersion) { setSettings = true dps := v.(float64) settings.DocsPerSecond = &dps } - if v, ok := d.GetOk("max_page_search_size"); ok && v.(int) > 10 && isSettingAllowed(ctx, "max_page_search_size", serverVersion) { + if v, ok := d.GetOk("max_page_search_size"); ok && isSettingAllowed(ctx, "max_page_search_size", serverVersion) { setSettings = true mpss := v.(int) settings.MaxPageSearchSize = &mpss } - if v, ok := d.GetOk("num_failure_retries"); ok && v.(int) >= -1 && isSettingAllowed(ctx, "num_failure_retries", serverVersion) { + if v, ok := d.GetOk("num_failure_retries"); ok && isSettingAllowed(ctx, "num_failure_retries", serverVersion) { setSettings = true nfr := v.(int) settings.NumFailureRetries = &nfr diff --git 
a/internal/elasticsearch/transform/transform_test.go b/internal/elasticsearch/transform/transform_test.go index 556e2b5c6..0de736470 100644 --- a/internal/elasticsearch/transform/transform_test.go +++ b/internal/elasticsearch/transform/transform_test.go @@ -44,7 +44,7 @@ func TestAccResourceTransformWithPivot(t *testing.T) { resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "source.0.indices.1", "additional_index"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "destination.0.index", "dest_index_for_transform_v2"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "frequency", "10m"), - resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "max_page_search_size", "1000"), + resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "max_page_search_size", "2000"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "enabled", "true"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "retention_policy.0.time.0.field", "order_date"), resource.TestCheckResourceAttr("elasticstack_elasticsearch_transform.test_pivot", "retention_policy.0.time.0.max_age", "7d"), @@ -114,17 +114,17 @@ provider "elasticstack" { resource "elasticstack_elasticsearch_transform" "test_pivot" { name = "%s" - description = "test description" + description = "test description" - source { - indices = ["source_index_for_transform"] - } + source { + indices = ["source_index_for_transform"] + } - destination { - index = "dest_index_for_transform" - } + destination { + index = "dest_index_for_transform" + } - pivot = jsonencode({ + pivot = jsonencode({ "group_by": { "customer_id": { "terms": { @@ -142,21 +142,21 @@ resource "elasticstack_elasticsearch_transform" "test_pivot" { } }) - sync { + sync { time { field = "order_date" delay = "20s" } } - max_page_search_size = 2000 + 
max_page_search_size = 2000 frequency = "5m" - enabled = false + enabled = false - defer_validation = true - timeout = "1m" + defer_validation = true + timeout = "1m" } - `, name) + `, name) } // update the existing transform, add another source index and start it (enabled = true) @@ -181,10 +181,10 @@ resource "elasticstack_elasticsearch_index" "test_source_index_1" { } }) - deletion_protection = false - wait_for_active_shards = "all" - master_timeout = "1m" - timeout = "1m" + deletion_protection = false + wait_for_active_shards = "all" + master_timeout = "1m" + timeout = "1m" } resource "elasticstack_elasticsearch_index" "test_source_index_2" { @@ -200,28 +200,28 @@ resource "elasticstack_elasticsearch_index" "test_source_index_2" { } }) - deletion_protection = false - wait_for_active_shards = "all" - master_timeout = "1m" - timeout = "1m" + deletion_protection = false + wait_for_active_shards = "all" + master_timeout = "1m" + timeout = "1m" } resource "elasticstack_elasticsearch_transform" "test_pivot" { name = "%s" - description = "yet another test description" + description = "yet another test description" - source { - indices = [ - elasticstack_elasticsearch_index.test_source_index_1.name, - elasticstack_elasticsearch_index.test_source_index_2.name - ] - } + source { + indices = [ + elasticstack_elasticsearch_index.test_source_index_1.name, + elasticstack_elasticsearch_index.test_source_index_2.name + ] + } - destination { - index = "dest_index_for_transform_v2" - } + destination { + index = "dest_index_for_transform_v2" + } - pivot = jsonencode({ + pivot = jsonencode({ "group_by": { "customer_id": { "terms": { @@ -239,28 +239,28 @@ resource "elasticstack_elasticsearch_transform" "test_pivot" { } }) - sync { + sync { time { field = "order_date" delay = "20s" } } - retention_policy { + retention_policy { time { field = "order_date" max_age = "7d" } } - max_page_search_size = 1000 + max_page_search_size = 2000 frequency = "10m" - enabled = true + enabled = true 
- defer_validation = true - timeout = "1m" + defer_validation = true + timeout = "1m" } - `, name) + `, name) } func testAccResourceTransformWithLatestCreate(name string) string { @@ -271,27 +271,27 @@ provider "elasticstack" { resource "elasticstack_elasticsearch_transform" "test_latest" { name = "%s" - description = "test description (latest)" + description = "test description (latest)" - source { - indices = ["source_index_for_transform"] - } + source { + indices = ["source_index_for_transform"] + } - destination { - index = "dest_index_for_transform" - } + destination { + index = "dest_index_for_transform" + } - latest = jsonencode({ + latest = jsonencode({ "unique_key": ["customer_id"], "sort": "order_date" }) frequency = "2m" - enabled = false + enabled = false - defer_validation = true - timeout = "1m" + defer_validation = true + timeout = "1m" } - `, name) + `, name) } func testAccResourceTransformNoDeferCreate(transformName, indexName string) string { @@ -313,25 +313,25 @@ resource "elasticstack_elasticsearch_index" "test_index" { } }) - deletion_protection = false - wait_for_active_shards = "all" - master_timeout = "1m" - timeout = "1m" + deletion_protection = false + wait_for_active_shards = "all" + master_timeout = "1m" + timeout = "1m" } resource "elasticstack_elasticsearch_transform" "test_pivot" { name = "%s" - description = "test description" + description = "test description" - source { - indices = [elasticstack_elasticsearch_index.test_index.name] - } + source { + indices = [elasticstack_elasticsearch_index.test_index.name] + } - destination { - index = "dest_index_for_transform" - } + destination { + index = "dest_index_for_transform" + } - pivot = jsonencode({ + pivot = jsonencode({ "group_by": { "customer_id": { "terms": { @@ -349,12 +349,12 @@ resource "elasticstack_elasticsearch_transform" "test_pivot" { } }) frequency = "5m" - enabled = false + enabled = false - defer_validation = false - timeout = "1m" + defer_validation = false + timeout = 
"1m" } - `, indexName, transformName) + `, indexName, transformName) } func checkResourceTransformDestroy(s *terraform.State) error { diff --git a/templates/resources/elasticsearch_transform.md.tmpl b/templates/resources/elasticsearch_transform.md.tmpl index f30e89140..6c997cef8 100644 --- a/templates/resources/elasticsearch_transform.md.tmpl +++ b/templates/resources/elasticsearch_transform.md.tmpl @@ -10,6 +10,8 @@ description: |- Creates, updates, starts and stops a transform. See: https://www.elastic.co/guide/en/elasticsearch/reference/current/transforms.html +**NOTE:** Some transform settings require a minimum Elasticsearch version. Such settings will be ignored when applied to versions below the required one (a warning will be issued in the logs). + ## Example Usage {{ tffile "examples/resources/elasticstack_elasticsearch_transform/resource.tf" }}