diff --git a/digitalocean/database/resource_database_cluster_test.go b/digitalocean/database/resource_database_cluster_test.go index 88c9667cc..b87b4615f 100644 --- a/digitalocean/database/resource_database_cluster_test.go +++ b/digitalocean/database/resource_database_cluster_test.go @@ -882,6 +882,17 @@ resource "digitalocean_database_cluster" "foobar" { tags = ["production"] }` +const testAccCheckDigitalOceanDatabaseClusterOpensearch = ` +resource "digitalocean_database_cluster" "foobar" { + name = "%s" + engine = "opensearch" + version = "%s" + size = "db-s-1vcpu-2gb" + region = "nyc1" + node_count = 1 + tags = ["production"] +}` + const testAccCheckDigitalOceanDatabaseClusterConfigWithEvictionPolicy = ` resource "digitalocean_database_cluster" "foobar" { name = "%s" diff --git a/digitalocean/database/resource_database_opensearch_config.go b/digitalocean/database/resource_database_opensearch_config.go new file mode 100644 index 000000000..1cd952f2c --- /dev/null +++ b/digitalocean/database/resource_database_opensearch_config.go @@ -0,0 +1,533 @@ +package database + +import ( + "context" + "fmt" + "log" + + "github.com/digitalocean/godo" + "github.com/digitalocean/terraform-provider-digitalocean/digitalocean/config" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceDigitalOceanDatabaseOpensearchConfig() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceDigitalOceanDatabaseOpensearchConfigCreate, + ReadContext: resourceDigitalOceanDatabaseOpensearchConfigRead, + UpdateContext: resourceDigitalOceanDatabaseOpensearchConfigUpdate, + DeleteContext: resourceDigitalOceanDatabaseOpensearchConfigDelete, + Importer: &schema.ResourceImporter{ + State: resourceDigitalOceanDatabaseOpensearchConfigImport, + }, + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.NoZeroValues, + }, + "ism_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "ism_history_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "ism_history_max_age_hours": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "ism_history_max_docs": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "ism_history_rollover_check_period_hours": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "ism_history_rollover_retention_period_days": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "http_max_content_length_bytes": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "http_max_header_size_bytes": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "http_max_initial_line_length_bytes": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1024), + }, + "indices_query_bool_max_clause_count": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(64), + }, + "search_max_buckets": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "indices_fielddata_cache_size_percentage": 
{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(3), + }, + "indices_memory_index_buffer_size_percentage": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(3), + }, + "indices_memory_min_index_buffer_size_mb": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(3), + }, + "indices_memory_max_index_buffer_size_mb": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(3), + }, + "indices_queries_cache_size_percentage": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(3), + }, + "indices_recovery_max_mb_per_sec": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(40), + }, + "indices_recovery_max_concurrent_file_chunks": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(2), + }, + "action_auto_create_index_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "action_destructive_requires_name": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "enable_security_audit": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "thread_pool_search_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "thread_pool_search_throttled_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "thread_pool_search_throttled_queue_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(10), + }, + "thread_pool_search_queue_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(10), + }, + "thread_pool_get_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "thread_pool_get_queue_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(10), + }, + "thread_pool_analyze_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "thread_pool_analyze_queue_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(10), + }, + "thread_pool_write_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "thread_pool_write_queue_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(10), + }, + "thread_pool_force_merge_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "override_main_response_version": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "script_max_compilations_rate": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "cluster_max_shards_per_node": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(100), + }, + "cluster_routing_allocation_node_concurrent_recoveries": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(2), + }, + "plugins_alerting_filter_by_backend_roles_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + 
"reindex_remote_whitelist": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, + }, + } +} + +func resourceDigitalOceanDatabaseOpensearchConfigCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*config.CombinedConfig).GodoClient() + clusterID := d.Get("cluster_id").(string) + + if err := updateOpensearchConfig(ctx, d, client); err != nil { + return diag.Errorf("Error updating Opensearch configuration: %s", err) + } + + d.SetId(makeDatabaseOpensearchConfigID(clusterID)) + + return resourceDigitalOceanDatabaseOpensearchConfigRead(ctx, d, meta) +} + +func resourceDigitalOceanDatabaseOpensearchConfigUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*config.CombinedConfig).GodoClient() + + if err := updateOpensearchConfig(ctx, d, client); err != nil { + return diag.Errorf("Error updating Opensearch configuration: %s", err) + } + + return resourceDigitalOceanDatabaseOpensearchConfigRead(ctx, d, meta) +} + +func updateOpensearchConfig(ctx context.Context, d *schema.ResourceData, client *godo.Client) error { + clusterID := d.Get("cluster_id").(string) + + opts := &godo.OpensearchConfig{} + + if v, ok := d.GetOk("ism_enabled"); ok { + opts.IsmEnabled = godo.PtrTo(v.(bool)) + } + + if v, ok := d.GetOk("ism_history_enabled"); ok { + opts.IsmHistoryEnabled = godo.PtrTo(v.(bool)) + } + + if v, ok := d.GetOk("ism_history_max_age_hours"); ok { + opts.IsmHistoryMaxAgeHours = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("ism_history_max_docs"); ok { + opts.IsmHistoryMaxDocs = godo.PtrTo(int64(v.(int))) + } + + if v, ok := d.GetOk("ism_history_rollover_check_period_hours"); ok { + opts.IsmHistoryRolloverCheckPeriodHours = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("ism_history_rollover_retention_period_days"); ok { + opts.IsmHistoryRolloverRetentionPeriodDays = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("http_max_content_length_bytes"); ok { + opts.HttpMaxContentLengthBytes = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("http_max_header_size_bytes"); ok { + opts.HttpMaxHeaderSizeBytes = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("http_max_initial_line_length_bytes"); ok { + opts.HttpMaxInitialLineLengthBytes = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("indices_query_bool_max_clause_count"); ok { + opts.IndicesQueryBoolMaxClauseCount = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("search_max_buckets"); ok { + opts.SearchMaxBuckets = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("indices_fielddata_cache_size_percentage"); ok { + opts.IndicesFielddataCacheSizePercentage = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("indices_memory_index_buffer_size_percentage"); ok { + opts.IndicesMemoryIndexBufferSizePercentage = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("indices_memory_min_index_buffer_size_mb"); ok { + opts.IndicesMemoryMinIndexBufferSizeMb = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("indices_memory_max_index_buffer_size_mb"); ok { + opts.IndicesMemoryMaxIndexBufferSizeMb = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("indices_queries_cache_size_percentage"); ok { + opts.IndicesQueriesCacheSizePercentage = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("indices_recovery_max_mb_per_sec"); ok { + opts.IndicesRecoveryMaxMbPerSec = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("indices_recovery_max_concurrent_file_chunks"); ok { + opts.IndicesRecoveryMaxConcurrentFileChunks = godo.PtrTo(v.(int)) + } + 
+ if v, ok := d.GetOk("action_auto_create_index_enabled"); ok { + opts.ActionAutoCreateIndexEnabled = godo.PtrTo(v.(bool)) + } + + if v, ok := d.GetOk("action_destructive_requires_name"); ok { + opts.ActionDestructiveRequiresName = godo.PtrTo(v.(bool)) + } + + if v, ok := d.GetOk("enable_security_audit"); ok { + opts.EnableSecurityAudit = godo.PtrTo(v.(bool)) + } + + if v, ok := d.GetOk("thread_pool_search_size"); ok { + opts.ThreadPoolSearchSize = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("thread_pool_search_throttled_size"); ok { + opts.ThreadPoolSearchThrottledSize = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("thread_pool_search_throttled_queue_size"); ok { + opts.ThreadPoolSearchThrottledQueueSize = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("thread_pool_search_queue_size"); ok { + opts.ThreadPoolSearchQueueSize = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("thread_pool_get_size"); ok { + opts.ThreadPoolGetSize = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("thread_pool_get_queue_size"); ok { + opts.ThreadPoolGetQueueSize = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("thread_pool_analyze_size"); ok { + opts.ThreadPoolAnalyzeSize = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("thread_pool_analyze_queue_size"); ok { + opts.ThreadPoolAnalyzeQueueSize = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("thread_pool_write_size"); ok { + opts.ThreadPoolWriteSize = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("thread_pool_write_queue_size"); ok { + opts.ThreadPoolWriteQueueSize = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("thread_pool_force_merge_size"); ok { + opts.ThreadPoolForceMergeSize = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("override_main_response_version"); ok { + opts.OverrideMainResponseVersion = godo.PtrTo(v.(bool)) + } + + if v, ok := d.GetOk("script_max_compilations_rate"); ok { + opts.ScriptMaxCompilationsRate = godo.PtrTo(v.(string)) + } + + if v, ok := d.GetOk("cluster_max_shards_per_node"); ok { + opts.ClusterMaxShardsPerNode = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("cluster_routing_allocation_node_concurrent_recoveries"); ok { + opts.ClusterRoutingAllocationNodeConcurrentRecoveries = godo.PtrTo(v.(int)) + } + + if v, ok := d.GetOk("plugins_alerting_filter_by_backend_roles_enabled"); ok { + opts.PluginsAlertingFilterByBackendRolesEnabled = godo.PtrTo(v.(bool)) + } + + if v, ok := d.GetOk("reindex_remote_whitelist"); ok { + if exampleSet, ok := v.(*schema.Set); ok { + var items []string + for _, item := range exampleSet.List() { + if str, ok := item.(string); ok { + items = append(items, str) + } else { + return fmt.Errorf("non-string item found in set") // todo: anna update err message + } + } + opts.ReindexRemoteWhitelist = items + } + } + + log.Printf("[DEBUG] Opensearch configuration: %s", godo.Stringify(opts)) + + if _, err := client.Databases.UpdateOpensearchConfig(ctx, clusterID, opts); err != nil { + return err + } + + return nil +} + +func resourceDigitalOceanDatabaseOpensearchConfigRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*config.CombinedConfig).GodoClient() + clusterID := d.Get("cluster_id").(string) + + config, resp, err := client.Databases.GetOpensearchConfig(ctx, clusterID) + if err != nil { + if resp != nil && resp.StatusCode == 404 { + d.SetId("") + return nil + } + + return diag.Errorf("Error retrieving Opensearch configuration: %s", err) + } + + d.Set("ism_enabled", config.IsmEnabled) + d.Set("ism_history_enabled", config.IsmHistoryEnabled) + 
d.Set("ism_history_max_age_hours", config.IsmHistoryMaxAgeHours) + d.Set("ism_history_max_docs", config.IsmHistoryMaxDocs) + d.Set("ism_history_rollover_check_period_hours", config.IsmHistoryRolloverCheckPeriodHours) + d.Set("ism_history_rollover_retention_period_days", config.IsmHistoryRolloverRetentionPeriodDays) + d.Set("http_max_content_length_bytes", config.HttpMaxContentLengthBytes) + d.Set("http_max_header_size_bytes", config.HttpMaxHeaderSizeBytes) + d.Set("http_max_initial_line_length_bytes", config.HttpMaxInitialLineLengthBytes) + d.Set("indices_query_bool_max_clause_count", config.IndicesQueryBoolMaxClauseCount) + d.Set("search_max_buckets", config.SearchMaxBuckets) + d.Set("indices_fielddata_cache_size_percentage", config.IndicesFielddataCacheSizePercentage) + d.Set("indices_memory_index_buffer_size_percentage", config.IndicesMemoryIndexBufferSizePercentage) + d.Set("indices_memory_min_index_buffer_size_mb", config.IndicesMemoryMinIndexBufferSizeMb) + d.Set("indices_memory_max_index_buffer_size_mb", config.IndicesMemoryMaxIndexBufferSizeMb) + d.Set("indices_queries_cache_size_percentage", config.IndicesQueriesCacheSizePercentage) + d.Set("indices_recovery_max_mb_per_sec", config.IndicesRecoveryMaxMbPerSec) + d.Set("indices_recovery_max_concurrent_file_chunks", config.IndicesRecoveryMaxConcurrentFileChunks) + d.Set("action_auto_create_index_enabled", config.ActionAutoCreateIndexEnabled) + d.Set("action_destructive_requires_name", config.ActionDestructiveRequiresName) + d.Set("enable_security_audit", config.EnableSecurityAudit) + d.Set("thread_pool_search_size", config.ThreadPoolSearchSize) + d.Set("thread_pool_search_throttled_size", config.ThreadPoolSearchThrottledSize) + d.Set("thread_pool_search_throttled_queue_size", config.ThreadPoolSearchThrottledQueueSize) + d.Set("thread_pool_search_queue_size", config.ThreadPoolSearchQueueSize) + d.Set("thread_pool_get_size", config.ThreadPoolGetSize) + d.Set("thread_pool_get_queue_size", config.ThreadPoolGetQueueSize) + d.Set("thread_pool_analyze_size", config.ThreadPoolAnalyzeSize) + d.Set("thread_pool_analyze_queue_size", config.ThreadPoolAnalyzeQueueSize) + d.Set("thread_pool_write_size", config.ThreadPoolWriteSize) + d.Set("thread_pool_write_queue_size", config.ThreadPoolWriteQueueSize) + d.Set("thread_pool_force_merge_size", config.ThreadPoolForceMergeSize) + d.Set("override_main_response_version", config.OverrideMainResponseVersion) + d.Set("script_max_compilations_rate", config.ScriptMaxCompilationsRate) + d.Set("cluster_max_shards_per_node", config.ClusterMaxShardsPerNode) + d.Set("cluster_routing_allocation_node_concurrent_recoveries", config.ClusterRoutingAllocationNodeConcurrentRecoveries) + d.Set("plugins_alerting_filter_by_backend_roles_enabled", config.PluginsAlertingFilterByBackendRolesEnabled) + d.Set("reindex_remote_whitelist", config.ReindexRemoteWhitelist) + + return nil +} + +func resourceDigitalOceanDatabaseOpensearchConfigDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + d.SetId("") + + warn := []diag.Diagnostic{ + { + Severity: diag.Warning, + Summary: "digitalocean_database_opensearch_config removed from state", + Detail: "Database configurations are only removed from state when destroyed. 
The remote configuration is not unset.", + }, + } + + return warn +} + +func resourceDigitalOceanDatabaseOpensearchConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + clusterID := d.Id() + + d.SetId(makeDatabaseOpensearchConfigID(clusterID)) + d.Set("cluster_id", clusterID) + + return []*schema.ResourceData{d}, nil +} + +func makeDatabaseOpensearchConfigID(clusterID string) string { + return fmt.Sprintf("%s/opensearch-config", clusterID) +} diff --git a/digitalocean/database/resource_database_opensearch_config_test.go b/digitalocean/database/resource_database_opensearch_config_test.go new file mode 100644 index 000000000..b92dbe029 --- /dev/null +++ b/digitalocean/database/resource_database_opensearch_config_test.go @@ -0,0 +1,48 @@ +package database_test + +import ( + "fmt" + "testing" + + "github.com/digitalocean/terraform-provider-digitalocean/digitalocean/acceptance" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDigitalOceanDatabaseOpensearchConfig_Basic(t *testing.T) { + name := acceptance.RandomTestName() + dbConfig := fmt.Sprintf(testAccCheckDigitalOceanDatabaseClusterOpensearch, name, "2") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.TestAccPreCheck(t) }, + ProviderFactories: acceptance.TestAccProviderFactories, + CheckDestroy: testAccCheckDigitalOceanDatabaseClusterDestroy, + Steps: []resource.TestStep{ + { + Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseOpensearchConfigConfigBasic, dbConfig, true, 10, "1"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("digitalocean_database_opensearch_config.foobar", "ism_enabled", "true"), + resource.TestCheckResourceAttr("digitalocean_database_opensearch_config.foobar", "ism_history_max_age_hours", "10"), + resource.TestCheckResourceAttr("digitalocean_database_opensearch_config.foobar", "ism_history_max_docs", "1"), + ), + }, + { + Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseOpensearchConfigConfigBasic, dbConfig, false, 1, "9223372036854775807"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("digitalocean_database_opensearch_config.foobar", "ism_enabled", "false"), + resource.TestCheckResourceAttr("digitalocean_database_opensearch_config.foobar", "ism_history_max_age_hours", "1"), + resource.TestCheckResourceAttr("digitalocean_database_opensearch_config.foobar", "ism_history_max_docs", "9223372036854775807"), + ), + }, + }, + }) +} + +const testAccCheckDigitalOceanDatabaseOpensearchConfigConfigBasic = ` +%s + +resource "digitalocean_database_opensearch_config" "foobar" { + cluster_id = digitalocean_database_cluster.foobar.id + ism_enabled = %t + ism_history_max_age_hours = %d + ism_history_max_docs = %s +}` diff --git a/digitalocean/provider.go b/digitalocean/provider.go index 281fa07c2..f0d682d5c 100644 --- a/digitalocean/provider.go +++ b/digitalocean/provider.go @@ -157,6 +157,7 @@ func Provider() *schema.Provider { "digitalocean_database_mysql_config": database.ResourceDigitalOceanDatabaseMySQLConfig(), "digitalocean_database_mongodb_config": database.ResourceDigitalOceanDatabaseMongoDBConfig(), "digitalocean_database_kafka_config": database.ResourceDigitalOceanDatabaseKafkaConfig(), + "digitalocean_database_opensearch_config": database.ResourceDigitalOceanDatabaseOpensearchConfig(), "digitalocean_database_kafka_topic": database.ResourceDigitalOceanDatabaseKafkaTopic(), "digitalocean_domain": domain.ResourceDigitalOceanDomain(), "digitalocean_droplet": 
droplet.ResourceDigitalOceanDroplet(), diff --git a/docs/resources/database_opensearch_config.md b/docs/resources/database_opensearch_config.md new file mode 100644 index 000000000..eb67ce786 --- /dev/null +++ b/docs/resources/database_opensearch_config.md @@ -0,0 +1,123 @@ +--- +page_title: "DigitalOcean: digitalocean_database_opensearch_config" +--- + +# digitalocean\_database\_opensearch\_config + +Provides a virtual resource that can be used to change advanced configuration +options for a DigitalOcean managed Opensearch database cluster. + +-> **Note** Opensearch configurations are only removed from state when destroyed. The remote configuration is not unset. + +## Example Usage + +```hcl +resource "digitalocean_database_opensearch_config" "example" { + cluster_id = digitalocean_database_cluster.example.id + ism_enabled = true + ism_history_enabled = true + ism_history_max_age_hours = 24 + ism_history_max_docs = 2500000 + ism_history_rollover_check_period_hours = 8 + ism_history_rollover_retention_period_days = 30 + http_max_content_length_bytes = 100000000 + http_max_header_size_bytes = 8192 + http_max_initial_line_length_bytes = 4096 + indices_query_bool_max_clause_count = 1024 + search_max_buckets = 10000 + indices_fielddata_cache_size_percentage = 3 + indices_memory_index_buffer_size_percentage = 10 + indices_memory_min_index_buffer_size_mb = 48 + indices_memory_max_index_buffer_size_mb = 3 + indices_queries_cache_size_percentage = 10 + indices_recovery_max_mb_per_sec = 40 + indices_recovery_max_concurrent_file_chunks = 2 + action_auto_create_index_enabled = true + action_destructive_requires_name = false + enable_security_audit = false + thread_pool_search_size = 1 + thread_pool_search_throttled_size = 1 + thread_pool_search_throttled_queue_size = 10 + thread_pool_search_queue_size = 10 + thread_pool_get_size = 1 + thread_pool_get_queue_size = 10 + thread_pool_analyze_size = 1 + thread_pool_analyze_queue_size = 10 + thread_pool_write_size = 1 + thread_pool_write_queue_size = 10 + thread_pool_force_merge_size = 1 + override_main_response_version = false + script_max_compilations_rate = "use-context" + cluster_max_shards_per_node = 100 + cluster_routing_allocation_node_concurrent_recoveries = 2 + plugins_alerting_filter_by_backend_roles_enabled = false + reindex_remote_whitelist = ["cloud.digitalocean.com:8080"] +} + +resource "digitalocean_database_cluster" "example" { + name = "example-opensearch-cluster" + engine = "opensearch" + version = "2" + size = "db-s-1vcpu-2gb" + region = "nyc3" + node_count = 1 +} +``` + + +## Argument Reference + +The following arguments are supported. See the [DigitalOcean API documentation](https://docs.digitalocean.com/reference/api/api-reference/#operation/databases_patch_config) +for additional details on each option. + +* `cluster_id` - (Required) The ID of the target Opensearch cluster. +* `ism_enabled` - (Optional) Specifies whether ISM is enabled or not. Default: `true` +* `ism_history_enabled` - (Optional) Specifies whether audit history is enabled or not. The logs from ISM are automatically indexed to a logs document. Default: `true` +* `ism_history_max_age_hours` - (Optional) Maximum age before rolling over the audit history index, in hours. Default: `24` +* `ism_history_max_docs` - (Optional) Maximum number of documents before rolling over the audit history index. Default: `2500000` +* `ism_history_rollover_check_period_hours` - (Optional) The time between rollover checks for the audit history index, in hours. 
Default: `8`
+* `ism_history_rollover_retention_period_days` - (Optional) How long audit history indices are kept, in days. Default: `30`
+* `http_max_content_length_bytes` - (Optional) Maximum content length for HTTP requests to the OpenSearch HTTP API, in bytes. Default: `100000000`
+* `http_max_header_size_bytes` - (Optional) Maximum size of allowed headers, in bytes. Default: `8192`
+* `http_max_initial_line_length_bytes` - (Optional) Maximum length of an HTTP URL, in bytes. Default: `4096`
+* `indices_query_bool_max_clause_count` - (Optional) Maximum number of clauses a Lucene BooleanQuery can have. Only increase it if necessary, as it may cause performance issues. Default: `1024`
+* `search_max_buckets` - (Optional) Maximum number of aggregation buckets allowed in a single response. Default: `10000`
+* `indices_fielddata_cache_size_percentage` - (Optional) Maximum amount of heap memory used for the field data cache, expressed as a percentage. Setting it too low increases the overhead of loading field data; setting it too high reduces the heap available for other operations.
+* `indices_memory_index_buffer_size_percentage` - (Optional) Total amount of heap used for the indexing buffer before writing segments to disk, expressed as a percentage. Setting it too low slows down indexing; setting it too high improves indexing throughput at the expense of query performance. Default: `10`
+* `indices_memory_min_index_buffer_size_mb` - (Optional) Minimum amount of heap used for the indexing buffer before writing segments to disk, in MB. Works in conjunction with `indices_memory_index_buffer_size_percentage`; both limits are enforced. Default: `48`
+* `indices_memory_max_index_buffer_size_mb` - (Optional) Maximum amount of heap used for the indexing buffer before writing segments to disk, in MB. Works in conjunction with `indices_memory_index_buffer_size_percentage`; both limits are enforced. The default is unbounded.
+* `indices_queries_cache_size_percentage` - (Optional) Maximum amount of heap used for the query cache, expressed as a percentage. Setting it too low decreases query performance while freeing heap for other operations; setting it too high can cause issues with other functionality. Default: `10`
+* `indices_recovery_max_mb_per_sec` - (Optional) Limits total inbound and outbound recovery traffic for each node, in MB per second. Applies to both peer recoveries and snapshot recoveries (i.e., restores from a snapshot). Default: `40`
+* `indices_recovery_max_concurrent_file_chunks` - (Optional) Maximum number of file chunks sent in parallel for each recovery. Default: `2`
+* `action_auto_create_index_enabled` - (Optional) Specifies whether to allow automatic creation of indices. Default: `true`
+* `action_destructive_requires_name` - (Optional) Specifies whether to require explicit index names when deleting indices.
+* `enable_security_audit` - (Optional) Specifies whether to enable security audit logging. Default: `false`
+* `thread_pool_search_size` - (Optional) Number of workers in the search operation thread pool. Note that the maximum value depends on CPU count; the value is automatically lowered if set higher than the maximum.
+* `thread_pool_search_throttled_size` - (Optional) Number of workers in the search throttled operation thread pool. This pool is used for searching frozen indices. Note that the maximum value depends on CPU count; the value is automatically lowered if set higher than the maximum.
+* `thread_pool_search_throttled_queue_size` - (Optional) Size of the queue for operations in the search throttled thread pool.
+* `thread_pool_search_queue_size` - (Optional) Size of the queue for operations in the search thread pool.
+* `thread_pool_get_size` - (Optional) Number of workers in the get operation thread pool. Note that the maximum value depends on CPU count; the value is automatically lowered if set higher than the maximum.
+* `thread_pool_get_queue_size` - (Optional) Size of the queue for operations in the get thread pool.
+* `thread_pool_analyze_size` - (Optional) Number of workers in the analyze operation thread pool. Note that the maximum value depends on CPU count; the value is automatically lowered if set higher than the maximum.
+* `thread_pool_analyze_queue_size` - (Optional) Size of the queue for operations in the analyze thread pool.
+* `thread_pool_write_size` - (Optional) Number of workers in the write operation thread pool. Note that the maximum value depends on CPU count; the value is automatically lowered if set higher than the maximum.
+* `thread_pool_write_queue_size` - (Optional) Size of the queue for operations in the write thread pool.
+* `thread_pool_force_merge_size` - (Optional) Number of workers in the force merge operation thread pool. This pool is used for forcing a merge between shards of one or more indices. Note that the maximum value depends on CPU count; the value is automatically lowered if set higher than the maximum.
+* `override_main_response_version` - (Optional) Compatibility mode that sets OpenSearch to report its version as 7.10 so clients continue to work. Default: `false`
+* `script_max_compilations_rate` - (Optional) Limits the number of inline script compilations within a period of time. Default: `use-context`
+* `cluster_max_shards_per_node` - (Optional) Maximum number of shards allowed per data node.
+* `cluster_routing_allocation_node_concurrent_recoveries` - (Optional) Maximum number of concurrent incoming/outgoing shard recoveries (normally replicas) allowed per node. Default: `2`
+* `plugins_alerting_filter_by_backend_roles_enabled` - (Optional) Enable or disable filtering of alerting by backend roles. Default: `false`
+* `reindex_remote_whitelist` - (Optional) Allowlist of remote IP addresses for reindexing. Changing this value will cause all OpenSearch instances to restart.
+
+## Attributes Reference
+
+All of the above attributes are exported. If an attribute was set outside of Terraform, it will be computed.
+
+## Import
+
+An Opensearch database cluster's configuration can be imported using the `id` of the parent cluster, e.g.
+ +``` +terraform import digitalocean_database_opensearch_config.example 4b62829a-9c42-465b-aaa3-84051048e712 +``` diff --git a/go.mod b/go.mod index c2440d8a1..cef6e2feb 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,9 @@ module github.com/digitalocean/terraform-provider-digitalocean require ( github.com/aws/aws-sdk-go v1.42.18 - github.com/digitalocean/godo v1.125.1-0.20240920194833-57fbfebd23d4 + github.com/digitalocean/godo v1.126.1-0.20241004175533-dfe74ef3d8bb github.com/hashicorp/awspolicyequivalence v1.5.0 + github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.26.1 @@ -25,7 +26,6 @@ require ( github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.4.8 // indirect diff --git a/go.sum b/go.sum index 633f76663..07b7e1430 100644 --- a/go.sum +++ b/go.sum @@ -25,8 +25,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitalocean/godo v1.125.1-0.20240920194833-57fbfebd23d4 h1:G/lf5YrNl4bDJyp3yJRld3J5BTFpQStYJHEnE6SxigY= -github.com/digitalocean/godo v1.125.1-0.20240920194833-57fbfebd23d4/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.126.1-0.20241004175533-dfe74ef3d8bb h1:D6/pJ6+5wMkpwaF2HMJqvhlPlxWdlbpKx6bOpy8sbBg= +github.com/digitalocean/godo v1.126.1-0.20241004175533-dfe74ef3d8bb/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index b045687a9..371272169 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,10 @@ # Change Log +## [v1.126.0] - 2024-09-25 + +- #732 - @gottwald - DOKS: add custom CIDR fields +- #727 - @loosla - [databases]: add support for Kafka advanced configuration + ## [v1.125.0] - 2024-09-17 - #726 - @loosla - [databases]: add support for MongoDB advanced configuration diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index e168186ff..276fb4a6b 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -154,11 +154,13 @@ type DatabasesService interface { GetRedisConfig(context.Context, string) (*RedisConfig, *Response, error) GetMySQLConfig(context.Context, string) (*MySQLConfig, *Response, error) GetMongoDBConfig(context.Context, string) (*MongoDBConfig, *Response, error) + GetOpensearchConfig(context.Context, string) (*OpensearchConfig, *Response, error) GetKafkaConfig(context.Context, string) (*KafkaConfig, *Response, 
error) UpdatePostgreSQLConfig(context.Context, string, *PostgreSQLConfig) (*Response, error) UpdateRedisConfig(context.Context, string, *RedisConfig) (*Response, error) UpdateMySQLConfig(context.Context, string, *MySQLConfig) (*Response, error) UpdateMongoDBConfig(context.Context, string, *MongoDBConfig) (*Response, error) + UpdateOpensearchConfig(context.Context, string, *OpensearchConfig) (*Response, error) UpdateKafkaConfig(context.Context, string, *KafkaConfig) (*Response, error) ListOptions(todo context.Context) (*DatabaseOptions, *Response, error) UpgradeMajorVersion(context.Context, string, *UpgradeVersionRequest) (*Response, error) @@ -683,6 +685,48 @@ type KafkaConfig struct { AutoCreateTopicsEnable *bool `json:"auto_create_topics_enable,omitempty"` } +// OpensearchConfig holds advanced configurations for Opensearch database clusters. +type OpensearchConfig struct { + HttpMaxContentLengthBytes *int `json:"http_max_content_length_bytes,omitempty"` + HttpMaxHeaderSizeBytes *int `json:"http_max_header_size_bytes,omitempty"` + HttpMaxInitialLineLengthBytes *int `json:"http_max_initial_line_length_bytes,omitempty"` + IndicesQueryBoolMaxClauseCount *int `json:"indices_query_bool_max_clause_count,omitempty"` + IndicesFielddataCacheSizePercentage *int `json:"indices_fielddata_cache_size_percentage,omitempty"` + IndicesMemoryIndexBufferSizePercentage *int `json:"indices_memory_index_buffer_size_percentage,omitempty"` + IndicesMemoryMinIndexBufferSizeMb *int `json:"indices_memory_min_index_buffer_size_mb,omitempty"` + IndicesMemoryMaxIndexBufferSizeMb *int `json:"indices_memory_max_index_buffer_size_mb,omitempty"` + IndicesQueriesCacheSizePercentage *int `json:"indices_queries_cache_size_percentage,omitempty"` + IndicesRecoveryMaxMbPerSec *int `json:"indices_recovery_max_mb_per_sec,omitempty"` + IndicesRecoveryMaxConcurrentFileChunks *int `json:"indices_recovery_max_concurrent_file_chunks,omitempty"` + ThreadPoolSearchSize *int `json:"thread_pool_search_size,omitempty"` + ThreadPoolSearchThrottledSize *int `json:"thread_pool_search_throttled_size,omitempty"` + ThreadPoolGetSize *int `json:"thread_pool_get_size,omitempty"` + ThreadPoolAnalyzeSize *int `json:"thread_pool_analyze_size,omitempty"` + ThreadPoolWriteSize *int `json:"thread_pool_write_size,omitempty"` + ThreadPoolForceMergeSize *int `json:"thread_pool_force_merge_size,omitempty"` + ThreadPoolSearchQueueSize *int `json:"thread_pool_search_queue_size,omitempty"` + ThreadPoolSearchThrottledQueueSize *int `json:"thread_pool_search_throttled_queue_size,omitempty"` + ThreadPoolGetQueueSize *int `json:"thread_pool_get_queue_size,omitempty"` + ThreadPoolAnalyzeQueueSize *int `json:"thread_pool_analyze_queue_size,omitempty"` + ThreadPoolWriteQueueSize *int `json:"thread_pool_write_queue_size,omitempty"` + IsmEnabled *bool `json:"ism_enabled,omitempty"` + IsmHistoryEnabled *bool `json:"ism_history_enabled,omitempty"` + IsmHistoryMaxAgeHours *int `json:"ism_history_max_age_hours,omitempty"` + IsmHistoryMaxDocs *int64 `json:"ism_history_max_docs,omitempty"` + IsmHistoryRolloverCheckPeriodHours *int `json:"ism_history_rollover_check_period_hours,omitempty"` + IsmHistoryRolloverRetentionPeriodDays *int `json:"ism_history_rollover_retention_period_days,omitempty"` + SearchMaxBuckets *int `json:"search_max_buckets,omitempty"` + ActionAutoCreateIndexEnabled *bool `json:"action_auto_create_index_enabled,omitempty"` + EnableSecurityAudit *bool `json:"enable_security_audit,omitempty"` + ActionDestructiveRequiresName *bool 
`json:"action_destructive_requires_name,omitempty"` + ClusterMaxShardsPerNode *int `json:"cluster_max_shards_per_node,omitempty"` + OverrideMainResponseVersion *bool `json:"override_main_response_version,omitempty"` + ScriptMaxCompilationsRate *string `json:"script_max_compilations_rate,omitempty"` + ClusterRoutingAllocationNodeConcurrentRecoveries *int `json:"cluster_routing_allocation_node_concurrent_recoveries,omitempty"` + ReindexRemoteWhitelist []string `json:"reindex_remote_whitelist,omitempty"` + PluginsAlertingFilterByBackendRolesEnabled *bool `json:"plugins_alerting_filter_by_backend_roles_enabled,omitempty"` +} + type databaseUserRoot struct { User *DatabaseUser `json:"user"` } @@ -727,6 +771,10 @@ type databaseMongoDBConfigRoot struct { Config *MongoDBConfig `json:"config"` } +type databaseOpensearchConfigRoot struct { + Config *OpensearchConfig `json:"config"` +} + type databaseKafkaConfigRoot struct { Config *KafkaConfig `json:"config"` } @@ -1606,6 +1654,38 @@ func (svc *DatabasesServiceOp) UpdateKafkaConfig(ctx context.Context, databaseID return resp, nil } +// GetOpensearchConfig retrieves the config for a Opensearch database cluster. +func (svc *DatabasesServiceOp) GetOpensearchConfig(ctx context.Context, databaseID string) (*OpensearchConfig, *Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseOpensearchConfigRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Config, resp, nil +} + +// UpdateOpensearchConfig updates the config for a Opensearch database cluster. +func (svc *DatabasesServiceOp) UpdateOpensearchConfig(ctx context.Context, databaseID string, config *OpensearchConfig) (*Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + root := &databaseOpensearchConfigRoot{ + Config: config, + } + req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + // ListOptions gets the database options available. func (svc *DatabasesServiceOp) ListOptions(ctx context.Context) (*DatabaseOptions, *Response, error) { root := new(databaseOptionsRoot) diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index 45c0f115f..3702ac1f7 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.125.0" + libraryVersion = "1.126.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go index 8ef9d241e..9b3bcfa1a 100644 --- a/vendor/github.com/digitalocean/godo/kubernetes.go +++ b/vendor/github.com/digitalocean/godo/kubernetes.go @@ -65,11 +65,13 @@ type KubernetesServiceOp struct { // KubernetesClusterCreateRequest represents a request to create a Kubernetes cluster. 
type KubernetesClusterCreateRequest struct { - Name string `json:"name,omitempty"` - RegionSlug string `json:"region,omitempty"` - VersionSlug string `json:"version,omitempty"` - Tags []string `json:"tags,omitempty"` - VPCUUID string `json:"vpc_uuid,omitempty"` + Name string `json:"name,omitempty"` + RegionSlug string `json:"region,omitempty"` + VersionSlug string `json:"version,omitempty"` + Tags []string `json:"tags,omitempty"` + VPCUUID string `json:"vpc_uuid,omitempty"` + ClusterSubnet string `json:"cluster_subnet,omitempty"` + ServiceSubnet string `json:"service_subnet,omitempty"` // Create cluster with highly available control plane HA bool `json:"ha"` diff --git a/vendor/modules.txt b/vendor/modules.txt index 1ba5f61c4..4be2307e9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,7 +58,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/digitalocean/godo v1.125.1-0.20240920194833-57fbfebd23d4 +# github.com/digitalocean/godo v1.126.1-0.20241004175533-dfe74ef3d8bb ## explicit; go 1.22 github.com/digitalocean/godo github.com/digitalocean/godo/metrics
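For reviewers who want to exercise the godo surface vendored above outside of Terraform, here is a minimal sketch of calling `GetOpensearchConfig` and `UpdateOpensearchConfig` directly. The environment variables, the `main` wrapper, and the particular fields patched are illustrative assumptions, not part of this change.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/digitalocean/godo"
)

func main() {
	// Assumed inputs: a DigitalOcean API token and the ID of an existing
	// OpenSearch cluster, both supplied via environment variables.
	client := godo.NewFromToken(os.Getenv("DIGITALOCEAN_TOKEN"))
	clusterID := os.Getenv("OPENSEARCH_CLUSTER_ID")
	ctx := context.Background()

	// Fetch the cluster's current advanced configuration.
	cfg, _, err := client.Databases.GetOpensearchConfig(ctx, clusterID)
	if err != nil {
		log.Fatalf("get opensearch config: %s", err)
	}
	fmt.Printf("current config: %s\n", godo.Stringify(cfg))

	// Patch a subset of fields; fields left nil are omitted from the request
	// body and remain unchanged on the cluster.
	update := &godo.OpensearchConfig{
		IsmEnabled:            godo.PtrTo(true),
		IsmHistoryMaxAgeHours: godo.PtrTo(24),
	}
	if _, err := client.Databases.UpdateOpensearchConfig(ctx, clusterID, update); err != nil {
		log.Fatalf("update opensearch config: %s", err)
	}
}
```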