From 9d114e03610650cb9392f1e4005eabfa254b4000 Mon Sep 17 00:00:00 2001 From: Mitchel-Stig-Instaclustr <70504904+Mitchel-Stig-Instaclustr@users.noreply.github.com> Date: Thu, 3 Sep 2020 10:05:00 +1000 Subject: [PATCH] Add redis support (#48) * Added Redis capabilities to Terraform * Updated Readme and examples to include Redis * Allowed upper/lowercase bundle types. * Updated makefile version --- Makefile | 2 +- README.md | 7 +- examples/main.tf | 20 +++ instaclustr/resource_cluster.go | 44 ++++-- instaclustr/structs.go | 20 +-- test/data/valid_redis_cluster_create.tf | 25 ++++ test/resource_cluster_test.go | 174 ++++++++++++++---------- test/test_utils.go | 55 ++++++-- 8 files changed, 247 insertions(+), 100 deletions(-) create mode 100644 test/data/valid_redis_cluster_create.tf diff --git a/Makefile b/Makefile index e8fd9ad9..f96d6f55 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ BIN_NAME="terraform-provider-instaclustr" -VERSION=v1.3.1 +VERSION=v1.4.0 .PHONY: install clean all build test testacc diff --git a/README.md b/README.md index 585cb0ad..0e7c7c85 100644 --- a/README.md +++ b/README.md @@ -110,7 +110,7 @@ cluster_network|The private network address block for the cluster specified usin private_network_cluster|Accepts true/false. Creates the cluster with private network only.|false pci_compliant_cluster|Accepts true/false. Creates the cluster with PCI compliance enabled.|false cluster_provider|The information of infrastructure provider. See below for its properties.|Required -rack_allocation|The number of resources to use. See below for its properties.|Required +rack_allocation|The number of resources to use. See below for its properties.|Optional, but Required for all Bundle types excluding Redis. bundle|Array of bundle information. See below for its properties.|Required `cluster_provider` @@ -144,7 +144,7 @@ options|Options and add-ons for the given bundle. See `bundle.options` below for Property | Description | For Bundles | Default ---------|-------------|-------------|-------- auth_n_authz|Accepts true/false. Enables Password Authentication and User Authorization.|Cassandra|false -client_encryption|Accepts true/false. Enables Client ⇄ Node Encryption.|Cassandra, Kafka, Elasticsearch, Spark|false +client_encryption|Accepts true/false. Enables Client ⇄ Node Encryption.|Cassandra, Kafka, Elasticsearch, Spark, Redis|false dedicated_master_nodes|Accepts true/false. Enables Dedicated Master Nodes.|Elasticsearch|false master_node_size|Desired master node size. See [here](https://www.instaclustr.com/support/api-integrations/api-reference/provisioning-api/#section-reference-data-data-centres-and-node-sizes) for more details.|Elasticsearch|Required security_plugin|Accepts true/false. Enables Security Plugin. This option gives an extra layer of security to the cluster. This will automatically enable internode encryption.|Elasticsearch|false @@ -161,6 +161,8 @@ aws_access_key, aws_secret_key, s3_bucket_name|Access information for the S3 buc azure_storage_account_name, azure_storage_account_key, azure_storage_container_name|Access information for the Azure Storage container where you will store your custom connectors.|Kafka Connect ssl_enabled_protocols, ssl_truststore_password, ssl_protocol, security_protocol, sasl_mechanism, sasl_jaas_config, bootstrap_servers|Connection information for your Kafka Cluster. These options are analogous to the similarly named options that you would place in your Kafka Connect worker.properties file. 
Only required if connecting to a Non-Instaclustr managed Kafka Cluster|Kafka Connect truststore|Base64 encoded version of the TLS trust store (in JKS format) used to connect to your Kafka Cluster. Only required if connecting to a Non-Instaclustr managed Kafka Cluster with TLS enabled|Kafka Connect +master_nodes|The number of Master nodes in a generated Redis Cluster.|Redis|Required (Integers) +replica_nodes|The number of Replica nodes in a generated Redis Cluster.|Redis|Required (Integers) ### Resource: `instaclustr_firewall_rule` A resource for managing cluster firewall rules on Instaclustr Managed Platform. A firewall rule allows access to your Instaclustr cluster. @@ -289,6 +291,7 @@ KAFKA_REST_PROXY|5.0.0|KAFKA KAFKA_SCHEMA_REGISTRY|5.0.0|KAFKA ELASTICSEARCH|opendistro-for-elasticsearch:1.4.0 KAFKA_CONNECT|2.3.1, 2.4.1| +REDIS|6.0.4| ### Migrating from 0.0.1 → 1.0.0+ A schema change has been made from 0.0.1 which no longer supports the `bundles` argument and uses `bundle` blocks instead. This change can cause `terraform apply` to fail with a message that `bundles` has been removed and/or updating isn't supported. To resolve this -
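For illustration, the two new Redis option rows above end up as string fields on BundleOptions (see the instaclustr/structs.go hunk further down in this patch). A minimal sketch, not part of the patch, of how those options serialise into the provisioning request body — the stand-in type and program here are hypothetical, only the JSON tags are copied from that hunk:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down stand-in for instaclustr.BundleOptions, keeping only the two
// Redis fields added by this patch; the JSON tags are copied from the
// instaclustr/structs.go hunk below.
type redisOptions struct {
	RedisMasterNodes  string `json:"masterNodes,omitempty"`
	RedisReplicaNodes string `json:"replicaNodes,omitempty"`
}

func main() {
	body, _ := json.Marshal(redisOptions{RedisMasterNodes: "3", RedisReplicaNodes: "3"})
	fmt.Println(string(body)) // {"masterNodes":"3","replicaNodes":"3"}
}
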
diff --git a/examples/main.tf b/examples/main.tf index 870a2316..b8cd5eb7 100644 --- a/examples/main.tf +++ b/examples/main.tf @@ -208,4 +208,24 @@ resource "instaclustr_cluster" "private_cluster_example" { bundle = "APACHE_CASSANDRA" version = "3.11.4" } +} + +resource "instaclustr_cluster" "example-redis" { + cluster_name = "testcluster" + node_size = "t3.small-20-r" + data_centre = "US_WEST_2" + sla_tier = "NON_PRODUCTION" + cluster_network = "192.168.0.0/18" + cluster_provider = { + name = "AWS_VPC" + } + + bundle { + bundle = "REDIS" + version = "6.0.4" + options = { + master_nodes = 3, + replica_nodes = 3 + } + } } \ No newline at end of file diff --git a/instaclustr/resource_cluster.go b/instaclustr/resource_cluster.go index 2759fec8..3c88b092 100644 --- a/instaclustr/resource_cluster.go +++ b/instaclustr/resource_cluster.go @@ -110,7 +110,7 @@ func resourceCluster() *schema.Resource { "rack_allocation": { Type: schema.TypeMap, - Required: true, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "number_of_racks": { @@ -254,6 +254,14 @@ func resourceCluster() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "master_nodes": { + Type: schema.TypeInt, + Optional: true, + }, + "replica_nodes": { + Type: schema.TypeInt, + Optional: true, + }, }, }, }, @@ -279,12 +287,6 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { return err } - var rackAllocation RackAllocation - err = mapstructure.Decode(d.Get("rack_allocation").(map[string]interface{}), &rackAllocation) - if err != nil { - return err - } - createData := CreateRequest{ ClusterName: d.Get("cluster_name").(string), Bundles: bundles, @@ -295,7 +297,17 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { ClusterNetwork: d.Get("cluster_network").(string), PrivateNetworkCluster: fmt.Sprintf("%v", d.Get("private_network_cluster")), PCICompliantCluster: fmt.Sprintf("%v", d.Get("pci_compliant_cluster")), - RackAllocation: rackAllocation, + } + + // Some Bundles do not use Rack Allocation so add that separately if needed. 
(Redis for example) + if checkIfBundleRequiresRackAllocation(bundles) { + var rackAllocation RackAllocation + err = mapstructure.Decode(d.Get("rack_allocation").(map[string]interface{}), &rackAllocation) + if err != nil { + return err + } + + createData.RackAllocation = &rackAllocation } var jsonStr []byte @@ -421,3 +433,19 @@ func getBundles(d *schema.ResourceData) ([]Bundle, error) { func formatCreateErrMsg(err error) error { return fmt.Errorf("[Error] Error creating cluster: %s", err) } + +func checkIfBundleRequiresRackAllocation(bundles []Bundle) bool { + var noRackAllocationBundles = []string{ + "REDIS", + } + + for i := 0; i < len(bundles); i++ { + for j := 0; j < len(noRackAllocationBundles); j++ { + if strings.ToLower(bundles[i].Bundle) == strings.ToLower(noRackAllocationBundles[j]) { + return false + } + } + } + + return true +} diff --git a/instaclustr/structs.go b/instaclustr/structs.go index b13c6231..97ca925d 100644 --- a/instaclustr/structs.go +++ b/instaclustr/structs.go @@ -1,9 +1,9 @@ package instaclustr type FirewallRule struct { - Network string `json:"network,omitempty"` + Network string `json:"network,omitempty"` SecurityGroupId string `json:"securityGroupId,omitempty"` - Rules []RuleType `json:"rules"` + Rules []RuleType `json:"rules"` } type RuleType struct { @@ -45,6 +45,8 @@ type BundleOptions struct { SaslJaasConfig string `json:"sasl.jaas.config,omitempty" mapstructure:"sasl_jaas_config"` BootstrapServers string `json:"bootstrap.servers,omitempty" mapstructure:"bootstrap_servers"` Truststore string `json:"truststore,omitempty" mapstructure:"truststore"` + RedisMasterNodes string `json:"masterNodes,omitempty" mapstructure:"master_nodes"` + RedisReplicaNodes string `json:"replicaNodes,omitempty" mapstructure:"replica_nodes"` } type ClusterProvider struct { @@ -71,7 +73,7 @@ type CreateRequest struct { ClusterNetwork string `json:"clusterNetwork"` PrivateNetworkCluster string `json:"privateNetworkCluster"` PCICompliantCluster string `json:"pciCompliantCluster"` - RackAllocation RackAllocation `json:"rackAllocation"` + RackAllocation *RackAllocation `json:"rackAllocation,omitempty"` } type Cluster struct { @@ -153,16 +155,16 @@ type EncryptionKey struct { } type CreateKafkaUserRequest struct { - Username string `json:"username"` - Password string `json:"password"` - InitialPermissions string `json:"initial-permissions"` + Username string `json:"username"` + Password string `json:"password"` + InitialPermissions string `json:"initial-permissions"` } type UpdateKafkaUserRequest struct { - Username string `json:"username"` - Password string `json:"password"` + Username string `json:"username"` + Password string `json:"password"` } type DeleteKafkaUserRequest struct { - Username string `json:"username"` + Username string `json:"username"` } diff --git a/test/data/valid_redis_cluster_create.tf b/test/data/valid_redis_cluster_create.tf new file mode 100644 index 00000000..cf3d6739 --- /dev/null +++ b/test/data/valid_redis_cluster_create.tf @@ -0,0 +1,25 @@ +provider "instaclustr" { + username = "%s" + api_key = "%s" +} + +resource "instaclustr_cluster" "validRedis" { + cluster_name = "testcluster" + node_size = "t3.small-20-r" + data_centre = "US_WEST_2" + sla_tier = "NON_PRODUCTION" + cluster_network = "192.168.0.0/18" + cluster_provider = { + name = "AWS_VPC" + } + + bundle { + bundle = "REDIS" + version = "6.0.4" + options = { + master_nodes = 3, + replica_nodes = 3 + } + } +} + diff --git a/test/resource_cluster_test.go b/test/resource_cluster_test.go index 
cceec427..9c89c6f6 100644 --- a/test/resource_cluster_test.go +++ b/test/resource_cluster_test.go @@ -13,7 +13,6 @@ import ( "github.com/instaclustr/terraform-provider-instaclustr/instaclustr" ) - func TestAccCluster(t *testing.T) { testAccProvider := instaclustr.Provider() testAccProviders := map[string]terraform.ResourceProvider{ @@ -46,79 +45,79 @@ func TestAccCluster(t *testing.T) { } func TestKafkaConnectClusterCreateInstaclustrAWS(t *testing.T) { - if v := os.Getenv("IC_TEST_KAFKA_CONNECT"); v == "" { - t.Skip("Skipping TestKafkaConnectClusterCreateInstaclustrAWS because IC_TEST_KAFKA_CONNECT is not set") - } - testAccProvider := instaclustr.Provider() - testAccProviders := map[string]terraform.ResourceProvider{ - "instaclustr": testAccProvider, - } - validKCConfig, _ := ioutil.ReadFile("data/valid_kafka_connect_instaclustr_aws.tf") - username := os.Getenv("IC_USERNAME") - apiKey := os.Getenv("IC_API_KEY") - kafkaClusterId := os.Getenv("IC_TARGET_KAFKA_CLUSTER_ID") - awsAccessKey := os.Getenv("IC_AWS_ACCESS_KEY") - awsSecretKey := os.Getenv("IC_AWS_SECRET_KEY") - S3BucketName := os.Getenv("IC_S3_BUCKET_NAME") - oriKCConfig := fmt.Sprintf(string(validKCConfig), username, apiKey, kafkaClusterId, awsAccessKey, awsSecretKey, S3BucketName) - hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname) - resource.Test(t, resource.TestCase{ - Providers: testAccProviders, - PreCheck: func() { AccTestEnvVarsCheck(t) }, - CheckDestroy: testCheckResourceDeleted("validKC", hostname, username, apiKey), - Steps: []resource.TestStep{ - { - Config: oriKCConfig, - Check: resource.ComposeTestCheckFunc( - testCheckResourceValid("validKC"), - testCheckResourceCreated("validKC", hostname, username, apiKey), - ), - }, - }, - }) -} + if v := os.Getenv("IC_TEST_KAFKA_CONNECT"); v == "" { + t.Skip("Skipping TestKafkaConnectClusterCreateInstaclustrAWS because IC_TEST_KAFKA_CONNECT is not set") + } + testAccProvider := instaclustr.Provider() + testAccProviders := map[string]terraform.ResourceProvider{ + "instaclustr": testAccProvider, + } + validKCConfig, _ := ioutil.ReadFile("data/valid_kafka_connect_instaclustr_aws.tf") + username := os.Getenv("IC_USERNAME") + apiKey := os.Getenv("IC_API_KEY") + kafkaClusterId := os.Getenv("IC_TARGET_KAFKA_CLUSTER_ID") + awsAccessKey := os.Getenv("IC_AWS_ACCESS_KEY") + awsSecretKey := os.Getenv("IC_AWS_SECRET_KEY") + S3BucketName := os.Getenv("IC_S3_BUCKET_NAME") + oriKCConfig := fmt.Sprintf(string(validKCConfig), username, apiKey, kafkaClusterId, awsAccessKey, awsSecretKey, S3BucketName) + hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname) + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + PreCheck: func() { AccTestEnvVarsCheck(t) }, + CheckDestroy: testCheckResourceDeleted("validKC", hostname, username, apiKey), + Steps: []resource.TestStep{ + { + Config: oriKCConfig, + Check: resource.ComposeTestCheckFunc( + testCheckResourceValid("validKC"), + testCheckResourceCreated("validKC", hostname, username, apiKey), + ), + }, + }, + }) +} func TestKafkaConnectClusterCreateNonInstaclustrAZURE(t *testing.T) { - if v := os.Getenv("IC_TEST_KAFKA_CONNECT"); v == "" { - t.Skip("Skipping TestKafkaConnectClusterCreateNonInstaclustrAZURE because IC_TEST_KAFKA_CONNECT is not set") - } - testAccProvider := instaclustr.Provider() - testAccProviders := map[string]terraform.ResourceProvider{ - "instaclustr": testAccProvider, - } - validKCConfig, _ := ioutil.ReadFile("data/valid_kafka_connect_non_instaclustr_azure.tf") - username := 
os.Getenv("IC_USERNAME") - apiKey := os.Getenv("IC_API_KEY") - azureStorageAccountName := os.Getenv("IC_AZURE_STORAGE_ACCOUNT_NAME") - azureStorageAccountKey := os.Getenv("IC_AZURE_STORAGE_ACCOUNT_KEY") - azureStorageContainerName := os.Getenv("IC_AZURE_STORAGE_CONTAINER_NAME") - sslEnabledProtocols := os.Getenv("IC_SSL_ENABLED_PROTOCOLS") - sslTruststorePassword := os.Getenv("IC_SSL_TRUSTSTORE_PASSWORD") - sslProtocol := os.Getenv("IC_SSL_PROTOCOL") - securityProtocol := os.Getenv("IC_SECURITY_PROTOCOL") - saslMechanism := os.Getenv("IC_SASL_MECHANISM") - saslJaasConfig := os.Getenv("IC_SASL_JAAS_CONFIG") - bootstrapServers := os.Getenv("IC_BOOTSTRAP_SERVER") - truststore := os.Getenv("IC_TRUSTSTORE") - oriKCConfig := fmt.Sprintf(string(validKCConfig), username, apiKey, azureStorageAccountName, + if v := os.Getenv("IC_TEST_KAFKA_CONNECT"); v == "" { + t.Skip("Skipping TestKafkaConnectClusterCreateNonInstaclustrAZURE because IC_TEST_KAFKA_CONNECT is not set") + } + testAccProvider := instaclustr.Provider() + testAccProviders := map[string]terraform.ResourceProvider{ + "instaclustr": testAccProvider, + } + validKCConfig, _ := ioutil.ReadFile("data/valid_kafka_connect_non_instaclustr_azure.tf") + username := os.Getenv("IC_USERNAME") + apiKey := os.Getenv("IC_API_KEY") + azureStorageAccountName := os.Getenv("IC_AZURE_STORAGE_ACCOUNT_NAME") + azureStorageAccountKey := os.Getenv("IC_AZURE_STORAGE_ACCOUNT_KEY") + azureStorageContainerName := os.Getenv("IC_AZURE_STORAGE_CONTAINER_NAME") + sslEnabledProtocols := os.Getenv("IC_SSL_ENABLED_PROTOCOLS") + sslTruststorePassword := os.Getenv("IC_SSL_TRUSTSTORE_PASSWORD") + sslProtocol := os.Getenv("IC_SSL_PROTOCOL") + securityProtocol := os.Getenv("IC_SECURITY_PROTOCOL") + saslMechanism := os.Getenv("IC_SASL_MECHANISM") + saslJaasConfig := os.Getenv("IC_SASL_JAAS_CONFIG") + bootstrapServers := os.Getenv("IC_BOOTSTRAP_SERVER") + truststore := os.Getenv("IC_TRUSTSTORE") + oriKCConfig := fmt.Sprintf(string(validKCConfig), username, apiKey, azureStorageAccountName, azureStorageAccountKey, azureStorageContainerName, sslEnabledProtocols, sslTruststorePassword, sslProtocol, securityProtocol, saslMechanism, saslJaasConfig, bootstrapServers, truststore) - hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname) - resource.Test(t, resource.TestCase{ - Providers: testAccProviders, - PreCheck: func() { AccTestEnvVarsCheck(t) }, - CheckDestroy: testCheckResourceDeleted("validKC", hostname, username, apiKey), - Steps: []resource.TestStep{ - { - Config: oriKCConfig, - Check: resource.ComposeTestCheckFunc( - testCheckResourceValid("validKC"), - testCheckResourceCreated("validKC", hostname, username, apiKey), - ), - }, - }, - }) -} + hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname) + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + PreCheck: func() { AccTestEnvVarsCheck(t) }, + CheckDestroy: testCheckResourceDeleted("validKC", hostname, username, apiKey), + Steps: []resource.TestStep{ + { + Config: oriKCConfig, + Check: resource.ComposeTestCheckFunc( + testCheckResourceValid("validKC"), + testCheckResourceCreated("validKC", hostname, username, apiKey), + ), + }, + }, + }) +} func TestKafkaConnectClusterInvalid(t *testing.T) { testAccProvider := instaclustr.Provider() @@ -129,8 +128,8 @@ func TestKafkaConnectClusterInvalid(t *testing.T) { username := os.Getenv("IC_USERNAME") apiKey := os.Getenv("IC_API_KEY") hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname) - invalidConfig := 
fmt.Sprintf(string(readConfig), username, apiKey, hostname) - fmt.Printf("Config : %s", invalidConfig) + invalidConfig := fmt.Sprintf(string(readConfig), username, apiKey, hostname) + fmt.Printf("Config : %s", invalidConfig) resource.Test(t, resource.TestCase{ Providers: testAccProviders, PreCheck: func() { AccTestEnvVarsCheck(t) }, @@ -345,3 +344,34 @@ func testCheckClusterResize(hostname, username, apiKey, expectedNodeSize string) return nil } } + +func TestValidRedisClusterCreate(t *testing.T) { + testAccProvider := instaclustr.Provider() + testAccProviders := map[string]terraform.ResourceProvider{ + "instaclustr": testAccProvider, + } + validConfig, _ := ioutil.ReadFile("data/valid_redis_cluster_create.tf") + username := os.Getenv("IC_USERNAME") + apiKey := os.Getenv("IC_API_KEY") + oriConfig := fmt.Sprintf(string(validConfig), username, apiKey) + hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname) + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + PreCheck: func() { + checkAccVariablesSet(t, []string{ + "IC_USERNAME", + "IC_API_KEY", + }) + }, + CheckDestroy: testCheckResourceDeleted("validRedis", hostname, username, apiKey), + Steps: []resource.TestStep{ + { + Config: oriConfig, + Check: resource.ComposeTestCheckFunc( + testCheckResourceValid("validRedis"), + testCheckResourceCreated("validRedis", hostname, username, apiKey), + ), + }, + }, + }) +} diff --git a/test/test_utils.go b/test/test_utils.go index 522beb5f..33bfb056 100644 --- a/test/test_utils.go +++ b/test/test_utils.go @@ -1,9 +1,9 @@ package test import ( + "fmt" "os" "testing" - "fmt" ) func AccTestEnvVarsCheck(t *testing.T) { @@ -23,24 +23,63 @@ func AccTestEnvVarsCheck(t *testing.T) { t.Fatal("IC_PROV_VPC_ID for provisioning API must be set for acceptance tests") } if x := os.Getenv("IC_TEST_KAFKA_CONNECT"); x != "" { - env_vars := []string{"IC_TARGET_KAFKA_CLUSTER_ID", "IC_AWS_ACCESS_KEY", "IC_AWS_SECRET_KEY", "IC_S3_BUCKET_NAME", + env_vars := []string{"IC_TARGET_KAFKA_CLUSTER_ID", "IC_AWS_ACCESS_KEY", "IC_AWS_SECRET_KEY", "IC_S3_BUCKET_NAME", "IC_AZURE_STORAGE_ACCOUNT_NAME", "IC_AZURE_STORAGE_ACCOUNT_KEY", "IC_AZURE_STORAGE_CONTAINER_NAME", "IC_SSL_ENABLED_PROTOCOLS", "IC_SSL_TRUSTSTORE_PASSWORD", "IC_SSL_PROTOCOL", "IC_SECURITY_PROTOCOL", "IC_SASL_MECHANISM", "IC_SASL_JAAS_CONFIG", "IC_BOOTSTRAP_SERVERS", "IC_TRUSTSTORE"} for _, s := range env_vars { if v := os.Getenv(s); v == "" { - fatalMessage := fmt.Sprintf("When IC_TEST_KAFKA_CONNECT is set, %s must be set for acceptance tests", s) + fatalMessage := fmt.Sprintf("When IC_TEST_KAFKA_CONNECT is set, %s must be set for acceptance tests", s) t.Fatal(fatalMessage, s) } } } } +func checkAccVariablesSet(t *testing.T, envVars []string) { + for i := 0; i < len(envVars); i++ { + switch envVars[i] { + case "IC_USERNAME": + if v := os.Getenv("IC_USERNAME"); v == "" { + t.Fatal("IC_USERNAME for provisioning API must be set for acceptance tests") + } + case "IC_API_KEY": + if v := os.Getenv("IC_API_KEY"); v == "" { + t.Fatal("IC_API_KEY for provisioning API must be set for acceptance tests") + } + case "KMS_ARN": + if v := os.Getenv("KMS_ARN"); v == "" { + t.Fatal("KMS_ARN for AccEBS encryption must be set for acceptance tests") + } + case "IC_PROV_ACC_NAME": + if v := os.Getenv("IC_PROV_ACC_NAME"); v == "" { + t.Fatal("IC_PROV_ACC_NAME for provisioning API must be set for acceptance tests") + } + case "IC_PROV_VPC_ID": + if v := os.Getenv("IC_PROV_VPC_ID"); v == "" { + t.Fatal("IC_PROV_VPC_ID for provisioning API must be set for 
acceptance tests") + } + case "IC_TEST_KAFKA_CONNECT": + if x := os.Getenv("IC_TEST_KAFKA_CONNECT"); x != "" { + env_vars := []string{"IC_TARGET_KAFKA_CLUSTER_ID", "IC_AWS_ACCESS_KEY", "IC_AWS_SECRET_KEY", "IC_S3_BUCKET_NAME", + "IC_AZURE_STORAGE_ACCOUNT_NAME", "IC_AZURE_STORAGE_ACCOUNT_KEY", "IC_AZURE_STORAGE_CONTAINER_NAME", + "IC_SSL_ENABLED_PROTOCOLS", "IC_SSL_TRUSTSTORE_PASSWORD", "IC_SSL_PROTOCOL", "IC_SECURITY_PROTOCOL", + "IC_SASL_MECHANISM", "IC_SASL_JAAS_CONFIG", "IC_BOOTSTRAP_SERVERS", "IC_TRUSTSTORE"} + for _, s := range env_vars { + if v := os.Getenv(s); v == "" { + fatalMessage := fmt.Sprintf("When IC_TEST_KAFKA_CONNECT is set, %s must be set for acceptance tests", s) + t.Fatal(fatalMessage, s) + } + } + } + } + } +} func getOptionalEnv(key, fallback string) string { - value := os.Getenv(key) - if len(value) == 0 { - return fallback - } - return value + value := os.Getenv(key) + if len(value) == 0 { + return fallback + } + return value }