Skip to content

Commit

Permalink
Merge pull request #56 from hail-insta/add-pw-and-cert
Browse files Browse the repository at this point in the history
Add Cluster Credentials Data Source to allow using password and certificate download in Terraform config
  • Loading branch information
arjunrajshekhar authored Nov 10, 2020
2 parents ccb1480 + 2adfe2f commit c5daeb5
Show file tree
Hide file tree
Showing 11 changed files with 186 additions and 51 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
BIN_NAME="terraform-provider-instaclustr"
VERSION=v1.6.1
VERSION=v1.7.0

.PHONY: install clean all build test testacc

Expand Down
11 changes: 10 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ resource "instaclustr_cluster" "example" {

## Configuration
### Resources
### Resource: `instaclustr_cluster`
### Resource: `instaclustr_cluster`
A resource for managing clusters on Instaclustr Managed Platform. A cluster contains a base application and several add-ons.

#### Properties
Expand Down Expand Up @@ -167,6 +167,15 @@ dedicated_zookeeper|Indicate whether this Kafka cluster should allocate dedicate
zookeeper_node_size|If `dedicated_zookeeper` is true, then it is the node size for the dedicated Zookeeper nodes. Have a look [here](https://www.instaclustr.com/support/api-integrations/api-reference/provisioning-api/#section-create-cluster) (Kafka bundle options table) for node size options. |Kafka
zookeeper_node_count|If `dedicated_zookeeper` is true, then it indicates how many nodes are allocated to be Zookeeper nodes|Kafka

### Data Source: `instaclustr_cluster_credentials`
A read-only data source used to get the password and certificate download link of a cluster.

Property | Description | Default
---------|-------------|--------
cluster_id|The ID of an existing Instaclustr cluster.|Required
cluster_password|The password of the existing Instaclustr cluster.|Computed
certificate_download|The certificate download link of the existing Instaclustr cluster.|Computed

### Resource: `instaclustr_firewall_rule`
A resource for managing cluster firewall rules on Instaclustr Managed Platform. A firewall rule allows access to your Instaclustr cluster.
Note: Either `rule_cidr` OR `rule_security_group_id` must be provided per rule (but not both)
Expand Down
5 changes: 5 additions & 0 deletions examples/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ resource "instaclustr_encryption_key" "add_ebs_key" {
provider = "INSTACLUSTR"
}


resource "instaclustr_cluster" "example2" {
cluster_name = "testcluster"
node_size = "t3.small"
Expand Down Expand Up @@ -47,6 +48,10 @@ resource "instaclustr_cluster" "example2" {
}
}

data "instaclustr_cluster_credentials" "example_credentials" {
cluster_id = "${instaclustr_cluster.example2.id}"
}

resource "instaclustr_cluster" "custom_vpc_example" {
cluster_name = "testcluster"
node_size = "t3.small"
Expand Down
48 changes: 48 additions & 0 deletions instaclustr/data_source_cluster_credentials.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
package instaclustr

import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"log"
)

// dataSourceClusterCredentials defines the read-only data source exposing a
// cluster's default password and certificate download link, keyed by cluster ID.
func dataSourceClusterCredentials() *schema.Resource {
	credentialsSchema := map[string]*schema.Schema{
		// ID of the existing Instaclustr cluster whose credentials are read.
		"cluster_id": {
			Type:     schema.TypeString,
			Required: true,
		},
		// Password of the default Instaclustr user; marked Sensitive so
		// Terraform redacts it from plan/apply output.
		"cluster_password": {
			Type:      schema.TypeString,
			Computed:  true,
			Sensitive: true,
		},
		// Download link for the cluster certificate.
		"certificate_download": {
			Type:     schema.TypeString,
			Computed: true,
		},
	}

	return &schema.Resource{
		Read:   dataSourceClusterCredentialsRead,
		Schema: credentialsSchema,
	}
}

// dataSourceClusterCredentialsRead fetches the cluster identified by the
// "cluster_id" attribute and stores its password and certificate download
// link into the data source state. Returns an error if the cluster cannot
// be read from the API.
func dataSourceClusterCredentialsRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*Config).Client
	id := d.Get("cluster_id").(string)

	log.Printf("[INFO] Reading credentials of cluster %s.", id)
	cluster, err := client.ReadCluster(id)
	if err != nil {
		// Bug fix: the original string contained Ruby-style "#{err}"
		// interpolation, which Go prints literally and so dropped the
		// underlying error from the message.
		return fmt.Errorf("[Error] Error reading cluster credentials: %s", err)
	}

	// Synthetic, stable ID for the data source derived from the cluster ID.
	d.SetId(fmt.Sprintf("%s-credentials", id))
	d.Set("cluster_id", id)
	d.Set("cluster_password", cluster.InstaclustrUserPassword)
	d.Set("certificate_download", cluster.ClusterCertificateDownload)

	return nil
}
11 changes: 6 additions & 5 deletions instaclustr/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,14 +23,15 @@ func Provider() *schema.Provider {
},

ResourcesMap: map[string]*schema.Resource{
"instaclustr_cluster": resourceCluster(),
"instaclustr_encryption_key": resourceEncryptionKey(),
"instaclustr_firewall_rule": resourceFirewallRule(),
"instaclustr_vpc_peering": resourceVpcPeering(),
"instaclustr_kafka_user": resourceKafkaUser(),
"instaclustr_cluster": resourceCluster(),
"instaclustr_encryption_key": resourceEncryptionKey(),
"instaclustr_firewall_rule": resourceFirewallRule(),
"instaclustr_vpc_peering": resourceVpcPeering(),
"instaclustr_kafka_user": resourceKafkaUser(),
},
DataSourcesMap: map[string]*schema.Resource{
"instaclustr_kafka_user_list": dataSourceKafkaUserList(),
"instaclustr_cluster_credentials": dataSourceClusterCredentials(),
},
}
provider.ConfigureFunc = providerConfigure
Expand Down
2 changes: 1 addition & 1 deletion instaclustr/resource_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -446,7 +446,7 @@ func getBundles(d *schema.ResourceData) ([]Bundle, error) {
bundles := make([]Bundle, 0)
for _, inBundle := range d.Get("bundle").([]interface{}) {
var bundle Bundle
err := mapstructure.Decode(inBundle.(map[string]interface{}), &bundle)
err := mapstructure.WeakDecode(inBundle.(map[string]interface{}), &bundle)
if err != nil {
return nil, err
}
Expand Down
22 changes: 11 additions & 11 deletions instaclustr/structs.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,18 +17,18 @@ type Bundle struct {
}

type BundleOptions struct {
AuthnAuthz string `json:"authnAuthz,omitempty" mapstructure:"auth_n_authz"`
ClientEncryption string `json:"clientEncryption,omitempty" mapstructure:"client_encryption"`
DedicatedMasterNodes string `json:"dedicatedMasterNodes,omitempty" mapstructure:"dedicated_master_nodes"`
AuthnAuthz bool `json:"authnAuthz,omitempty" mapstructure:"auth_n_authz"`
ClientEncryption bool `json:"clientEncryption,omitempty" mapstructure:"client_encryption"`
DedicatedMasterNodes bool `json:"dedicatedMasterNodes,omitempty" mapstructure:"dedicated_master_nodes"`
MasterNodeSize string `json:"masterNodeSize,omitempty" mapstructure:"master_node_size"`
SecurityPlugin string `json:"securityPlugin,omitempty" mapstructure:"security_plugin"`
UsePrivateBroadcastRpcAddress string `json:"usePrivateBroadcastRPCAddress,omitempty" mapstructure:"use_private_broadcast_rpc_address"`
LuceneEnabled string `json:"luceneEnabled,omitempty" mapstructure:"lucene_enabled"`
ContinuousBackupEnabled string `json:"continuousBackupEnabled,omitempty" mapstructure:"continuous_backup_enabled"`
SecurityPlugin bool `json:"securityPlugin,omitempty" mapstructure:"security_plugin"`
UsePrivateBroadcastRpcAddress bool `json:"usePrivateBroadcastRPCAddress,omitempty" mapstructure:"use_private_broadcast_rpc_address"`
LuceneEnabled bool `json:"luceneEnabled,omitempty" mapstructure:"lucene_enabled"`
ContinuousBackupEnabled bool `json:"continuousBackupEnabled,omitempty" mapstructure:"continuous_backup_enabled"`
NumberPartitions string `json:"numberPartitions,omitempty" mapstructure:"number_partitions"`
AutoCreateTopics string `json:"autoCreateTopics,omitempty" mapstructure:"auto_create_topics"`
DeleteTopics string `json:"deleteTopics,omitempty" mapstructure:"delete_topics"`
PasswordAuthentication string `json:"passwordAuthentication,omitempty" mapstructure:"password_authentication"`
AutoCreateTopics bool `json:"autoCreateTopics,omitempty" mapstructure:"auto_create_topics"`
DeleteTopics bool `json:"deleteTopics,omitempty" mapstructure:"delete_topics"`
PasswordAuthentication bool `json:"passwordAuthentication,omitempty" mapstructure:"password_authentication"`
TargetKafkaClusterId string `json:"targetKafkaClusterId,omitempty" mapstructure:"target_kafka_cluster_id"`
VPCType string `json:"vpcType,omitempty" mapstructure:"vpc_type"`
AWSAccessKeyId string `json:"aws.access.key.id,omitempty" mapstructure:"aws_access_key"`
Expand All @@ -47,7 +47,7 @@ type BundleOptions struct {
Truststore string `json:"truststore,omitempty" mapstructure:"truststore"`
RedisMasterNodes string `json:"masterNodes,omitempty" mapstructure:"master_nodes"`
RedisReplicaNodes string `json:"replicaNodes,omitempty" mapstructure:"replica_nodes"`
DedicatedZookeeper string `json:"dedicatedZookeeper,omitempty" mapstructure:"dedicated_zookeeper"`
DedicatedZookeeper bool `json:"dedicatedZookeeper,omitempty" mapstructure:"dedicated_zookeeper"`
ZookeeperNodeSize string `json:"zookeeperNodeSize,omitempty" mapstructure:"zookeeper_node_size"`
ZookeeperNodeCount string `json:"zookeeperNodeCount,omitempty" mapstructure:"zookeeper_node_count"`
}
Expand Down
1 change: 1 addition & 0 deletions test/data/valid_redis_cluster_create.tf
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
provider "instaclustr" {
username = "%s"
api_key = "%s"
api_hostname = "%s"
}

resource "instaclustr_cluster" "validRedis" {
Expand Down
39 changes: 39 additions & 0 deletions test/data/valid_with_password_and_client_encryption.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# Acceptance-test fixture: this file is loaded by the Go tests and passed
# through fmt.Sprintf, so the "s"-style placeholders below are filled with
# username, API key and API hostname in that order.
provider "instaclustr" {
username = "%s"
api_key = "%s"
api_hostname = "%s"
}

# Cassandra cluster with password authentication and client encryption
# enabled, used to exercise the cluster-credentials data source.
resource "instaclustr_cluster" "valid_with_password_and_client_encryption" {
cluster_name = "tf-provider-test-auth-n-ce"
node_size = "m5l-250-v2"
data_centre = "US_WEST_2"
sla_tier = "NON_PRODUCTION"
cluster_network = "192.168.0.0/18"
private_network_cluster = false
pci_compliant_cluster = false
cluster_provider = {
name = "AWS_VPC"
}
rack_allocation = {
number_of_racks = 3
nodes_per_rack = 1
}

bundle {
bundle = "APACHE_CASSANDRA"
version = "3.11.4"
options = {
auth_n_authz = true
use_private_broadcast_rpc_address = true
lucene_enabled = true
continuous_backup_enabled = true
password_authentication = true
client_encryption = true
}
}
}

# Reads the password and certificate download link of the cluster above.
data "instaclustr_cluster_credentials" "cluster_credentials" {
cluster_id = "${instaclustr_cluster.valid_with_password_and_client_encryption.id}"
}
94 changes: 63 additions & 31 deletions test/resource_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,9 @@ func TestAccCluster(t *testing.T) {
validConfig, _ := ioutil.ReadFile("data/valid.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey)
updatedConfig := strings.Replace(oriConfig, "testcluster", "newcluster", 1)
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey, hostname)
updatedConfig := strings.Replace(oriConfig, "testcluster", "newcluster", 1)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
PreCheck: func() { AccTestEnvVarsCheck(t) },
Expand Down Expand Up @@ -59,8 +59,8 @@ func TestKafkaConnectClusterCreateInstaclustrAWS(t *testing.T) {
awsAccessKey := os.Getenv("IC_AWS_ACCESS_KEY")
awsSecretKey := os.Getenv("IC_AWS_SECRET_KEY")
S3BucketName := os.Getenv("IC_S3_BUCKET_NAME")
oriKCConfig := fmt.Sprintf(string(validKCConfig), username, apiKey, kafkaClusterId, awsAccessKey, awsSecretKey, S3BucketName)
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
oriKCConfig := fmt.Sprintf(string(validKCConfig), username, apiKey, hostname, kafkaClusterId, awsAccessKey, awsSecretKey, S3BucketName)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
PreCheck: func() { AccTestEnvVarsCheck(t) },
Expand Down Expand Up @@ -99,10 +99,10 @@ func TestKafkaConnectClusterCreateNonInstaclustrAZURE(t *testing.T) {
saslJaasConfig := os.Getenv("IC_SASL_JAAS_CONFIG")
bootstrapServers := os.Getenv("IC_BOOTSTRAP_SERVER")
truststore := os.Getenv("IC_TRUSTSTORE")
oriKCConfig := fmt.Sprintf(string(validKCConfig), username, apiKey, azureStorageAccountName,
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
oriKCConfig := fmt.Sprintf(string(validKCConfig), username, apiKey, hostname, azureStorageAccountName,
azureStorageAccountKey, azureStorageContainerName, sslEnabledProtocols, sslTruststorePassword,
sslProtocol, securityProtocol, saslMechanism, saslJaasConfig, bootstrapServers, truststore)
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
PreCheck: func() { AccTestEnvVarsCheck(t) },
Expand All @@ -119,29 +119,6 @@ func TestKafkaConnectClusterCreateNonInstaclustrAZURE(t *testing.T) {
})
}

// TestKafkaConnectClusterInvalid verifies that applying an invalid Kafka
// Connect configuration is rejected with a "Error creating cluster" error.
func TestKafkaConnectClusterInvalid(t *testing.T) {
	provider := instaclustr.Provider()
	providers := map[string]terraform.ResourceProvider{
		"instaclustr": provider,
	}
	readConfig, _ := ioutil.ReadFile("data/invalid_kafka_connect.tf")
	username := os.Getenv("IC_USERNAME")
	apiKey := os.Getenv("IC_API_KEY")
	hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
	invalidConfig := fmt.Sprintf(string(readConfig), username, apiKey, hostname)
	fmt.Printf("Config : %s", invalidConfig)

	steps := []resource.TestStep{
		{
			Config:      invalidConfig,
			ExpectError: regexp.MustCompile("Error creating cluster"),
		},
	}
	resource.Test(t, resource.TestCase{
		Providers: providers,
		PreCheck:  func() { AccTestEnvVarsCheck(t) },
		Steps:     steps,
	})
}

func TestAccClusterResize(t *testing.T) {
testAccProviders := map[string]terraform.ResourceProvider{
"instaclustr": instaclustr.Provider(),
Expand Down Expand Up @@ -242,7 +219,7 @@ func TestAccClusterCustomVPC(t *testing.T) {
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
providerAccountName := os.Getenv("IC_PROV_ACC_NAME")
providerVpcId := os.Getenv("IC_PROV_VPC_ID")
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey, providerAccountName, providerVpcId)
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey, hostname, providerAccountName, providerVpcId)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
PreCheck: func() { AccTestEnvVarsCheck(t) },
Expand All @@ -267,13 +244,14 @@ func TestAccClusterCustomVPCInvalid(t *testing.T) {
validConfig, _ := ioutil.ReadFile("data/invalid_with_custom_vpc.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
providerAccountName := os.Getenv("IC_PROV_ACC_NAME")
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
PreCheck: func() { AccTestEnvVarsCheck(t) },
Steps: []resource.TestStep{
{
Config: fmt.Sprintf(string(validConfig), username, apiKey, providerAccountName),
Config: fmt.Sprintf(string(validConfig), username, hostname, apiKey, providerAccountName),
ExpectError: regexp.MustCompile("Error creating cluster"),
},
},
Expand Down Expand Up @@ -353,8 +331,8 @@ func TestValidRedisClusterCreate(t *testing.T) {
validConfig, _ := ioutil.ReadFile("data/valid_redis_cluster_create.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey)
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
oriConfig := fmt.Sprintf(string(validConfig), username, apiKey, hostname)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
PreCheck: func() {
Expand All @@ -375,3 +353,57 @@ func TestValidRedisClusterCreate(t *testing.T) {
},
})
}

// TestAccClusterCredentials provisions a Cassandra cluster with password
// authentication and client encryption, then checks that the
// instaclustr_cluster_credentials data source reports credentials matching
// what the API returns.
func TestAccClusterCredentials(t *testing.T) {
	testAccProviders := map[string]terraform.ResourceProvider{
		"instaclustr": instaclustr.Provider(),
	}
	// Bug fix: the read error was silently discarded, so a missing fixture
	// produced a confusing Sprintf/plan failure instead of a clear message.
	validConfig, err := ioutil.ReadFile("data/valid_with_password_and_client_encryption.tf")
	if err != nil {
		t.Fatalf("Failed to read test config: %s", err)
	}
	username := os.Getenv("IC_USERNAME")
	apiKey := os.Getenv("IC_API_KEY")
	hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)
	oriConfig := fmt.Sprintf(string(validConfig), username, apiKey, hostname)

	resource.Test(t, resource.TestCase{
		Providers:    testAccProviders,
		PreCheck:     func() { AccTestEnvVarsCheck(t) },
		CheckDestroy: testCheckResourceDeleted("valid_with_password_and_client_encryption", hostname, username, apiKey),
		Steps: []resource.TestStep{
			{
				Config: oriConfig,
				Check: resource.ComposeTestCheckFunc(
					testCheckClusterCredentials(hostname, username, apiKey),
				),
			},
		},
	})
}

// testCheckClusterCredentials returns a check that compares the credentials
// held in Terraform state for the cluster_credentials data source against
// the credentials read directly from the API, and verifies that client
// encryption is enabled (certificate download is not "disabled").
func testCheckClusterCredentials(hostname, username, apiKey string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		resourceState := s.Modules[0].Resources["data.instaclustr_cluster_credentials.cluster_credentials"]
		// Bug fix: guard against a missing data source in state — the
		// original dereferenced resourceState unconditionally and would
		// panic instead of failing the test with a useful message.
		if resourceState == nil {
			return fmt.Errorf("data.instaclustr_cluster_credentials.cluster_credentials not found in state")
		}

		client := new(instaclustr.APIClient)
		client.InitClient(hostname, username, apiKey)
		clusterId := resourceState.Primary.Attributes["cluster_id"]

		clusterCredentials, err := client.ReadClusterCredentials(clusterId)
		if err != nil {
			return fmt.Errorf("Failed to read Cluster Credentials from %s: %s", clusterId, err)
		}

		if clusterCredentials.ClusterPassword != resourceState.Primary.Attributes["cluster_password"] {
			return fmt.Errorf("Password of the cluster and resource are different")
		}

		if clusterCredentials.ClusterCertificateDownload != resourceState.Primary.Attributes["certificate_download"] {
			return fmt.Errorf("Certificate download link of the cluster and resource are different")
		}

		// "disabled" is the API's sentinel for client encryption being off.
		if clusterCredentials.ClusterCertificateDownload == "disabled" {
			return fmt.Errorf("Client encryption is disabled")
		}

		return nil
	}
}
2 changes: 1 addition & 1 deletion test/resource_kafka_user_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ func checkKafkaClusterRunning(hostname, username, apiKey string) resource.TestCh
return fmt.Errorf("[Error] Timed out waiting for cluster to have the status 'RUNNING'. Current cluster status is '%s'", latestStatus)
}
timePassed += ClusterReadInterval
fmt.Printf("\033[u\033[K%ds has elapsed while waiting for the cluster to reach RUNNING.", timePassed)
fmt.Printf("\033[u\033[K%ds has elapsed while waiting for the cluster to reach RUNNING.\n", timePassed)
time.Sleep(ClusterReadInterval * time.Second)
}
fmt.Printf("\n")
Expand Down

0 comments on commit c5daeb5

Please sign in to comment.