Skip to content

Commit

Permalink
Dedicated zookeeper support (#45)
Browse files Browse the repository at this point in the history
* Added support for (soon to be released) dedicated zookeeper feature for Instaclustr managed Kafka clusters

* Forgot to remove commenting

* Addressed Arjun's comments

* bump version

* bumped the Makefile version to 1.5.0

Co-authored-by: Hendra Gunadi <[email protected]>
  • Loading branch information
h3nd24 and Hendra Gunadi authored Sep 27, 2020
1 parent 9d114e0 commit 0643479
Show file tree
Hide file tree
Showing 9 changed files with 124 additions and 22 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
BIN_NAME="terraform-provider-instaclustr"
VERSION=v1.4.0
VERSION=v1.5.0

.PHONY: install clean all build test testacc

Expand Down
3 changes: 3 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,9 @@ ssl_enabled_protocols, ssl_truststore_password, ssl_protocol, security_protocol,
truststore|Base64 encoded version of the TLS trust store (in JKS format) used to connect to your Kafka Cluster. Only required if connecting to a Non-Instaclustr managed Kafka Cluster with TLS enabled|Kafka Connect
master_nodes|The number of Master nodes in a generated Redis Cluster.|Redis|Required (Integers)
replica_nodes|The number of Replica nodes in a generated Redis Cluster.|Redis|Required (Integers)
dedicated_zookeeper|Indicates whether this Kafka cluster should allocate dedicated Zookeeper nodes|Kafka|false
zookeeper_node_size|If `dedicated_zookeeper` is true, then it is the node size for the dedicated Zookeeper nodes. Have a look [here](https://www.instaclustr.com/support/api-integrations/api-reference/provisioning-api/#section-create-cluster) (Kafka bundle options table) for node size options.|Kafka
zookeeper_node_count|If `dedicated_zookeeper` is true, then it indicates how many nodes are allocated to be Zookeeper nodes|Kafka

### Resource: `instaclustr_firewall_rule`
A resource for managing cluster firewall rules on Instaclustr Managed Platform. A firewall rule allows access to your Instaclustr cluster.
Expand Down
5 changes: 4 additions & 1 deletion examples/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -109,9 +109,12 @@ resource "instaclustr_cluster" "example_kafka" {

bundle {
bundle = "KAFKA"
version = "2.3.1"
version = "2.5.1"
options = {
auth_n_authz = true
dedicated_zookeeper = true
zookeeper_node_size = "zk-production-m5.large-60"
zookeeper_node_count = 3
}
}

Expand Down
24 changes: 23 additions & 1 deletion instaclustr/resource_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -254,6 +254,18 @@ func resourceCluster() *schema.Resource {
Type: schema.TypeString,
Optional: true,
},
"dedicated_zookeeper": {
Type: schema.TypeBool,
Optional: true,
},
"zookeeper_node_size": {
Type: schema.TypeString,
Optional: true,
},
"zookeeper_node_count": {
Type: schema.TypeInt,
Optional: true,
},
"master_nodes": {
Type: schema.TypeInt,
Optional: true,
Expand Down Expand Up @@ -371,7 +383,17 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error {
d.Set("cluster_id", cluster.ID)
d.Set("cluster_name", cluster.ClusterName)

nodeSize := cluster.DataCentres[0].Nodes[0].Size
nodeSize := ""
/*
* Ideally, we would like this information to be coming directly from the API cluster status.
* Hence, this is a slightly hacky way of ignoring zookeeper node sizes (Kafka bundle specific).
*/
for _, node := range(cluster.DataCentres[0].Nodes) {
nodeSize = node.Size
if (!strings.HasPrefix(nodeSize, "zk-")) {
break
}
}
if len(cluster.DataCentres[0].ResizeTargetNodeSize) > 0 {
nodeSize = cluster.DataCentres[0].ResizeTargetNodeSize
}
Expand Down
3 changes: 3 additions & 0 deletions instaclustr/structs.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,9 @@ type BundleOptions struct {
Truststore string `json:"truststore,omitempty" mapstructure:"truststore"`
RedisMasterNodes string `json:"masterNodes,omitempty" mapstructure:"master_nodes"`
RedisReplicaNodes string `json:"replicaNodes,omitempty" mapstructure:"replica_nodes"`
DedicatedZookeeper string `json:"dedicatedZookeeper,omitempty" mapstructure:"dedicated_zookeeper"`
ZookeeperNodeSize string `json:"zookeeperNodeSize,omitempty" mapstructure:"zookeeper_node_size"`
ZookeeperNodeCount string `json:"zookeeperNodeCount,omitempty" mapstructure:"zookeeper_node_count"`
}

type ClusterProvider struct {
Expand Down
31 changes: 31 additions & 0 deletions test/data/kafka_user_create_cluster.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
// This is part of testing "kafka user" suite, 1 of 3
// Step 1: provision a Kafka cluster with dedicated Zookeeper nodes, but no
// Kafka user resources yet (later steps add the user and the user list).
//
// NOTE(review): this file is read by resource_kafka_user_test.go and passed
// through fmt.Sprintf, so every quoted "s" format verb below is a positional
// placeholder. The test supplies, in order: provider username, API key,
// API hostname, and the dedicated Zookeeper node size. Keep the placeholder
// order in sync with the test, and do not add comments containing a literal
// percent character, since the whole file is a format string.
// Provider credentials are injected by the test harness (env vars).
provider "instaclustr" {
username = "%s"
api_key = "%s"
api_hostname = "%s"
}

// Small non-production Kafka cluster used only as a fixture for user tests.
resource "instaclustr_cluster" "kafka_cluster" {
cluster_name = "example_kafka_tf_test"
node_size = "t3.small-20-gp2"
data_centre = "US_WEST_2"
sla_tier = "NON_PRODUCTION"
cluster_network = "192.168.0.0/18"
cluster_provider = {
name = "AWS_VPC"
}
// 3 racks x 1 node per rack = 3 Kafka broker nodes.
rack_allocation = {
number_of_racks = 3
nodes_per_rack = 1
}

bundle {
bundle = "KAFKA"
version = "2.5.1"
options = {
// Exercise the dedicated-Zookeeper feature under test; the node size
// is injected by the test (presumably a "zk-" prefixed size, e.g.
// "zk-developer-t3.small-20" — see resource_kafka_user_test.go).
dedicated_zookeeper = true
zookeeper_node_size = "%s"
zookeeper_node_count = 3
}
}
}
38 changes: 38 additions & 0 deletions test/data/kafka_user_create_user.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
// This is part of testing "kafka user" suite, 2 of 3
// Step 2: the same cluster definition as step 1, plus the Kafka user
// resource, so applying this config on top of step 1 only adds the user.
//
// NOTE(review): this file is read by resource_kafka_user_test.go and passed
// through fmt.Sprintf, so every quoted "s" format verb below is a positional
// placeholder. The test supplies, in order: provider username, API key,
// API hostname, Zookeeper node size, Kafka username, Kafka user password.
// Keep the placeholder order in sync with the test, and do not add comments
// containing a literal percent character, since the file is a format string.
// Provider credentials are injected by the test harness (env vars).
provider "instaclustr" {
username = "%s"
api_key = "%s"
api_hostname = "%s"
}

// Must stay identical to the cluster in kafka_user_create_cluster.tf so
// Terraform sees no cluster changes between test steps.
resource "instaclustr_cluster" "kafka_cluster" {
cluster_name = "example_kafka_tf_test"
node_size = "t3.small-20-gp2"
data_centre = "US_WEST_2"
sla_tier = "NON_PRODUCTION"
cluster_network = "192.168.0.0/18"
cluster_provider = {
name = "AWS_VPC"
}
rack_allocation = {
number_of_racks = 3
nodes_per_rack = 1
}

bundle {
bundle = "KAFKA"
version = "2.5.1"
options = {
dedicated_zookeeper = true
zookeeper_node_size = "%s"
zookeeper_node_count = 3
}
}
}

// The Kafka user under test; username and password are injected by the test.
resource "instaclustr_kafka_user" "kafka_user_charlie" {
cluster_id = "${instaclustr_cluster.kafka_cluster.cluster_id}"
username = "%s"
password = "%s"
initial_permissions = "none"
}
18 changes: 11 additions & 7 deletions test/data/kafka_user.tf → test/data/kafka_user_user_list.tf
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
// This is part of testing "kafka user" suite, 3 of 3
provider "instaclustr" {
username = "%s"
api_key = "%s"
Expand All @@ -20,19 +21,22 @@ resource "instaclustr_cluster" "kafka_cluster" {

bundle {
bundle = "KAFKA"
version = "2.3.1"
version = "2.5.1"
options = {
dedicated_zookeeper = true
zookeeper_node_size = "%s"
zookeeper_node_count = 3
}
}
}
/*

resource "instaclustr_kafka_user" "kafka_user_charlie" {
cluster_id = "${instaclustr_cluster.kafka_cluster.cluster_id}"
username = "@@KAFKA_USERNAME@@"
password = "@@KAFKA_USER_PASSWORD@@"
username = "%s"
password = "%s"
initial_permissions = "none"
}
*/
/*

data "instaclustr_kafka_user_list" "kafka_user_list" {
cluster_id = "${instaclustr_cluster.kafka_cluster.cluster_id}"
}
*/
22 changes: 10 additions & 12 deletions test/resource_kafka_user_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ import (
"os"
"testing"
"time"
"strings"
"strconv"

"github.com/hashicorp/terraform/helper/resource"
Expand All @@ -19,23 +18,22 @@ func TestKafkaUserResource(t *testing.T) {
"instaclustr": instaclustr.Provider(),
}

tfFile, _ := ioutil.ReadFile("data/kafka_user.tf")
oriConfig := string(tfFile)
configBytes1, _ := ioutil.ReadFile("data/kafka_user_create_cluster.tf")
configBytes2, _ := ioutil.ReadFile("data/kafka_user_create_user.tf")
configBytes3, _ := ioutil.ReadFile("data/kafka_user_user_list.tf")
username := os.Getenv("IC_USERNAME")
apiKey := os.Getenv("IC_API_KEY")
hostname := getOptionalEnv("IC_API_URL", instaclustr.DefaultApiHostname)

kafka_username := "charlie"
kafkaUsername := "charlie"
oldPassword := "charlie123!"
newPassword := "charlie123standard!"
const KAFKA_USERNAME_PATTERN = "@@KAFKA_USERNAME@@"
const KAFKA_USER_PASSWORD_PATTERN = "@@KAFKA_USER_PASSWORD@@"
zookeeperNodeSize := "zk-developer-t3.small-20"

createClusterConfig := fmt.Sprintf(oriConfig, username, apiKey, hostname)
createKafkaUserConfigRaw := strings.Replace(strings.Replace(createClusterConfig, "/*", "", 1), "*/", "", 1)
createKafkaUserConfig := strings.ReplaceAll(strings.ReplaceAll(createKafkaUserConfigRaw, KAFKA_USERNAME_PATTERN, kafka_username), KAFKA_USER_PASSWORD_PATTERN, oldPassword)
createKafkaUserListConfig := strings.Replace(strings.Replace(createKafkaUserConfig, "/*", "", 1), "*/", "", 1)
updateKafkaUserConfig := strings.Replace(createKafkaUserConfig, oldPassword, newPassword, 1)
createClusterConfig := fmt.Sprintf(string(configBytes1), username, apiKey, hostname, zookeeperNodeSize)
createKafkaUserConfig := fmt.Sprintf(string(configBytes2), username, apiKey, hostname, zookeeperNodeSize, kafkaUsername, oldPassword)
createKafkaUserListConfig := fmt.Sprintf(string(configBytes3), username, apiKey, hostname, zookeeperNodeSize, kafkaUsername, oldPassword)
updateKafkaUserConfig := fmt.Sprintf(string(configBytes3), username, apiKey, hostname, zookeeperNodeSize, kafkaUsername, newPassword)

resource.Test(t, resource.TestCase{
Providers: testProviders,
Expand Down Expand Up @@ -71,7 +69,7 @@ func TestKafkaUserResource(t *testing.T) {
// i.e., we need to destroy the kafka user resources first.
{
Config: createClusterConfig,
Check: checkKafkaUserDeleted(kafka_username, hostname, username, apiKey),
Check: checkKafkaUserDeleted(kafkaUsername, hostname, username, apiKey),
},
},
})
Expand Down

0 comments on commit 0643479

Please sign in to comment.