From 3070bca82d325e88debce9086e746ccb9d1fb8d8 Mon Sep 17 00:00:00 2001
From: Naya Chen
Date: Thu, 6 Jan 2022 16:30:11 -0800
Subject: [PATCH 1/3] Add command args to container creation (#6)

formatting
formatting
---
 apis/druid/v1alpha1/druid_types.go            |  9 ++++++++-
 .../crds/druid.apache.org_druids.yaml         |  4 ++++
 controllers/druid/handler.go                  | 19 ++++++++++++++++++-
 3 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/apis/druid/v1alpha1/druid_types.go b/apis/druid/v1alpha1/druid_types.go
index 6adc5e4e..b721e725 100644
--- a/apis/druid/v1alpha1/druid_types.go
+++ b/apis/druid/v1alpha1/druid_types.go
@@ -91,9 +91,16 @@ type DruidSpec struct {
 	// +optional
 	DeleteOrphanPvc bool `json:"deleteOrphanPvc"`
 
-	// Required: path to druid start script to be run on container start
+	// Required: Command to be run on container start
 	StartScript string `json:"startScript"`
 
+	// Optional: additional shell argument; it only takes effect when startScript is set to `sh` or `bash`.
+	// The container then runs `sh -c "${entryArg} && ${druidScript} ${nodeType}"`
+	EntryArg string `json:"entryArg,omitempty"`
+
+	// Optional: custom Druid start script path; defaults to "bin/run-druid.sh" if not set
+	DruidScript string `json:"druidScript,omitempty"`
+
 	// Required here or at nodeSpec level
 	Image string `json:"image,omitempty"`
 
diff --git a/chart/templates/crds/druid.apache.org_druids.yaml b/chart/templates/crds/druid.apache.org_druids.yaml
index 5a2dbc37..0735e3bb 100644
--- a/chart/templates/crds/druid.apache.org_druids.yaml
+++ b/chart/templates/crds/druid.apache.org_druids.yaml
@@ -3979,6 +3979,10 @@ spec:
               type: array
             startScript:
               type: string
+            entryArg:
+              type: string
+            druidScript:
+              type: string
             startUpProbe:
               properties:
                 exec:
diff --git a/controllers/druid/handler.go b/controllers/druid/handler.go
index 40d348b4..9942f21b 100644
--- a/controllers/druid/handler.go
+++ b/controllers/druid/handler.go
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"regexp"
 	"sort"
+	"strings"
 
 	autoscalev2beta2 "k8s.io/api/autoscaling/v2beta2"
 	networkingv1 "k8s.io/api/networking/v1"
@@ -1138,6 +1139,21 @@ func getVolume(nodeSpec *v1alpha1.DruidNodeSpec, m *v1alpha1.Druid, nodeSpecUniq
 	return volumesHolder
 }
 
+func getCommand(nodeSpec *v1alpha1.DruidNodeSpec, m *v1alpha1.Druid) []string {
+	if m.Spec.StartScript != "" && m.Spec.EntryArg != "" {
+		return []string{m.Spec.StartScript}
+	}
+	return []string{firstNonEmptyStr(m.Spec.StartScript, "bin/run-druid.sh"), nodeSpec.NodeType}
+}
+
+func getEntryArg(nodeSpec *v1alpha1.DruidNodeSpec, m *v1alpha1.Druid) []string {
+	if m.Spec.EntryArg != "" {
+		bashCommands := strings.Join([]string{m.Spec.EntryArg, "&&", firstNonEmptyStr(m.Spec.DruidScript, "bin/run-druid.sh"), nodeSpec.NodeType}, " ")
+		return []string{"-c", bashCommands}
+	}
+	return nil
+}
+
 func getEnv(nodeSpec *v1alpha1.DruidNodeSpec, m *v1alpha1.Druid, configMapSHA string) []v1.EnvVar {
 	envHolder := firstNonNilValue(nodeSpec.Env, m.Spec.Env).([]v1.EnvVar)
 	// enables to do the trick to force redeployment in case of configmap changes.
@@ -1308,7 +1324,8 @@ func makePodSpec(nodeSpec *v1alpha1.DruidNodeSpec, m *v1alpha1.Druid, nodeSpecUn v1.Container{ Image: firstNonEmptyStr(nodeSpec.Image, m.Spec.Image), Name: fmt.Sprintf("%s", nodeSpecUniqueStr), - Command: []string{firstNonEmptyStr(m.Spec.StartScript, "bin/run-druid.sh"), nodeSpec.NodeType}, + Command: getCommand(nodeSpec, m), + Args: getEntryArg(nodeSpec, m), ImagePullPolicy: v1.PullPolicy(firstNonEmptyStr(string(nodeSpec.ImagePullPolicy), string(m.Spec.ImagePullPolicy))), Ports: nodeSpec.Ports, Resources: nodeSpec.Resources, From 8f28155f420cc8d1c3c1af057aa99d8c732311a6 Mon Sep 17 00:00:00 2001 From: CodingParsley Date: Mon, 31 Oct 2022 22:00:56 +0000 Subject: [PATCH 2/3] add test --- controllers/druid/handler_test.go | 15 + .../broker-statefulset-start-command.yaml | 94 +++++ .../testdata/druid-test-cr-start-command.yaml | 369 ++++++++++++++++++ 3 files changed, 478 insertions(+) create mode 100644 controllers/druid/testdata/broker-statefulset-start-command.yaml create mode 100644 controllers/druid/testdata/druid-test-cr-start-command.yaml diff --git a/controllers/druid/handler_test.go b/controllers/druid/handler_test.go index 7774c8d7..a0019951 100644 --- a/controllers/druid/handler_test.go +++ b/controllers/druid/handler_test.go @@ -29,6 +29,21 @@ func TestMakeStatefulSetForBroker(t *testing.T) { assertEquals(expected, actual, t) } +func TestMakeStatefulSetForBrokerWithStartCommands(t *testing.T) { + clusterSpec := readDruidClusterSpecFromFile(t, "testdata/druid-test-cr-start-command.yaml") + + nodeSpecUniqueStr := makeNodeSpecificUniqueString(clusterSpec, "brokers") + nodeSpec := clusterSpec.Spec.Nodes["brokers"] + + actual, _ := makeStatefulSet(&nodeSpec, clusterSpec, makeLabelsForNodeSpec(&nodeSpec, clusterSpec, clusterSpec.Name, nodeSpecUniqueStr), nodeSpecUniqueStr, "blah", nodeSpecUniqueStr) + addHashToObject(actual) + + expected := new(appsv1.StatefulSet) + readAndUnmarshallResource("testdata/broker-statefulset-start-command.yaml", &expected, t) + + assertEquals(expected, actual, t) +} + func TestMakeStatefulSetForBrokerWithSidecar(t *testing.T) { clusterSpec := readSampleDruidClusterSpecWithSidecar(t) diff --git a/controllers/druid/testdata/broker-statefulset-start-command.yaml b/controllers/druid/testdata/broker-statefulset-start-command.yaml new file mode 100644 index 00000000..926adae3 --- /dev/null +++ b/controllers/druid/testdata/broker-statefulset-start-command.yaml @@ -0,0 +1,94 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-druid-test-brokers + namespace: test-namespace + labels: + app: druid + druid_cr: druid-test + nodeSpecUniqueStr: druid-druid-test-brokers + component: broker + annotations: + druidOpResourceHash: jDYG8FQEDYnyd6LHB++phICQiD0= +spec: + podManagementPolicy: Parallel + replicas: 2 + selector: + matchLabels: + app: druid + druid_cr: druid-test + nodeSpecUniqueStr: druid-druid-test-brokers + component: broker + serviceName: druid-druid-test-brokers + template: + metadata: + labels: + app: druid + druid_cr: druid-test + nodeSpecUniqueStr: druid-druid-test-brokers + component: broker + annotations: + key1: value1 + key2: value2 + spec: + tolerations: [] + affinity: {} + containers: + - command: + - sh + args: + - -c + - echo 'Hello World' && druid-test.sh broker + image: himanshu01/druid:druid-0.12.0-1 + name: druid-druid-test-brokers + env: + - name : configMapSHA + value : blah + ports: + - containerPort: 8083 + name: random + readinessProbe: + httpGet: + path: /status + port: 8080 + livenessProbe: + httpGet: + 
path: /status + port: 8080 + resources: + limits: + cpu: "4" + memory: 2Gi + requests: + cpu: "4" + memory: 2Gi + volumeMounts: + - mountPath: /druid/conf/druid/_common + readOnly: true + name: common-config-volume + - mountPath: /druid/conf/druid/broker + readOnly: true + name: nodetype-config-volume + - mountPath: /druid/data + readOnly: true + name: data-volume + securityContext: + fsGroup: 107 + runAsUser: 106 + volumes: + - configMap: + name: druid-test-druid-common-config + name: common-config-volume + - configMap: + name: druid-druid-test-brokers-config + name: nodetype-config-volume + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 diff --git a/controllers/druid/testdata/druid-test-cr-start-command.yaml b/controllers/druid/testdata/druid-test-cr-start-command.yaml new file mode 100644 index 00000000..ba3b56bd --- /dev/null +++ b/controllers/druid/testdata/druid-test-cr-start-command.yaml @@ -0,0 +1,369 @@ +apiVersion: "druid.apache.org/v1alpha1" +kind: "Druid" +metadata: + name: druid-test + namespace: test-namespace +spec: + image: himanshu01/druid:druid-0.12.0-1 + startScript: sh + entryArg: echo 'Hello World' + DruidScript: druid-test.sh + podAnnotations: + key1: value1 + key2: value2 + securityContext: + fsGroup: 107 + runAsUser: 106 + readinessProbe: + httpGet: + path: /status + zookeeper: + type: default + spec: + properties: |- + druid.zk.service.host=zookeeper-0.zookeeper,zookeeper-1.zookeeper,zookeeper-2.zookeeper + druid.zk.paths.base=/druid + druid.zk.service.compress=false + metadataStore: + type: default + spec: + properties: |- + druid.metadata.storage.type=postgresql + druid.metadata.storage.connector.connectURI=jdbc:postgresql://rdsaddr.us-west-2.rds.amazonaws.com:5432/druiddb + druid.metadata.storage.connector.user=iamuser + druid.metadata.storage.connector.password=changeme + druid.metadata.storage.connector.createTables=true + deepStorage: + type: default + spec: + properties: |- + druid.storage.type=s3 + druid.storage.bucket=mybucket + druid.storage.baseKey=druid/segments + druid.s3.accessKey=accesskey + druid.s3.secretKey=secretkey + jvm.options: |- + -server + -XX:MaxDirectMemorySize=10240g + -Duser.timezone=UTC + -Dfile.encoding=UTF-8 + -Dlog4j.debug + -XX:+ExitOnOutOfMemoryError + -XX:+HeapDumpOnOutOfMemoryError + -XX:+UseG1GC + -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager + log4j.config: |- + + + + + + + + + + + + + + common.runtime.properties: |- + # + # Extensions + # + druid.extensions.loadList=["druid-datasketches", "druid-s3-extensions", "postgresql-metadata-storage"] + + # + # Logging + # + # Log all runtime properties on startup. 
Disable to avoid logging properties on startup: + druid.startup.logging.logProperties=true + + # + # Indexing service logs + # + # Store indexing logs in an S3 bucket named 'druid-deep-storage' with the + # prefix 'druid/indexing-logs' + druid.indexer.logs.type=s3 + druid.indexer.logs.s3Bucket=mybucket + druid.indexer.logs.s3Prefix=druid/indexing-logs + + # + # Service discovery + # + druid.selectors.indexing.serviceName=druid/overlord + druid.selectors.coordinator.serviceName=druid/coordinator + + # + # Monitoring + # + druid.monitoring.monitors=["com.metamx.metrics.JvmMonitor"] + druid.emitter=logging + druid.emitter.logging.logLevel=info + + # Storage type of double columns + # ommiting this will lead to index double as float at the storage layer + druid.indexing.doubleStorage=double + metricDimensions.json: |- + { + "query/time" : { "dimensions" : ["dataSource", "type"], "type" : "timer"} + } + nodes: + brokers: + nodeType: "broker" + services: + - + spec: + type: ClusterIP + clusterIP: None + - + metadata: + name: broker-%s-service + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + spec: + type: LoadBalancer + ports: + - + name: service-port + port: 8090 + targetPort: 8080 + druid.port: 8080 + replicas: 2 + podDisruptionBudgetSpec: + maxUnavailable: 1 + livenessProbe: + httpGet: + path: /status + ports: + - + name: random + containerPort: 8083 + runtime.properties: |- + druid.service=druid/broker + + # HTTP server threads + druid.broker.http.numConnections=5 + druid.server.http.numThreads=25 + + # Processing threads and buffers + druid.processing.buffer.sizeBytes=1 + druid.processing.numMergeBuffers=1 + druid.processing.numThreads=1 + extra.jvm.options: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 + + volumeMounts: + - mountPath: /druid/data + name: data-volume + readOnly: true + resources: + requests: + memory: "2Gi" + cpu: "4" + limits: + memory: "2Gi" + cpu: "4" + + coordinators: + nodeType: "coordinator" + druid.port: 8080 + replicas: 1 + ports: + - + name: random + containerPort: 8083 + runtime.properties: |- + druid.service=druid/coordinator + + # HTTP server threads + druid.coordinator.startDelay=PT30S + druid.coordinator.period=PT30S + extra.jvm.options: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 + + volumeMounts: + - mountPath: /druid/data + name: data-volume + resources: + requests: + memory: "2Gi" + cpu: "4" + limits: + memory: "2Gi" + cpu: "4" + + historicals: + nodeType: "historical" + druid.port: 8080 + replicas: 2 + ports: + - + name: random + containerPort: 8084 + runtime.properties: |- + druid.service=druid/historical + druid.server.http.numThreads=10 + druid.processing.buffer.sizeBytes=268435456 + druid.processing.numMergeBuffers=1 + druid.processing.numThreads=1 + # Segment storage + druid.segmentCache.locations=[{\"path\":\"/druid/data/segments\",\"maxSize\":10737418240}] + druid.server.maxSize=10737418240 + extra.jvm.options: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 + + volumeMounts: + - mountPath: /druid/data + name: data-volume + resources: + requests: + memory: "2Gi" + cpu: "4" + limits: + memory: "2Gi" + cpu: "4" 
+ + overlords: + nodeType: "overlord" + druid.port: 8080 + replicas: 1 + ports: + - + name: random + containerPort: 8083 + runtime.properties: |- + druid.service=druid/overlord + + # HTTP server threads + druid.indexer.queue.startDelay=PT30S + druid.indexer.runner.type=remote + druid.indexer.storage.type=metadata + extra.jvm.options: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 + + volumeMounts: + - mountPath: /druid/data + name: data-volume + resources: + requests: + memory: "2Gi" + cpu: "4" + limits: + memory: "2Gi" + cpu: "4" + + middlemanagers: + nodeType: "middleManager" + druid.port: 8080 + replicas: 1 + ports: + - + name: peon-0-pt + containerPort: 8100 + - + name: peon-1-pt + containerPort: 8101 + - + name: peon-2-pt + containerPort: 8102 + - + name: peon-3-pt + containerPort: 8103 + - + name: peon-4-pt + containerPort: 8104 + - + name: peon-5-pt + containerPort: 8105 + - + name: peon-6-pt + containerPort: 8106 + - + name: peon-7-pt + containerPort: 8107 + - + name: peon-8-pt + containerPort: 8108 + - + name: peon-9-pt + containerPort: 8109 + + runtime.properties: |- + druid.service=druid/middleManager + druid.worker.capacity=1 + druid.indexer.runner.javaOpts=-server -XX:MaxDirectMemorySize=10240g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.io.tmpdir=/druid/data/tmp -Dlog4j.debug -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintGCApplicationConcurrentTime -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=50 -XX:GCLogFileSize=10m -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:+UseG1GC -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Xloggc:/druid/data/logs/peon.gc.%t.%p.log -XX:HeapDumpPath=/druid/data/logs/peon.%t.%p.hprof -Xms1G -Xmx1G + druid.indexer.task.baseTaskDir=/druid/data/baseTaskDir + druid.server.http.numThreads=10 + druid.indexer.fork.property.druid.processing.buffer.sizeBytes=268435456 + druid.indexer.fork.property.druid.processing.numMergeBuffers=1 + druid.indexer.fork.property.druid.processing.numThreads=1 + druid.indexer.task.hadoopWorkingPath=/druid/data/hadoop-working-path + druid.indexer.task.defaultHadoopCoordinates=[\"org.apache.hadoop:hadoop-client:2.7.3\"] + extra.jvm.options: |- + -Xmx1G + -Xms1G + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp2 + + volumeMounts: + - mountPath: /druid/data + name: data-volume + resources: + requests: + memory: "3Gi" + cpu: "4" + limits: + memory: "3Gi" + cpu: "4" From 322751f4a994257440c62f999141da91fe58c926 Mon Sep 17 00:00:00 2001 From: Naya Chen Date: Wed, 2 Nov 2022 21:33:08 +0000 Subject: [PATCH 3/3] Update controllers/druid/testdata/druid-test-cr-start-command.yaml Co-authored-by: AdheipSingh <34169002+AdheipSingh@users.noreply.github.com> --- controllers/druid/testdata/druid-test-cr-start-command.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/druid/testdata/druid-test-cr-start-command.yaml b/controllers/druid/testdata/druid-test-cr-start-command.yaml index ba3b56bd..b66d6c30 100644 --- a/controllers/druid/testdata/druid-test-cr-start-command.yaml +++ b/controllers/druid/testdata/druid-test-cr-start-command.yaml @@ -7,7 +7,7 @@ spec: image: himanshu01/druid:druid-0.12.0-1 startScript: sh entryArg: echo 'Hello World' - DruidScript: druid-test.sh + 
druidScript: druid-test.sh podAnnotations: key1: value1 key2: value2
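
For reference, a minimal standalone sketch (not part of the patch) of how the new fields resolve into the container command and args. getCommand, getEntryArg, and firstNonEmptyStr are reproduced here with plain string parameters instead of the operator's CR structs, so the snippet compiles on its own; the values in main come from druid-test-cr-start-command.yaml.

package main

import (
	"fmt"
	"strings"
)

// firstNonEmptyStr mirrors the operator helper: return s unless it is empty,
// otherwise return the fallback def.
func firstNonEmptyStr(s, def string) string {
	if s != "" {
		return s
	}
	return def
}

// getCommand: when both startScript and entryArg are set, the start script
// (e.g. "sh" or "bash") alone becomes the container command; otherwise the
// start script (default "bin/run-druid.sh") is invoked with the node type.
func getCommand(startScript, entryArg, nodeType string) []string {
	if startScript != "" && entryArg != "" {
		return []string{startScript}
	}
	return []string{firstNonEmptyStr(startScript, "bin/run-druid.sh"), nodeType}
}

// getEntryArg: when entryArg is set, the container args become
// ["-c", "<entryArg> && <druidScript> <nodeType>"]; otherwise nil.
func getEntryArg(entryArg, druidScript, nodeType string) []string {
	if entryArg != "" {
		return []string{"-c", strings.Join([]string{entryArg, "&&", firstNonEmptyStr(druidScript, "bin/run-druid.sh"), nodeType}, " ")}
	}
	return nil
}

func main() {
	// Broker node from the test CR: prints [sh] and
	// [-c echo 'Hello World' && druid-test.sh broker],
	// matching broker-statefulset-start-command.yaml.
	fmt.Println(getCommand("sh", "echo 'Hello World'", "broker"))
	fmt.Println(getEntryArg("echo 'Hello World'", "druid-test.sh", "broker"))

	// With entryArg unset the previous behaviour is unchanged:
	// prints [bin/run-druid.sh broker] and [].
	fmt.Println(getCommand("", "", "broker"))
	fmt.Println(getEntryArg("", "", "broker"))
}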