diff --git a/.env b/.env index d79c25287..12038fdfc 100644 --- a/.env +++ b/.env @@ -6,4 +6,4 @@ KUBECTL_VERSION=v1.29.1 AZ_CLI_VERSION=2.30.0 EKSCTL_VERSION=v0.143.0 EKS_CLUSTER_K8_VERSION=1.27 -SPLUNK_ENTERPRISE_RELEASE_IMAGE=splunk/splunk:9.1.3 +SPLUNK_ENTERPRISE_RELEASE_IMAGE=splunk/splunk:9.1.3 \ No newline at end of file diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index 7a36b8fae..9271ca640 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -4,6 +4,7 @@ on: branches: - develop - main + - feature** jobs: build-operator-image: runs-on: ubuntu-latest diff --git a/.github/workflows/nightly-int-test-workflow.yml b/.github/workflows/nightly-int-test-workflow.yml index 91537feac..065743958 100644 --- a/.github/workflows/nightly-int-test-workflow.yml +++ b/.github/workflows/nightly-int-test-workflow.yml @@ -1,7 +1,7 @@ name: Nightly Integration Test WorkFlow on: schedule: - - cron: "0 06 * * *" + - cron: "0 06 * * 0" jobs: build-operator-image: runs-on: ubuntu-latest diff --git a/bundle/manifests/splunk-operator.clusterserviceversion.yaml b/bundle/manifests/splunk-operator.clusterserviceversion.yaml index 31a34c49f..8ebe30c0f 100644 --- a/bundle/manifests/splunk-operator.clusterserviceversion.yaml +++ b/bundle/manifests/splunk-operator.clusterserviceversion.yaml @@ -111,7 +111,7 @@ metadata: capabilities: Seamless Upgrades categories: Big Data, Logging & Tracing, Monitoring, Security, AI/Machine Learning containerImage: splunk/splunk-operator@sha256:c4e0d314622699496f675760aad314520d050a66627fdf33e1e21fa28ca85d50 - createdAt: "2024-01-22T21:05:16Z" + createdAt: "2024-02-20T18:45:06Z" description: The Splunk Operator for Kubernetes enables you to quickly and easily deploy Splunk Enterprise on your choice of private or public cloud provider. 
The Operator simplifies scaling and management of Splunk Enterprise by automating @@ -120,7 +120,7 @@ metadata: operators.operatorframework.io/builder: operator-sdk-v1.31.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 repository: https://github.com/splunk/splunk-operator - name: splunk-operator.v2.2.1 + name: splunk-operator.v2.5.1 namespace: placeholder spec: apiservicedefinitions: {} @@ -815,7 +815,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name - image: docker.io/splunk/splunk-operator:2.5.0 + image: docker.io/splunk/splunk-operator:2.5.1 imagePullPolicy: Always livenessProbe: httpGet: @@ -929,5 +929,5 @@ spec: relatedImages: - image: docker.io/splunk/splunk:9.1.3 name: splunk-enterprise - replaces: splunk-operator.v2.2.0 - version: 2.2.1 + replaces: splunk-operator.v2.5.0 + version: 2.5.1 diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index d367eb1a7..8e92ce43b 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -17,4 +17,4 @@ kind: Kustomization images: - name: controller newName: docker.io/splunk/splunk-operator - newTag: 2.5.0 + newTag: 2.5.1 diff --git a/config/manifests/bases/splunk-operator.clusterserviceversion.yaml b/config/manifests/bases/splunk-operator.clusterserviceversion.yaml index 5cddc8572..c0d9b1fa9 100644 --- a/config/manifests/bases/splunk-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/splunk-operator.clusterserviceversion.yaml @@ -12,7 +12,7 @@ metadata: administrative workflows using Kubernetes best practices. olm.properties: '[{"type": "olm.maxOpenShiftVersion", "value": "4.9"}]' repository: https://github.com/splunk/splunk-operator - name: splunk-operator.v2.2.1 + name: splunk-operator.v2.5.1 namespace: placeholder spec: apiservicedefinitions: {} @@ -274,5 +274,5 @@ spec: provider: name: Splunk Inc. 
url: www.splunk.com - replaces: splunk-operator.v2.2.0 - version: 2.2.1 + replaces: splunk-operator.v2.5.0 + version: 2.5.1 diff --git a/docs/AppFramework.md b/docs/AppFramework.md index f79da2bda..c842039c7 100644 --- a/docs/AppFramework.md +++ b/docs/AppFramework.md @@ -1,6 +1,6 @@ # App Framework Resource Guide -The Splunk Operator provides support for Splunk app and add-on deployment using the App Framework. The App Framework specification supports configuration management using the Splunk Enterprise cluster and standalone [custom resources](https://splunk.github.io/splunk-operator/CustomResources.html) (CR). +The Splunk Operator provides support for Splunk app and add-on deployment using the App Framework. The App Framework specification supports configuration management using the Splunk Enterprise cluster and standalone [custom resources](https://splunk.github.io/splunk-operator/CustomResources.html) (CR). ## Prerequisites @@ -10,7 +10,7 @@ Utilizing the App Framework requires one of the following remote storage provide ### Prerequisites common to both remote storage providers * The App framework requires read-only access to the path used to host the apps. DO NOT give any other access to the operator to maintain the integrity of data in S3 bucket or Azure blob container. -* Splunk apps and add-ons in a .tgz or .spl archive format. +* Splunk apps and add-ons in a .tgz or .spl archive format. * Connections to the remote object storage endpoint need to be secured using a minimum version of TLS 1.2. * A persistent storage volume and path for the Operator Pod. See [Add a persistent storage volume to the Operator pod](#add-a-persistent-storage-volume-to-the-operator-pod). @@ -25,7 +25,7 @@ Utilizing the App Framework requires one of the following remote storage provide Splunk apps and add-ons deployed or installed outside of the App Framework are not managed, and are unsupported. 
-Note: For the App Framework to detect that an app or add-on had changed, the updated app must use the same archive file name as the previously deployed one. +Note: For the App Framework to detect that an app or add-on had changed, the updated app must use the same archive file name as the previously deployed one. ## Examples of App Framework usage Following section shows examples of using App Framework for both remote data storages. First, the examples for S3 based remote object storage are given and then same examples are covered for Azure blob. The examples in both the cases have lot of commonalities and the places they differ are mainly in the values for `storageType`, `provider` and `endpoint`. There are also some differences in the authoriziation setup for using IAM /Managed Identity in both remote data storages. @@ -39,7 +39,7 @@ In this example, you'll deploy a Standalone CR with a remote storage volume, the 1. Confirm your remote storage volume path and URL. 2. Configure credentials to connect to remote store by: - * s3 based remote storage: + * s3 based remote storage: * Configuring an IAM role for the Operator and Splunk instance pods using a service account or annotations. * Or, create a Kubernetes Secret Object with the static storage credentials. * Example: `kubectl create secret generic s3-secret --from-literal=s3_access_key=AKIAIOSFODNN7EXAMPLE --from-literal=s3_secret_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLE_S3_SECRET_KEY` @@ -120,9 +120,9 @@ spec: 6. Apply the Custom Resource specification: `kubectl apply -f Standalone.yaml` -The App Framework detects the Splunk app or add-on archive files available in the App Source locations, and deploys them to the standalone instance path for local use. +The App Framework detects the Splunk app or add-on archive files available in the App Source locations, and deploys them to the standalone instance path for local use. 
-The App Framework maintains a checksum for each app or add-on archive file in the App Source location. The app name and checksum is recorded in the CR, and used to compare the deployed apps to the app archive files in the App Source location. The App Framework will scan for changes to the App Source folders using the polling interval, and deploy any updated apps to the instance. For the App Framework to detect that an app or add-on had changed, the updated app must use the same archive file name as the previously deployed one. +The App Framework maintains a checksum for each app or add-on archive file in the App Source location. The app name and checksum is recorded in the CR, and used to compare the deployed apps to the app archive files in the App Source location. The App Framework will scan for changes to the App Source folders using the polling interval, and deploy any updated apps to the instance. For the App Framework to detect that an app or add-on had changed, the updated app must use the same archive file name as the previously deployed one. By default, the App Framework polls the remote object storage location for new or changed apps at the `appsRepoPollIntervalSeconds` interval. To disable the interval check, and manage app updates manually, see the [Manual initiation of app management](#manual-initiation-of-app-management). @@ -135,7 +135,7 @@ This example describes the installation of apps on an Indexer Cluster and Cluste 1. Confirm your remote storage volume path and URL. 2. Configure credentials to connect to remote store by: - * s3 based remote storage: + * s3 based remote storage: * Configuring an IAM role for the Operator and Splunk instance pods using a service account or annotations. * Or, create a Kubernetes Secret Object with the static storage credentials. 
* Example: `kubectl create secret generic s3-secret --from-literal=s3_access_key=AKIAIOSFODNN7EXAMPLE --from-literal=s3_secret_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLE_S3_SECRET_KEY` @@ -145,7 +145,7 @@ This example describes the installation of apps on an Indexer Cluster and Cluste * Example: `kubectl create secret generic azureblob-secret --from-literal=azure_sa_name=mystorageaccount --from-literal=azure_sa_secret_key=wJalrXUtnFEMI/K7MDENG/EXAMPLE_AZURE_SHARED_ACCESS_KEY` 3. Create unique folders on the remote storage volume to use as App Source locations. - * An App Source is a folder on the remote storage volume containing a select subset of Splunk apps and add-ons. In this example, there are Splunk apps installed and run locally on the cluster manager, and select apps that will be distributed to all cluster peers by the cluster manager. + * An App Source is a folder on the remote storage volume containing a select subset of Splunk apps and add-ons. In this example, there are Splunk apps installed and run locally on the cluster manager, and select apps that will be distributed to all cluster peers by the cluster manager. * The apps are split across three folders named `networkApps`, `clusterBase`, and `adminApps`. The apps placed into `networkApps` and `clusterBase` are distributed to the cluster peers, but the apps in `adminApps` are for local use on the cluster manager instance only. 4. Copy your Splunk app or add-on archive files to the App Source. @@ -221,13 +221,13 @@ spec: ``` 6. Apply the Custom Resource specification: `kubectl apply -f ClusterManager.yaml` -The App Framework detects the Splunk app or add-on archive files available in the App Source locations, and deploys the apps from the `adminApps` folder to the cluster manager instance for local use. +The App Framework detects the Splunk app or add-on archive files available in the App Source locations, and deploys the apps from the `adminApps` folder to the cluster manager instance for local use. 
-The apps in the `networkApps` and `clusterBase` folders are deployed to the cluster manager for use on the cluster peers. The cluster manager is responsible for deploying those apps to the cluster peers. +The apps in the `networkApps` and `clusterBase` folders are deployed to the cluster manager for use on the cluster peers. The cluster manager is responsible for deploying those apps to the cluster peers. Note: The Splunk cluster peer restarts are triggered by the contents of the Splunk apps deployed, and are not initiated by the App Framework. -The App Framework maintains a checksum for each app or add-on archive file in the App Source location. The app name and checksum is recorded in the CR, and used to compare the deployed apps to the app archive files in the App Source location. The App Framework will scan for changes to the App Source folders using the polling interval, and deploy any updated apps to the instance. For the App Framework to detect that an app or add-on had changed, the updated app must use the same archive file name as the previously deployed one. +The App Framework maintains a checksum for each app or add-on archive file in the App Source location. The app name and checksum is recorded in the CR, and used to compare the deployed apps to the app archive files in the App Source location. The App Framework will scan for changes to the App Source folders using the polling interval, and deploy any updated apps to the instance. For the App Framework to detect that an app or add-on had changed, the updated app must use the same archive file name as the previously deployed one. By default, the App Framework polls the remote object storage location for new or changed apps at the `appsRepoPollIntervalSeconds` interval. To disable the interval check, and manage app updates manually, see the [Manual initiation of app management](#manual-initiation-of-app-management). 
@@ -240,7 +240,7 @@ This example describes the installation of apps on the Deployer and the Search H 1. Confirm your remote storage volume path and URL. 2. Configure credentials to connect to remote store by: - * s3 based remote storage: + * s3 based remote storage: * Configuring an IAM role for the Operator and Splunk instance pods using a service account or annotations. * Or, create a Kubernetes Secret Object with the static storage credentials. * Example: `kubectl create secret generic s3-secret --from-literal=s3_access_key=AKIAIOSFODNN7EXAMPLE --from-literal=s3_secret_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLE_S3_SECRET_KEY` @@ -251,15 +251,15 @@ This example describes the installation of apps on the Deployer and the Search H 3. Create unique folders on the remote storage volume to use as App Source locations. - * An App Source is a folder on the remote storage volume containing a select subset of Splunk apps and add-ons. In this example, there are Splunk apps installed and run locally on the Deployer, and select apps that will be distributed to all cluster search heads by the Deployer. + * An App Source is a folder on the remote storage volume containing a select subset of Splunk apps and add-ons. In this example, there are Splunk apps installed and run locally on the Deployer, and select apps that will be distributed to all cluster search heads by the Deployer. * The apps are split across three folders named `searchApps`, `machineLearningApps` and `adminApps`. The apps placed into `searchApps` and `machineLearningApps` are distributed to the search heads, but the apps in `adminApps` are for local use on the Deployer instance only. 4. Copy your Splunk app or add-on archive files to the App Source. 
* In this example, the Splunk apps for the search heads are located at `bucket-app-framework/shcLoc-us/searchAppsLoc/`, `bucket-app-framework/shcLoc-us/machineLearningAppsLoc/`, and the apps for the Deployer are located at `bucket-app-framework/shcLoc-us/adminAppsLoc/`. They are all accessible through the end point `https://s3-us-west-2.amazonaws.com` for s3 and https://mystorageaccount.blob.core.windows.net for azure blob. 5. Update the SearchHeadCluster CR specification, and append the volume, App Source configuration, and scope. - * The scope determines where the apps and add-ons are placed into the Splunk Enterprise instance. - * For CRs where the Splunk Enterprise instance will deploy the apps to search heads, set the `scope:cluster`. The ClusterManager and SearchHeadCluster CRs support both cluster and local scopes. + * The scope determines where the apps and add-ons are placed into the Splunk Enterprise instance. + * For CRs where the Splunk Enterprise instance will deploy the apps to search heads, set the `scope:cluster`. The ClusterManager and SearchHeadCluster CRs support both cluster and local scopes. * In this example, the Deployer will run some apps locally, and deploy other apps to the clustered search heads. The App Source folder `adminApps` contains Splunk apps that are installed and run on the Deployer, and will use a local scope. The apps in the App Source folders `searchApps` and `machineLearningApps` will be deployed from the Deployer to the search heads, and will use a cluster scope. 
Example using S3: SearchHeadCluster.yaml @@ -268,7 +268,7 @@ Example using S3: SearchHeadCluster.yaml apiVersion: enterprise.splunk.com/v4 kind: SearchHeadCluster metadata: - name: shc + name: shc finalizers: - enterprise.splunk.com/delete-pvc spec: @@ -302,7 +302,7 @@ Example using Azure blob: SearchHeadCluster.yaml apiVersion: enterprise.splunk.com/v4 kind: SearchHeadCluster metadata: - name: shc + name: shc finalizers: - enterprise.splunk.com/delete-pvc spec: @@ -330,13 +330,13 @@ spec: ``` 6. Apply the Custom Resource specification: `kubectl apply -f SearchHeadCluster.yaml` -The App Framework detects the Splunk app or add-on archive files available in the App Source locations, and deploys the apps from the `adminApps` folder to the Deployer instance for local use. +The App Framework detects the Splunk app or add-on archive files available in the App Source locations, and deploys the apps from the `adminApps` folder to the Deployer instance for local use. -The apps in the `searchApps` and `machineLearningApps` folders are deployed to the Deployer for use on the clustered search heads. The Deployer is responsible for deploying those apps to the search heads. +The apps in the `searchApps` and `machineLearningApps` folders are deployed to the Deployer for use on the clustered search heads. The Deployer is responsible for deploying those apps to the search heads. Note: The Splunk search head restarts are triggered by the contents of the Splunk apps deployed, and are not initiated by the App Framework. -The App Framework maintains a checksum for each app or add-on archive file in the App Source location. The app name and checksum is recorded in the CR, and used to compare the deployed apps to the app archive files in the App Source location. The App Framework will scan for changes to the App Source folders using the polling interval, and deploy any updated apps to the instance. 
For the App Framework to detect that an app or add-on had changed, the updated app must use the same archive file name as the previously deployed one. +The App Framework maintains a checksum for each app or add-on archive file in the App Source location. The app name and checksum is recorded in the CR, and used to compare the deployed apps to the app archive files in the App Source location. The App Framework will scan for changes to the App Source folders using the polling interval, and deploy any updated apps to the instance. For the App Framework to detect that an app or add-on had changed, the updated app must use the same archive file name as the previously deployed one. By default, the App Framework polls the remote object storage location for new or changed apps at the `appsRepoPollIntervalSeconds` interval. To disable the interval check, and manage app updates manually, see the [Manual initiation of app management](#manual-initiation-of-app-management). @@ -448,11 +448,11 @@ Here is a typical App framework configuration in a Custom Resource definition: `appSources` defines the name and scope of the appSource, the remote storage volume, and its location. * `name` uniquely identifies the App source configuration within a CR. This used locally by the Operator to identify the App source. -* `scope` defines the scope of the app to be installed. - * If the scope is `local`, the apps will be installed and run locally on the pod referred to by the CR. +* `scope` defines the scope of the app to be installed. + * If the scope is `local`, the apps will be installed and run locally on the pod referred to by the CR. * If the scope is `cluster`, the apps will be placed onto the configuration management node (Deployer, Cluster Manager) for deployment across the cluster referred to by the CR. * The cluster scope is only supported on CRs that manage cluster-wide app deployment. 
- + | CRD Type | Scope support | App Framework support | | :---------------- | :------------------------------------- | :-------------------- | | ClusterManager | cluster, local | Yes | @@ -463,11 +463,11 @@ Here is a typical App framework configuration in a Custom Resource definition: | IndexerCluster | N/A | No | * `volume` refers to the remote storage volume name configured under the `volumes` stanza (see previous section.) -* `location` helps configure the specific appSource present under the `path` within the `volume`, containing the apps to be installed. +* `location` helps configure the specific appSource present under the `path` within the `volume`, containing the apps to be installed. ### appsRepoPollIntervalSeconds -If app framework is enabled, the Splunk Operator creates a namespace scoped configMap named **splunk-\-manual-app-update**, which is used to manually trigger the app updates. The App Framework uses the polling interval `appsRepoPollIntervalSeconds` to check for additional apps, or modified apps on the remote object storage. +If app framework is enabled, the Splunk Operator creates a namespace scoped configMap named **splunk-\-manual-app-update**, which is used to manually trigger the app updates. The App Framework uses the polling interval `appsRepoPollIntervalSeconds` to check for additional apps, or modified apps on the remote object storage. When `appsRepoPollIntervalSeconds` is set to `0` for a CR, the App Framework will not perform a check until the configMap `status` field is updated manually. See [Manual initiation of app management](#manual_initiation_of_app_management). 
@@ -542,7 +542,7 @@ spec: serviceAccountName: splunk-operator containers: - name: splunk-operator - image: "docker.io/splunk/splunk-operator:2.5.0" + image: "docker.io/splunk/splunk-operator:2.5.1" volumeMounts: - mountPath: /opt/splunk/appframework/ name: app-staging @@ -573,7 +573,7 @@ You can prevent the App Framework from automatically polling the remote storage When you're ready to initiate an app check using the App Framework, manually update the `status` field in the configMap for that CR type to `on`. The 'status' field defaults to 'off'. -For example, you deployed one Standalone CR with app framework enabled. +For example, you deployed one Standalone CR with app framework enabled. ``` kubectl get standalone @@ -620,15 +620,15 @@ The App Framework does not preview, analyze, verify versions, or enable Splunk A 1. The App Framework has no support to remove an app or add-on once it’s been deployed. To disable an app, update the archive contents located in the App Source, and set the app.conf state to disabled. -2. The App Framework defines one worker per CR type. For example, if you have multiple clusters receiveing app updates, a delay while managing one cluster will delay the app updates to the other cluster. +2. The App Framework defines one worker per CR type. For example, if you have multiple clusters receiveing app updates, a delay while managing one cluster will delay the app updates to the other cluster. ## Setup Azure bob access with Managed Indentity -Azure Managed identities can be used to provide IAM access to the blobs. With managed identities, the AKS nodes, that host the pods, can retrieve a OAuth token that provides authorization for the Splunk operator pod to read the app packages stored in the Azure Storage account. The key point here is that the AKS node is associated with a Managed Identity and this managed identity is given a `role` for read access called `Storage Blob Data Reader` to the azure storage account. 
+Azure Managed identities can be used to provide IAM access to the blobs. With managed identities, the AKS nodes, that host the pods, can retrieve a OAuth token that provides authorization for the Splunk operator pod to read the app packages stored in the Azure Storage account. The key point here is that the AKS node is associated with a Managed Identity and this managed identity is given a `role` for read access called `Storage Blob Data Reader` to the azure storage account. Here are the steps showing an example of assiging managed identity: -*Assumptions:* +*Assumptions:* Familiarize yourself with [AKS managed identity concepts](https://learn.microsoft.com/en-us/azure/aks/use-managed-identity) @@ -652,11 +652,11 @@ az aks create -g splunkOperatorResourceGroup -n splunkOperatorCluster --enable-m ``` az aks get-credentials --resource-group splunkOperatorResourceGroup --name splunkOperatorCluster ``` -4. Get the Kubelet user managed identity +4. Get the Kubelet user managed identity Run ``` -$ az identity list +$ az identity list ``` Find the section that has -agentpool under name @@ -677,7 +677,7 @@ That is look for the block that contains "name": "splunkOperatorCluster-agentpoo } ``` -Extract the principalId value from the outout above. Or you can use the following command to get the principalId +Extract the principalId value from the outout above. 
Or you can use the following command to get the principalId ``` $ az identity show --name --resource-group "" --query 'principalId' --output tsv ``` diff --git a/docs/ChangeLog.md b/docs/ChangeLog.md index 7681fe8df..0f55a5553 100644 --- a/docs/ChangeLog.md +++ b/docs/ChangeLog.md @@ -1,5 +1,21 @@ # Splunk Operator for Kubernetes Change Log +## 2.5.1 (2024-02-20) + +CSPL-2532: fix for leader election lost issue (#1281) + +### Supported Splunk Version +>| Splunk Version| +>| --- | +>| 9.0.8 | +>| 9.1.3 | + +### Supported Kubernetes Version +>| Kubernetes Version| +>| --- | +>| 1.25+ | + + ## 2.5.0 (2024-02-05) CSPL-2155: Support for Level-2 Upgrade Strategy in Splunk Operator diff --git a/docs/Install.md b/docs/Install.md index 8ac60b8bd..0cc41f81e 100644 --- a/docs/Install.md +++ b/docs/Install.md @@ -7,7 +7,7 @@ If you want to customize the installation of the Splunk Operator, download a copy of the installation YAML locally, and open it in your favorite editor. ``` -wget -O splunk-operator-cluster.yaml https://github.com/splunk/splunk-operator/releases/download/2.5.0/splunk-operator-cluster.yaml +wget -O splunk-operator-cluster.yaml https://github.com/splunk/splunk-operator/releases/download/2.5.1/splunk-operator-cluster.yaml ``` ## Default Installation @@ -17,7 +17,7 @@ Based on the file used Splunk Operator can be installed cluster-wide or namespac By installing `splunk-operator-cluster.yaml` Operator will watch all the namespaces of your cluster for splunk enterprise custom resources ``` -wget -O splunk-operator-cluster.yaml https://github.com/splunk/splunk-operator/releases/download/2.5.0/splunk-operator-cluster.yaml +wget -O splunk-operator-cluster.yaml https://github.com/splunk/splunk-operator/releases/download/2.5.1/splunk-operator-cluster.yaml kubectl apply -f splunk-operator-cluster.yaml ``` @@ -44,10 +44,10 @@ If Splunk Operator is installed clusterwide and user wants to manage multiple na ## Install operator to watch single namespace with restrictive 
permission -In order to install operator with restrictive permission to watch only single namespace use [splunk-operator-namespace.yaml](https://github.com/splunk/splunk-operator/releases/download/2.5.0/splunk-operator-namespace.yaml). This will create Role and Role-Binding to only watch single namespace. By default operator will be installed in `splunk-operator` namespace, user can edit the file to change the namespace +In order to install operator with restrictive permission to watch only single namespace use [splunk-operator-namespace.yaml](https://github.com/splunk/splunk-operator/releases/download/2.5.1/splunk-operator-namespace.yaml). This will create Role and Role-Binding to only watch single namespace. By default operator will be installed in `splunk-operator` namespace, user can edit the file to change the namespace ``` -wget -O splunk-operator-namespace.yaml https://github.com/splunk/splunk-operator/releases/download/2.5.0/splunk-operator-namespace.yaml +wget -O splunk-operator-namespace.yaml https://github.com/splunk/splunk-operator/releases/download/2.5.1/splunk-operator-namespace.yaml kubectl apply -f splunk-operator-namespace.yaml ``` diff --git a/docs/README.md b/docs/README.md index e6feea745..33daf6dfb 100644 --- a/docs/README.md +++ b/docs/README.md @@ -113,12 +113,12 @@ For production environments, we are requiring the use of Splunk SmartStore. 
As a A Kubernetes cluster administrator can install and start the Splunk Operator for specific namespace by running: ``` -kubectl apply -f https://github.com/splunk/splunk-operator/releases/download/2.5.0/splunk-operator-namespace.yaml --server-side --force-conflicts +kubectl apply -f https://github.com/splunk/splunk-operator/releases/download/2.5.1/splunk-operator-namespace.yaml --server-side --force-conflicts ``` A Kubernetes cluster administrator can install and start the Splunk Operator for cluster-wide by running: ``` -kubectl apply -f https://github.com/splunk/splunk-operator/releases/download/2.5.0/splunk-operator-cluster.yaml --server-side --force-conflicts +kubectl apply -f https://github.com/splunk/splunk-operator/releases/download/2.5.1/splunk-operator-cluster.yaml --server-side --force-conflicts ``` The [Advanced Installation Instructions](Install.md) page offers guidance for advanced configurations, including the use of private image registries, installation at cluster scope, and installing the Splunk Operator as a user who is not a Kubernetes administrator. Users of Red Hat OpenShift should review the [Red Hat OpenShift](OpenShift.md) page. diff --git a/docs/SplunkOperatorUpgrade.md b/docs/SplunkOperatorUpgrade.md index 5e923ceb9..46e3a0094 100644 --- a/docs/SplunkOperatorUpgrade.md +++ b/docs/SplunkOperatorUpgrade.md @@ -1,5 +1,6 @@ # How to upgrade Splunk Operator and Splunk Enterprise Deployments +To upgrade the Splunk Operator for Kubernetes, you will overwrite the prior Operator release with the latest version. Once the lastest version of `splunk-operator-namespace.yaml` ([see below](#upgrading-splunk-operator-and-splunk-operator-deployment)) is applied the CRD's are updated and Operator deployment is updated with newer version of Splunk Operator image. Any new spec defined by the operator will be applied to the pods managed by Splunk Operator for Kubernetes. 
To upgrade the Splunk Operator for Kubernetes, you will overwrite the prior Operator release with the latest version. Once the lastest version of `splunk-operator-namespace.yaml` ([see below](#upgrading-splunk-operator-and-splunk-operator-deployment)) is applied the CRD's are updated and Operator deployment is updated with newer version of Splunk Operator image. Any new spec defined by the operator will be applied to the pods managed by Splunk Operator for Kubernetes. ​ A Splunk Operator for Kubernetes upgrade might include support for a later version of the Splunk Enterprise Docker image. In that scenario, after the Splunk Operator completes its upgrade, the pods managed by Splunk Operator for Kubernetes will be restarted using the latest Splunk Enterprise Docker image. @@ -11,6 +12,7 @@ A Splunk Operator for Kubernetes upgrade might include support for a later versi * Before you upgrade, review the Splunk Operator [change log](https://github.com/splunk/splunk-operator/releases) page for information on changes made in the latest release. The Splunk Enterprise Docker image compatibility is noted in each release version. ​ * If the Splunk Enterprise Docker image changes, review the Splunk Enterprise [Upgrade Readme](https://docs.splunk.com/Documentation/Splunk/latest/Installation/AboutupgradingREADTHISFIRST) page before upgrading. +* If the Splunk Enterprise Docker image changes, review the Splunk Enterprise [Upgrade Readme](https://docs.splunk.com/Documentation/Splunk/latest/Installation/AboutupgradingREADTHISFIRST) page before upgrading. ​ * For general information about Splunk Enterprise compatibility and the upgrade process, see [How to upgrade Splunk Enterprise](https://docs.splunk.com/Documentation/Splunk/latest/Installation/HowtoupgradeSplunk). ​ @@ -25,7 +27,7 @@ A Splunk Operator for Kubernetes upgrade might include support for a later versi 1. Download the latest Splunk Operator installation yaml file. 
​ ``` -wget -O splunk-operator-namespace.yaml https://github.com/splunk/splunk-operator/releases/download/2.5.0/splunk-operator-namespace.yaml +wget -O splunk-operator-namespace.yaml https://github.com/splunk/splunk-operator/releases/download/2.5.1/splunk-operator-namespace.yaml ``` ​ 2. (Optional) Review the file and update it with your specific customizations used during your install. @@ -48,8 +50,9 @@ If a Splunk Operator release changes the custom resource (CRD) API version, the ### Upgrading Splunk Enterprise Docker Image with the Operator Upgrade -Splunk Operator follows the upgrade path steps mentioned in [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/9.1.2/Installation/HowtoupgradeSplunk). If a Splunk Operator release includes an updated Splunk Enterprise Docker image, the operator upgrade will also initiate pod restart using the latest Splunk Enterprise Docker image. To follow the best practices described under the [General Process to Upgrade the Splunk Enterprise], a recommeded upgrade path is followed while initiating pod restarts of different Splunk Instances. At each step, if a particular CR instance exists, a certain flow is imposed to ensure that each instance is updated in the correct order. After an instance is upgraded, the Operator verifies if the upgrade was successful and all the components are working as expected. If any unexpected behaviour is detected, the process is terminated. +Splunk Operator follows the upgrade path steps mentioned in [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/9.1.3/Installation/HowtoupgradeSplunk). If a Splunk Operator release includes an updated Splunk Enterprise Docker image, the operator upgrade will also initiate pod restart using the latest Splunk Enterprise Docker image. To follow the best practices described under the [General Process to Upgrade the Splunk Enterprise], a recommended upgrade path is followed while initiating pod restarts of different Splunk Instances. 
At each step, if a particular CR instance exists, a certain flow is imposed to ensure that each instance is updated in the correct order. After an instance is upgraded, the Operator verifies if the upgrade was successful and all the components are working as expected. If any unexpected behaviour is detected, the process is terminated. +If a Splunk Operator release changes the custom resource (CRD) API version, the administrator is responsible for updating their Custom Resource specification to reference the latest CRD API version. ## Steps to Upgrade from 1.0.5 or older version to latest @@ -145,11 +148,14 @@ imagePullPolicy: IfNotPresent This is an example of the process followed by the Splunk Operator if the operator version is upgraded and a later Splunk Enterprise Docker image is available: ​ -1. A new Splunk Operator pod will be created, and the existing operator pod will be terminated. -3. Any existing License Manager, Standalone, Monitoring console, Cluster manager, Search Head, ClusterManager, Indexer pods will be terminated to be redeployed with the upgraded spec. -4. Splunk Operator follows the upgrade path steps mentioned in Splunk documentation. The termination and redeployment of the pods happen in a particular order based on a recommended upgrade path. -5. After a ClusterManager pod is restarted, the Indexer Cluster pods which are connected to it are terminated and redeployed. -6. After all pods in the Indexer cluster and Search head cluster are redeployed, the Monitoring Console pod is terminated and redeployed. -7. Each pod upgrade is verified to ensure the process was successful and everything is working as expected. + +1. Initiation of a new Splunk Operator pod will lead to the termination of the existing operator pod. +2. All existing License Manager, Standalone, Monitoring Console, Cluster Manager, Search Head, ClusterManager, and Indexer pods will undergo termination for subsequent redeployment with upgraded specifications. +3. 
The Splunk Operator adheres to the upgrade path steps delineated in the Splunk documentation. Pod termination and redeployment occur in a specific order based on the recommended upgrade path. +4. Standalone or License manager will be the first to be redeployed +5. Next ClusterManager pod will be redeployed, next the Monitoring Console pod undergoes termination and redeployment. +6. Subsequently, the Search Head cluster pods connected to it are terminated and redeployed. +7. Afterwards, all pods in the Indexer cluster are redeployed sequentially. In cases where multi-zone Indexer clusters are utilized, they undergo redeployment zone by zone. +8. Each pod upgrade is meticulously verified to ensure a successful process, with thorough checks conducted to confirm that everything is functioning as expected. * Note: If there are multiple pods per Custom Resource, the pods are terminated and re-deployed in a descending order with the highest numbered pod going first diff --git a/docs/index.yaml b/docs/index.yaml index da55e317a..6ab024c75 100644 --- a/docs/index.yaml +++ b/docs/index.yaml @@ -1,9 +1,29 @@ apiVersion: v1 entries: splunk-enterprise: + - apiVersion: v2 + appVersion: 2.5.1 + created: "2024-02-20T10:52:46.032948-08:00" + dependencies: + - condition: splunk-operator.enabled + name: splunk-operator + repository: file://splunk-operator/helm-chart/splunk-operator + version: 2.5.1 + description: A Helm chart for Splunk Enterprise managed by the Splunk Operator + digest: 089e62bd7c5054b29f6c7cf6a9e72c4b3731debe90fe02ee3fc1eb10c5d2f455 + maintainers: + - email: vivekr@splunk.com + name: Vivek Reddy + - email: akondur@splunk.com + name: Arjun Kondur + name: splunk-enterprise + type: application + urls: + - https://splunk.github.io/splunk-operator/splunk-enterprise-2.5.1.tgz + version: 2.5.1 - apiVersion: v2 appVersion: 2.5.0 - created: "2024-01-22T12:51:00.460454-08:00" + created: "2024-02-20T10:52:45.982924-08:00" dependencies: - condition: splunk-operator.enabled 
name: splunk-operator @@ -23,7 +43,7 @@ entries: version: 2.5.0 - apiVersion: v2 appVersion: 2.4.0 - created: "2024-01-22T12:51:00.419046-08:00" + created: "2024-02-20T10:52:45.941086-08:00" dependencies: - condition: splunk-operator.enabled name: splunk-operator @@ -45,7 +65,7 @@ entries: version: 2.4.0 - apiVersion: v2 appVersion: 2.3.0 - created: "2024-01-22T12:51:00.387561-08:00" + created: "2024-02-20T10:52:45.912109-08:00" dependencies: - condition: splunk-operator.enabled name: splunk-operator @@ -67,7 +87,7 @@ entries: version: 2.3.0 - apiVersion: v2 appVersion: 2.2.1 - created: "2024-01-22T12:51:00.373328-08:00" + created: "2024-02-20T10:52:45.896417-08:00" dependencies: - condition: splunk-operator.enabled name: splunk-operator @@ -82,7 +102,7 @@ entries: version: 2.2.1 - apiVersion: v2 appVersion: 2.2.0 - created: "2024-01-22T12:51:00.357163-08:00" + created: "2024-02-20T10:52:45.881394-08:00" dependencies: - condition: splunk-operator.enabled name: splunk-operator @@ -97,7 +117,7 @@ entries: version: 2.2.0 - apiVersion: v2 appVersion: 2.1.0 - created: "2024-01-22T12:51:00.330725-08:00" + created: "2024-02-20T10:52:45.8558-08:00" dependencies: - condition: splunk-operator.enabled name: splunk-operator @@ -111,9 +131,24 @@ entries: - https://splunk.github.io/splunk-operator/splunk-enterprise-1.0.0.tgz version: 1.0.0 splunk-operator: + - apiVersion: v2 + appVersion: 2.5.1 + created: "2024-02-20T10:52:46.127737-08:00" + description: A Helm chart for the Splunk Operator for Kubernetes + digest: 5c90889e175bbfc79cbb7f83bf213de43a46c4d688574d04ff82aa16dcd8681a + maintainers: + - email: vivekr@splunk.com + name: Vivek Reddy + - email: akondur@splunk.com + name: Arjun Kondur + name: splunk-operator + type: application + urls: + - https://splunk.github.io/splunk-operator/splunk-operator-2.5.1.tgz + version: 2.5.1 - apiVersion: v2 appVersion: 2.5.0 - created: "2024-01-22T12:51:00.541778-08:00" + created: "2024-02-20T10:52:46.116065-08:00" description: A Helm chart 
for the Splunk Operator for Kubernetes digest: ed93f8fac421f92cfdbfd043ec27911a07ec7db2c05b4efc3137cef4f2bfca4a maintainers: @@ -128,7 +163,7 @@ entries: version: 2.5.0 - apiVersion: v2 appVersion: 2.4.0 - created: "2024-01-22T12:51:00.527611-08:00" + created: "2024-02-20T10:52:46.101936-08:00" description: A Helm chart for the Splunk Operator for Kubernetes digest: 9d0377747e46df4bf4b9dbd447c9ff46c926bfe2c66fd07d6d27a61abb31cb42 maintainers: @@ -145,7 +180,7 @@ entries: version: 2.4.0 - apiVersion: v2 appVersion: 2.3.0 - created: "2024-01-22T12:51:00.513743-08:00" + created: "2024-02-20T10:52:46.088599-08:00" description: A Helm chart for the Splunk Operator for Kubernetes digest: 23e70ec4059bc92920d7d3adce3bff6b8aba0d5eb5d4c0efe225bf3b88d5b274 maintainers: @@ -162,7 +197,7 @@ entries: version: 2.3.0 - apiVersion: v2 appVersion: 2.2.1 - created: "2024-01-22T12:51:00.500321-08:00" + created: "2024-02-20T10:52:46.070858-08:00" description: A Helm chart for the Splunk Operator for Kubernetes digest: 8868b9ae2ebde0c667b13c97d71d904a31b5a9f2c803b199bc77324f1727e1fd name: splunk-operator @@ -172,7 +207,7 @@ entries: version: 2.2.1 - apiVersion: v2 appVersion: 2.2.0 - created: "2024-01-22T12:51:00.487851-08:00" + created: "2024-02-20T10:52:46.056944-08:00" description: A Helm chart for the Splunk Operator for Kubernetes digest: 49c72276bd7ff93465b0545d8b0814f684cade7d2cd191b6d73d4c3660bd1fb4 name: splunk-operator @@ -182,7 +217,7 @@ entries: version: 2.2.0 - apiVersion: v2 appVersion: 2.1.0 - created: "2024-01-22T12:51:00.473934-08:00" + created: "2024-02-20T10:52:46.044805-08:00" description: A Helm chart for the Splunk Operator for Kubernetes digest: 34e5463f8f5442655d05cb616b50391b738a0827b30d8440b4c7fce99a291d9a name: splunk-operator @@ -190,4 +225,4 @@ entries: urls: - https://splunk.github.io/splunk-operator/splunk-operator-1.0.0.tgz version: 1.0.0 -generated: "2024-01-22T12:51:00.315345-08:00" +generated: "2024-02-20T10:52:45.840335-08:00" diff --git 
a/docs/splunk-enterprise-2.5.1.tgz b/docs/splunk-enterprise-2.5.1.tgz new file mode 100644 index 000000000..caf562900 Binary files /dev/null and b/docs/splunk-enterprise-2.5.1.tgz differ diff --git a/docs/splunk-operator-2.5.1.tgz b/docs/splunk-operator-2.5.1.tgz new file mode 100644 index 000000000..80feefdcd Binary files /dev/null and b/docs/splunk-operator-2.5.1.tgz differ diff --git a/helm-chart/splunk-enterprise/Chart.yaml b/helm-chart/splunk-enterprise/Chart.yaml index e2247b3b8..8884eea37 100644 --- a/helm-chart/splunk-enterprise/Chart.yaml +++ b/helm-chart/splunk-enterprise/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 2.5.0 +version: 2.5.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "2.5.0" +appVersion: "2.5.1" maintainers: - name: Vivek Reddy email: vivekr@splunk.com @@ -29,6 +29,6 @@ maintainers: email: akondur@splunk.com dependencies: - name: splunk-operator - version: "2.5.0" + version: "2.5.1" repository: "file://splunk-operator/helm-chart/splunk-operator" condition: splunk-operator.enabled diff --git a/helm-chart/splunk-enterprise/charts/splunk-operator-2.5.1.tgz b/helm-chart/splunk-enterprise/charts/splunk-operator-2.5.1.tgz new file mode 100644 index 000000000..80feefdcd Binary files /dev/null and b/helm-chart/splunk-enterprise/charts/splunk-operator-2.5.1.tgz differ diff --git a/helm-chart/splunk-operator/Chart.yaml b/helm-chart/splunk-operator/Chart.yaml index 8748cf5bb..0433e7090 100644 --- a/helm-chart/splunk-operator/Chart.yaml +++ b/helm-chart/splunk-operator/Chart.yaml @@ -19,10 +19,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: "2.5.0" +version: "2.5.1" # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "2.5.0" \ No newline at end of file +appVersion: "2.5.1" \ No newline at end of file diff --git a/helm-chart/splunk-operator/values.yaml b/helm-chart/splunk-operator/values.yaml index 120420278..06f2e02b2 100644 --- a/helm-chart/splunk-operator/values.yaml +++ b/helm-chart/splunk-operator/values.yaml @@ -32,7 +32,7 @@ splunkOperator: # Splunk operator image and pull policy # reference: https://github.com/splunk/splunk-operator image: - repository: docker.io/splunk/splunk-operator:2.5.0 + repository: docker.io/splunk/splunk-operator:2.5.1 pullPolicy: IfNotPresent # Set image pull secrets to pull image from a private registry diff --git a/main.go b/main.go index 0f6acd36b..4a425ff06 100644 --- a/main.go +++ b/main.go @@ -19,6 +19,7 @@ package main import ( "flag" "os" + "time" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. @@ -66,14 +67,34 @@ func main() { var logEncoder string var logLevel int - flag.StringVar(&logEncoder, "logEncoder", "json", "log encoding ('json' or 'console')") + var leaseDuration time.Duration + var renewDeadline time.Duration + var leaseDurationSecond int + var renewDeadlineSecond int + + flag.StringVar(&logEncoder, "log-encoder", "json", "log encoding ('json' or 'console')") flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. 
"+ "Enabling this will ensure there is only one active controller manager.") flag.BoolVar(&pprofActive, "pprof", true, "Enable pprof endpoint") - flag.IntVar(&logLevel, "loglevel", int(zapcore.InfoLevel), "set log level") + flag.IntVar(&logLevel, "log-level", int(zapcore.InfoLevel), "set log level") + flag.IntVar(&leaseDurationSecond, "lease-duration", int(leaseDurationSecond), "manager lease duration in seconds") + flag.IntVar(&renewDeadlineSecond, "renew-duration", int(renewDeadlineSecond), "manager renew duration in seconds") + + // see https://github.com/operator-framework/operator-sdk/issues/1813 + if leaseDurationSecond < 30 { + leaseDuration = 30 * time.Second + } else { + leaseDuration = time.Duration(leaseDurationSecond) * time.Second + } + + if renewDeadlineSecond < 20 { + renewDeadline = 20 * time.Second + } else { + renewDeadline = time.Duration(renewDeadlineSecond) * time.Second + } opts := zap.Options{ Development: true, @@ -92,6 +113,8 @@ func main() { HealthProbeBindAddress: probeAddr, LeaderElection: enableLeaderElection, LeaderElectionID: "270bec8c.splunk.com", + LeaseDuration: &leaseDuration, + RenewDeadline: &renewDeadline, } mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), config.ManagerOptionsWithNamespaces(setupLog, options))