Merge pull request #101 from scality/improvement/COSI-77-re-use-controller-namespace

COSI-77: Use same namespace in COSI driver as defined by CNCF (in COSI controller)
anurag4DSB authored Jan 14, 2025
2 parents f028105 + 08590af commit d2c62e4
Showing 23 changed files with 39 additions and 39 deletions.
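In practice, every reference to the retired `scality-object-storage` namespace now points at `container-object-storage-system`, the namespace the CNCF COSI controller already runs in. A minimal post-deploy check, assuming both the controller and this driver are installed (commands mirror the scripts changed below):

```bash
# Wait for the driver, then list everything sharing the controller's namespace.
kubectl wait --namespace container-object-storage-system \
  --for=condition=ready pod \
  --selector=app.kubernetes.io/name=scality-cosi-driver --timeout=30s
kubectl get pods -n container-object-storage-system
```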
2 changes: 1 addition & 1 deletion .github/scripts/capture_k8s_logs.sh
@@ -6,7 +6,7 @@ mkdir -p logs/kind_cluster_logs
LOG_FILE_PATH=".github/e2e_tests/artifacts/logs/kind_cluster_logs"
mkdir -p "$(dirname "$LOG_FILE_PATH")" # Ensure the log directory exists
# Define namespaces to capture logs from
namespaces=("default" "scality-object-storage")
namespaces=("default" "container-object-storage-system")

# Loop through specified namespaces, pods, and containers
for namespace in "${namespaces[@]}"; do
6 changes: 3 additions & 3 deletions .github/scripts/cleanup_cosi_resources.sh
@@ -19,11 +19,11 @@ log_and_run() {

log_and_run echo "Removing COSI driver manifests and namespace..."
log_and_run kubectl delete -k . || { echo "COSI driver manifests not found." | tee -a "$LOG_FILE"; }
-log_and_run kubectl delete namespace scality-object-storage || { echo "Namespace scality-object-storage not found." | tee -a "$LOG_FILE"; }
+log_and_run kubectl delete namespace container-object-storage-system || { echo "Namespace container-object-storage-system not found." | tee -a "$LOG_FILE"; }

log_and_run echo "Verifying namespace deletion..."
-if kubectl get namespace scality-object-storage &>/dev/null; then
-  echo "Warning: Namespace scality-object-storage was not deleted." | tee -a "$LOG_FILE"
+if kubectl get namespace container-object-storage-system &>/dev/null; then
+  echo "Warning: Namespace container-object-storage-system was not deleted." | tee -a "$LOG_FILE"
  exit 1
fi

2 changes: 1 addition & 1 deletion .github/scripts/e2e_tests_brownfield_use_case.sh
@@ -9,7 +9,7 @@ SECRET_NAME="brownfield-bucket-secret"
IAM_ENDPOINT="http://$HOST_IP:8600"
S3_ENDPOINT="http://$HOST_IP:8000"
BUCKET_NAME="brownfield-bucket"
NAMESPACE="scality-object-storage"
NAMESPACE="container-object-storage-system"
REGION="us-west-1"

# Error handling function
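A brownfield test presupposes a bucket that already exists on the backend. A hedged sketch of that pre-creation step, reusing this hunk's variables (the AWS CLI invocation is an assumption, not the script's actual body):

```bash
# Assumed out-of-band bucket creation against the S3 endpoint defined above;
# a region like us-west-1 requires an explicit LocationConstraint.
aws s3api create-bucket --bucket "$BUCKET_NAME" \
  --endpoint-url "$S3_ENDPOINT" \
  --region "$REGION" \
  --create-bucket-configuration LocationConstraint="$REGION"
```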
2 changes: 1 addition & 1 deletion .github/scripts/e2e_tests_metrics.sh
@@ -4,7 +4,7 @@ set -e
LOG_FILE=".github/e2e_tests/artifacts/logs/e2e_tests/metrics_service.log"
mkdir -p "$(dirname "$LOG_FILE")"

NAMESPACE="scality-object-storage"
NAMESPACE="container-object-storage-system"
SERVICE="scality-cosi-driver-metrics"
LOCAL_PORT=8080
TARGET_PORT=8080
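These variables point at the driver's metrics Service. A sketch of the port-forward the script presumably drives (the `/metrics` path is an assumption):

```bash
# Forward the metrics Service locally and sample its output.
kubectl port-forward -n "$NAMESPACE" "svc/$SERVICE" "$LOCAL_PORT:$TARGET_PORT" &
sleep 2  # give the forward a moment to establish
curl -s "http://localhost:$LOCAL_PORT/metrics" | head
```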
4 changes: 2 additions & 2 deletions .github/scripts/setup_cosi_resources.sh
@@ -54,10 +54,10 @@ fi

# Step 6: Verify COSI driver Pod Status
log_and_run echo "Verifying COSI driver Pod status..."
-if ! kubectl wait --namespace scality-object-storage --for=condition=ready pod --selector=app.kubernetes.io/name=scality-cosi-driver --timeout=30s; then
+if ! kubectl wait --namespace container-object-storage-system --for=condition=ready pod --selector=app.kubernetes.io/name=scality-cosi-driver --timeout=30s; then
  echo "Error: COSI driver Pod did not reach ready state." | tee -a "$LOG_FILE"
  exit 1
fi
-log_and_run kubectl get pods -n scality-object-storage
+log_and_run kubectl get pods -n container-object-storage-system

log_and_run echo "COSI setup completed successfully."
2 changes: 1 addition & 1 deletion .github/scripts/verify_helm_install.sh
@@ -1,7 +1,7 @@
#!/bin/bash
set -e

NAMESPACE="scality-object-storage"
NAMESPACE="container-object-storage-system"

echo "Verifying Helm installation..."

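Only the namespace variable changes here; the verification body is collapsed. One way such a check might look at the Helm level (hypothetical, not the script's actual contents):

```bash
# Confirm the release reports as deployed in the controller's namespace.
helm status scality-cosi-driver -n container-object-storage-system
```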
10 changes: 5 additions & 5 deletions .github/workflows/helm-validation.yml
@@ -60,14 +60,14 @@ jobs:
      - name: Install Scality COSI Helm Chart
        run: |
          helm install scality-cosi-driver ./helm/scality-cosi-driver \
-           --namespace scality-object-storage \
+           --namespace container-object-storage-system \
            --create-namespace \
            --set image.tag=latest \
            --set traces.otel_stdout=true
-     - name: Print all resources in scality-object-storage namespace
+     - name: Print all resources in container-object-storage-system namespace
        run: |
-         kubectl get all -n scality-object-storage
+         kubectl get all -n container-object-storage-system
      - name: Verify Helm Installation
        run: |
@@ -94,6 +94,6 @@ jobs:

      - name: Cleanup Helm Release and Namespace
        run: |
-         helm uninstall scality-cosi-driver -n scality-object-storage
-         kubectl delete namespace scality-object-storage
+         helm uninstall scality-cosi-driver -n container-object-storage-system
+         kubectl delete namespace container-object-storage-system
        if: always()
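One side effect worth flagging: the cleanup step now deletes a namespace the CNCF COSI controller may also occupy, so anything else living there goes with it. A hedged pre-flight before the teardown:

```bash
# Hypothetical guard: inspect the shared namespace before deleting it wholesale.
kubectl get all -n container-object-storage-system
```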
2 changes: 1 addition & 1 deletion cosi-examples/brownfield/bucket.yaml
@@ -2,7 +2,7 @@ apiVersion: objectstorage.k8s.io/v1alpha1
kind: Bucket
metadata:
  name: brownfield-bucket # should be same as bucket name
-  namespace: scality-object-storage
+  namespace: container-object-storage-system
spec:
  bucketClaim: {}
  bucketClassName: brownfield-bucket-class
2 changes: 1 addition & 1 deletion cosi-examples/brownfield/bucketaccess.yaml
@@ -2,7 +2,7 @@ apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketAccess
metadata:
  name: brownfield-bucket-access
-  namespace: scality-object-storage
+  namespace: container-object-storage-system
spec:
  bucketAccessClassName: brownfield-bucket-access-class
  bucketClaimName: brownfield-bucket-claim
2 changes: 1 addition & 1 deletion cosi-examples/brownfield/bucketaccessclass.yaml
@@ -2,7 +2,7 @@ kind: BucketAccessClass
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
  name: brownfield-bucket-access-class
-  namespace: scality-object-storage
+  namespace: container-object-storage-system
driverName: cosi.scality.com
authenticationType: KEY
parameters:
2 changes: 1 addition & 1 deletion cosi-examples/brownfield/bucketclaim.yaml
@@ -2,7 +2,7 @@ apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketClaim
metadata:
  name: brownfield-bucket-claim
-  namespace: scality-object-storage
+  namespace: container-object-storage-system
spec:
  bucketClassName: brownfield-bucket-class
  existingBucketName: brownfield-bucket # name of Bucket object
2 changes: 1 addition & 1 deletion cosi-examples/brownfield/bucketclass.yaml
@@ -2,7 +2,7 @@ apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketClass
metadata:
  name: brownfield-bucket-class
-  namespace: scality-object-storage
+  namespace: container-object-storage-system
driverName: cosi.scality.com
deletionPolicy: Delete
parameters:
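With all five brownfield examples retargeted, a hypothetical smoke test (paths follow the file headers above; `bucketclaims` is the resource plural used in the RBAC rules below):

```bash
# Apply the retargeted examples and confirm the claim lands in the shared namespace.
kubectl apply -f cosi-examples/brownfield/
kubectl get bucketclaims -n container-object-storage-system
```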
6 changes: 3 additions & 3 deletions docs/development/remote-debugging-golang-on-kubernetes.md
@@ -44,7 +44,7 @@ kubectl apply -k kustomize/overlays/debug
Wait until the pod is ready to ensure the deployment succeeded:

```bash
-kubectl wait --namespace scality-object-storage --for=condition=ready pod --selector=app.kubernetes.io/name=scality-cosi-driver --timeout=120s
+kubectl wait --namespace container-object-storage-system --for=condition=ready pod --selector=app.kubernetes.io/name=scality-cosi-driver --timeout=120s
```

---
@@ -54,13 +54,13 @@ kubectl wait --namespace scality-object-storage --for=condition=ready pod --sele
Identify the pod name for the COSI driver:

```bash
-kubectl get pods -n scality-object-storage
+kubectl get pods -n container-object-storage-system
```

Forward port `2345` from the Kubernetes pod to your local machine to connect VS Code to the Delve debugger:

```bash
-kubectl port-forward -n scality-object-storage pod/<pod-name> 2345:2345
+kubectl port-forward -n container-object-storage-system pod/<pod-name> 2345:2345
```

Replace `<pod-name>` with the actual name of the pod.
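For attaching from a terminal instead of VS Code, the Delve CLI offers an equivalent (assuming Delve listens on the forwarded port 2345, as described above):

```bash
# Attach to the forwarded Delve port from the previous step.
dlv connect localhost:2345
```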
4 changes: 2 additions & 2 deletions docs/installation/install-helm.md
@@ -36,7 +36,7 @@ This guide provides step-by-step instructions for installing the Scality COSI Dr
```bash
git clone https://github.com/scality/cosi-driver.git
cd cosi-driver
-helm install scality-cosi-driver ./helm/scality-cosi-driver --namespace scality-object-storage --create-namespace --set image.tag=0.1.0
+helm install scality-cosi-driver ./helm/scality-cosi-driver --namespace container-object-storage-system --create-namespace --set image.tag=0.1.0
```

### Package locally and install
@@ -45,7 +45,7 @@ This guide provides step-by-step instructions for installing the Scality COSI Dr
git clone https://github.com/scality/cosi-driver.git
cd cosi-driver
helm package ./helm/scality-cosi-driver --version 0.1.0
-helm install scality-cosi-driver ./scality-cosi-driver-0.1.0.tgz --namespace scality-object-storage --create-namespace --set image.tag=0.1.0
+helm install scality-cosi-driver ./scality-cosi-driver-0.1.0.tgz --namespace container-object-storage-system --create-namespace --set image.tag=0.1.0
```

### Install from OCI Registry with Helm
6 changes: 3 additions & 3 deletions helm/scality-cosi-driver/templates/rbac.yaml
@@ -1,7 +1,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
-  name: scality-object-storage-provisioner-role
+  name: scality-cosi-driver-provisioner-role
rules:
- apiGroups: ["objectstorage.k8s.io"]
  resources: ["buckets", "bucketaccesses", "bucketclaims", "bucketaccessclasses"]
@@ -17,12 +17,12 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
-  name: scality-object-storage-provisioner-role-binding
+  name: scality-cosi-driver-provisioner-role-binding
subjects:
- kind: ServiceAccount
  name: {{ .Values.serviceAccount.name }}
  namespace: {{ .Values.namespace }}
roleRef:
  kind: ClusterRole
-  name: scality-object-storage-provisioner-role
+  name: scality-cosi-driver-provisioner-role
  apiGroup: rbac.authorization.k8s.io
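ClusterRole and ClusterRoleBinding are cluster-scoped, so only their names change here; the namespace move surfaces solely through the ServiceAccount subject. A quick post-install check (object name taken from this hunk):

```bash
# The binding should reference the renamed role and the service account
# in the templated namespace.
kubectl get clusterrolebinding scality-cosi-driver-provisioner-role-binding -o yaml
```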
4 changes: 2 additions & 2 deletions helm/scality-cosi-driver/values.yaml
@@ -18,12 +18,12 @@ logLevels:
  sidecar: "2"


-namespace: scality-object-storage
+namespace: container-object-storage-system
fullnameOverride: scality-cosi-driver


serviceAccount:
-  name: scality-object-storage-provisioner
+  name: scality-cosi-driver-provisioner
  create: true

metrics:
2 changes: 1 addition & 1 deletion kustomize/base/deployment.yaml
@@ -16,7 +16,7 @@ spec:
        app.kubernetes.io/version: main
        app.kubernetes.io/managed-by: kustomize
    spec:
-      serviceAccountName: scality-object-storage-provisioner
+      serviceAccountName: scality-cosi-driver-provisioner
      containers:
        - name: scality-cosi-driver
          image: ghcr.io/scality/cosi-driver:latest
2 changes: 1 addition & 1 deletion kustomize/base/kustomization.yaml
@@ -1,7 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

-namespace: scality-object-storage
+namespace: container-object-storage-system

resources:
- namespace.yaml
2 changes: 1 addition & 1 deletion kustomize/base/namespace.yaml
@@ -1,4 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
-  name: scality-object-storage
+  name: scality-cosi-driver
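Note the Namespace object itself is named `scality-cosi-driver` rather than the shared namespace; the apparent expectation (an assumption about kustomize's namespace transformer) is that the `namespace:` field in kustomization.yaml retargets emitted resources at build time. A render to confirm:

```bash
# Inspect what the base actually emits after the namespace transformer runs.
kubectl kustomize kustomize/base | grep -B 1 -A 2 "kind: Namespace"
```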
8 changes: 4 additions & 4 deletions kustomize/base/rbac.yaml
@@ -1,7 +1,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
-  name: scality-object-storage-provisioner-role
+  name: scality-cosi-driver-provisioner-role
rules:
- apiGroups: ["objectstorage.k8s.io"]
  resources: ["buckets", "bucketaccesses", "bucketclaims", "bucketaccessclasses", "buckets/status", "bucketaccesses/status", "bucketclaims/status", "bucketaccessclasses/status"]
@@ -20,12 +20,12 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
-  name: scality-object-storage-provisioner-role-binding
+  name: scality-cosi-driver-provisioner-role-binding
subjects:
- kind: ServiceAccount
-  name: scality-object-storage-provisioner
+  name: scality-cosi-driver-provisioner
  namespace: default
roleRef:
  kind: ClusterRole
-  name: scality-object-storage-provisioner-role
+  name: scality-cosi-driver-provisioner-role
  apiGroup: rbac.authorization.k8s.io
2 changes: 1 addition & 1 deletion kustomize/base/serviceaccount.yaml
@@ -1,5 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
-  name: scality-object-storage-provisioner
+  name: scality-cosi-driver-provisioner
  namespace: default
2 changes: 1 addition & 1 deletion kustomize/overlays/debug/deployment.yaml
@@ -16,7 +16,7 @@ spec:
        app.kubernetes.io/version: main
        app.kubernetes.io/managed-by: kustomize
    spec:
-      serviceAccountName: scality-object-storage-provisioner
+      serviceAccountName: scality-cosi-driver-provisioner
      containers:
        - name: scality-cosi-driver
          image: ghcr.io/scality/cosi-driver-delve:latest
2 changes: 1 addition & 1 deletion kustomize/overlays/dev/kustomization.yaml
@@ -1,7 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

-namespace: scality-object-storage
+namespace: container-object-storage-system

configMapGenerator:
- name: scality-cosi-driver-properties
