Merge pull request #68 from scality/bugfix/COSI-74-remove-silent-errors
COSI 74, COSI 75: Brownfield use case (re-use existing S3 buckets in Kube)
anurag4DSB authored Dec 20, 2024
2 parents 52e13ca + a3e7337 commit a9cf51d
Showing 18 changed files with 195 additions and 33 deletions.
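At a glance, the brownfield flow exercised by this change looks like the following sketch, using the example manifests and commands added in this commit (the S3 endpoint is environment-specific and shown here as a placeholder):

# Sketch of the brownfield flow; $S3_ENDPOINT is a placeholder for your S3 endpoint.
aws s3api create-bucket --bucket brownfield-bucket --endpoint-url "$S3_ENDPOINT"   # or start from an existing bucket
kubectl apply -f cosi-examples/brownfield/bucketclass.yaml
kubectl apply -f cosi-examples/brownfield/bucket.yaml              # Bucket object pointing at the existing bucket
kubectl apply -f cosi-examples/brownfield/bucketclaim.yaml         # BucketClaim referencing that Bucket
kubectl apply -f cosi-examples/brownfield/bucketaccessclass.yaml
kubectl apply -f cosi-examples/brownfield/bucketaccess.yaml        # produces the brownfield-bucket-secret credentials Secret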
8 changes: 4 additions & 4 deletions .github/scripts/cleanup_cosi_resources.sh
@@ -52,12 +52,12 @@ for BUCKET_NAME in $BUCKET_NAMES; do
done

log_and_run echo "Deleting Bucket Access Class..."
log_and_run kubectl delete -f cosi-examples/bucketaccessclass.yaml --all || { echo "No BucketAccessClass resources found." | tee -a "$LOG_FILE"; }
log_and_run kubectl delete -f cosi-examples/greenfield/bucketaccessclass.yaml --all || { echo "No BucketAccessClass resources found." | tee -a "$LOG_FILE"; }

log_and_run echo "Deleting Bucket Class and Bucket Claim..."
log_and_run kubectl delete -f cosi-examples/bucketclass.yaml || { echo "Bucket Class not found." | tee -a "$LOG_FILE"; }
log_and_run kubectl delete -f cosi-examples/bucketclaim.yaml || { echo "Bucket Claim not found." | tee -a "$LOG_FILE"; }
log_and_run kubectl delete -f cosi-examples/bucketclass-delete-on-claim-removal.yaml || { echo "Bucket Class not found." | tee -a "$LOG_FILE"; }
log_and_run kubectl delete -f cosi-examples/greenfield/bucketclass.yaml || { echo "Bucket Class not found." | tee -a "$LOG_FILE"; }
log_and_run kubectl delete -f cosi-examples/greenfield/bucketclaim.yaml || { echo "Bucket Claim not found." | tee -a "$LOG_FILE"; }
log_and_run kubectl delete -f cosi-examples/greenfield/bucketclass-deletion-policy.yaml || { echo "Bucket Class not found." | tee -a "$LOG_FILE"; }

log_and_run echo "Deleting s3-secret-for-cosi secret..."
log_and_run kubectl delete secret s3-secret-for-cosi --namespace=default || { echo "Secret s3-secret-for-cosi not found." | tee -a "$LOG_FILE"; }
119 changes: 119 additions & 0 deletions .github/scripts/e2e_tests_brownfield_use_case.sh
@@ -0,0 +1,119 @@
#!/bin/bash
set -e

LOG_FILE=".github/e2e_tests/artifacts/logs/e2e_tests/brownfield.log"
mkdir -p "$(dirname "$LOG_FILE")"

HOST_IP=$(hostname -I | awk '{print $1}')
SECRET_NAME="brownfield-bucket-secret"
IAM_ENDPOINT="http://$HOST_IP:8600"
S3_ENDPOINT="http://$HOST_IP:8000"
BUCKET_NAME="brownfield-bucket"
NAMESPACE="scality-object-storage"
REGION="us-west-1"

# Error handling function
error_handler() {
  echo "An error occurred during the brownfield use case tests. Check the log file for details." | tee -a "$LOG_FILE"
  echo "Failed command: $BASH_COMMAND" | tee -a "$LOG_FILE"
  exit 1
}

# Trap errors and call the error handler
trap 'error_handler' ERR

# Log command execution to the log file for debugging
log_and_run() {
  "$@" 2>&1 | tee -a "$LOG_FILE"
}


# Create the bucket for the brownfield scenario
log_and_run echo "Creating bucket: $BUCKET_NAME"
log_and_run aws s3api create-bucket --bucket "$BUCKET_NAME" --region $REGION --endpoint-url "$S3_ENDPOINT"

# Check if the bucket exists
log_and_run echo "Checking if bucket $BUCKET_NAME exists"
aws --endpoint-url "$S3_ENDPOINT" s3api head-bucket --bucket "$BUCKET_NAME"
log_and_run echo "Bucket $BUCKET_NAME exists!"

log_and_run echo "Applying Bucket Class to use existing bucket..."
log_and_run kubectl apply -f cosi-examples/brownfield/bucketclass.yaml

log_and_run echo "Manually creating Bucket object with existing bucket..."
log_and_run kubectl apply -f cosi-examples/brownfield/bucket.yaml

log_and_run echo "Applying Bucket Claim referencing the Bucket object..."
log_and_run kubectl apply -f cosi-examples/brownfield/bucketclaim.yaml

log_and_run echo "Applying Bucket Access Class..."
log_and_run kubectl apply -f cosi-examples/brownfield/bucketaccessclass.yaml

log_and_run echo "Applying Bucket Access..."
log_and_run kubectl apply -f cosi-examples/brownfield/bucketaccess.yaml

log_and_run echo "Verifying brownfield-bucket-secret in the default namespace..."
SECRET_JSON="$(kubectl get secret "$SECRET_NAME" --namespace "$NAMESPACE" -o json)"

# Decode the Base64 encoded BucketInfo
BUCKET_INFO_BASE64="$(echo "$SECRET_JSON" | jq -r '.data.BucketInfo')"
BUCKET_INFO_JSON="$(echo "$BUCKET_INFO_BASE64" | base64 --decode)"

log_and_run echo "Decoded BucketInfo: $BUCKET_INFO_JSON"

# Extract values to verify
ACTUAL_BUCKET_NAME=$(echo "$BUCKET_INFO_JSON" | jq -r '.spec.bucketName')
ACTUAL_ENDPOINT=$(echo "$BUCKET_INFO_JSON" | jq -r '.spec.secretS3.endpoint')
ACTUAL_REGION=$(echo "$BUCKET_INFO_JSON" | jq -r '.spec.secretS3.region')
ACTUAL_ACCESS_KEY_ID=$(echo "$BUCKET_INFO_JSON" | jq -r '.spec.secretS3.accessKeyID')
ACTUAL_ACCESS_SECRET_KEY=$(echo "$BUCKET_INFO_JSON" | jq -r '.spec.secretS3.accessSecretKey')
ACTUAL_PROTOCOLS=$(echo "$BUCKET_INFO_JSON" | jq -c '.spec.protocols')

# Verify bucketName
if [[ "$ACTUAL_BUCKET_NAME" != "$BUCKET_NAME" ]]; then
log_and_run echo "Bucket name mismatch! Expected: $BUCKET_NAME, Found: $ACTUAL_BUCKET_NAME"
exit 1
fi

# Verify endpoint
EXPECTED_ENDPOINT="$S3_ENDPOINT"
if [[ "$ACTUAL_ENDPOINT" != "$EXPECTED_ENDPOINT" ]]; then
log_and_run echo "Endpoint mismatch! Expected: $EXPECTED_ENDPOINT, Found: $ACTUAL_ENDPOINT"
exit 1
fi

# Verify region
if [[ "$ACTUAL_REGION" != "$REGION" ]]; then
log_and_run echo "Region mismatch! Expected: $REGION, Found: $ACTUAL_REGION"
exit 1
fi

# Verify accessKeyID exists
if [[ -z "$ACTUAL_ACCESS_KEY_ID" ]]; then
  log_and_run echo "AccessKeyID is empty!"
  exit 1
fi

# Verify accessSecretKey exists
if [[ -z "$ACTUAL_ACCESS_SECRET_KEY" ]]; then
  log_and_run echo "AccessSecretKey is empty!"
  exit 1
fi

# Verify protocol
EXPECTED_PROTOCOLS='["s3"]'
if [[ "$ACTUAL_PROTOCOLS" != "$EXPECTED_PROTOCOLS" ]]; then
log_and_run echo "Protocols mismatch! Expected: $EXPECTED_PROTOCOLS, Found: $ACTUAL_PROTOCOLS"
exit 1
fi

# cleanup
log_and_run kubectl delete -f cosi-examples/brownfield/bucketaccess.yaml
log_and_run kubectl delete -f cosi-examples/brownfield/bucketaccessclass.yaml
log_and_run kubectl delete -f cosi-examples/brownfield/bucketclaim.yaml
log_and_run kubectl delete -f cosi-examples/brownfield/bucketclass.yaml

# Check that the bucket was not deleted and the Retain deletion policy was respected
log_and_run echo "Checking if bucket $BUCKET_NAME exists"
aws --endpoint-url "$S3_ENDPOINT" s3api head-bucket --bucket "$BUCKET_NAME"
log_and_run echo "Bucket $BUCKET_NAME has been retained!"
.github/scripts/e2e_tests_greenfield_use_case.sh
@@ -2,7 +2,7 @@
set -e

# Define log file for debugging
LOG_FILE=".github/e2e_tests/artifacts/logs/e2e_tests/bucket_creation_test.log"
LOG_FILE=".github/e2e_tests/artifacts/logs/e2e_tests/greenfield.log"
mkdir -p "$(dirname "$LOG_FILE")" # Ensure the log directory exists

CONTAINER_NAME=s3_and_iam_deployment-iam-1
@@ -83,19 +83,19 @@ EOF

# Step 4: Apply Bucket Class
log_and_run echo "Applying Bucket Class..."
log_and_run kubectl apply -f cosi-examples/bucketclass.yaml
log_and_run kubectl apply -f cosi-examples/greenfield/bucketclass.yaml

# Step 5: Apply Bucket Claim
log_and_run echo "Applying Bucket Claim..."
log_and_run kubectl apply -f cosi-examples/bucketclaim.yaml
log_and_run kubectl apply -f cosi-examples/greenfield/bucketclaim.yaml

# Step 6: Apply Bucket Access Class
log_and_run echo "Applying Bucket Access Class..."
log_and_run kubectl apply -f cosi-examples/bucketaccessclass.yaml
log_and_run kubectl apply -f cosi-examples/greenfield/bucketaccessclass.yaml

# Step 7: Apply Bucket Access
log_and_run echo "Applying Bucket Access..."
log_and_run kubectl apply -f cosi-examples/bucketaccess.yaml
log_and_run kubectl apply -f cosi-examples/greenfield/bucketaccess.yaml

# Step 8: Verify Bucket Creation with Retry
log_and_run echo "Listing all S3 buckets before verification..."
@@ -213,7 +213,7 @@ fi

# Step 11: Delete Bucket Access Resource
log_and_run echo "Deleting Bucket Access Resource..."
log_and_run kubectl delete -f cosi-examples/bucketaccess.yaml
log_and_run kubectl delete -f cosi-examples/greenfield/bucketaccess.yaml

# Step 12: Verify IAM User Deletion
log_and_run echo "Verifying IAM user '$IAM_USER_NAME' deletion..."
@@ -230,8 +230,8 @@ fi
# Step 13: Test deletion bucket with deletion policy set

log_and_run echo "Applying Bucket Class with deletion policy and respective Bucket Claim..."
log_and_run kubectl apply -f cosi-examples/bucketclass-deletion-policy.yaml
log_and_run kubectl apply -f cosi-examples/bucketclaim-deletion-policy.yaml
log_and_run kubectl apply -f cosi-examples/greenfield/bucketclass-deletion-policy.yaml
log_and_run kubectl apply -f cosi-examples/greenfield/bucketclaim-deletion-policy.yaml

log_and_run echo "Listing all S3 buckets before deletion..."
log_and_run aws s3 ls --endpoint-url "$S3_ENDPOINT"
@@ -259,7 +259,7 @@ if [ -z "$BUCKET_TO_BE_DELETED" ]; then
fi

log_and_run echo "Deleting Bucket Claim..."
log_and_run kubectl delete -f cosi-examples/bucketclaim-deletion-policy.yaml
log_and_run kubectl delete -f cosi-examples/greenfield/bucketclaim-deletion-policy.yaml

# Check whether the bucket named $BUCKET_TO_BE_DELETED still exists by issuing a head-bucket call.
# If it still exists, retry up to ATTEMPTS times with DELAY between attempts; if it is not found, the test succeeds.
9 changes: 6 additions & 3 deletions .github/workflows/ci-e2e-tests.yml
@@ -96,10 +96,13 @@ jobs:
docker save "$CLOUDSERVER_IMAGE" -o /tmp/.docker_cache/cloudserver_image.tar
shell: bash

- name: E2E tests for COSI driver using kustomize
- name: E2E tests for greenfield use case using kustomize
run: |
pwd
.github/scripts/e2e_tests.sh
.github/scripts/e2e_tests_greenfield_use_case.sh
- name: E2E tests for brownfield use case using kustomize
run: |
.github/scripts/e2e_tests_brownfield_use_case.sh
- name: "Delay completion"
if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }}
16 changes: 16 additions & 0 deletions cosi-examples/brownfield/bucket.yaml
@@ -0,0 +1,16 @@
apiVersion: objectstorage.k8s.io/v1alpha1
kind: Bucket
metadata:
  name: brownfield-bucket # should be same as bucket name
  namespace: scality-object-storage
spec:
  bucketClaim: {}
  bucketClassName: brownfield-bucket-class
  driverName: cosi.scality.com
  deletionPolicy: Retain
  existingBucketID: brownfield-bucket # name of pre-existing bucket in S3
  parameters:
    objectStorageSecretName: s3-secret-for-cosi
    objectStorageSecretNamespace: default
  protocols:
    - S3
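Because existingBucketID must point at a bucket that already exists on the S3 endpoint, a head-bucket pre-check (mirroring the e2e script above; the endpoint value is environment-specific) is a sensible guard before applying this manifest:

# Sketch: confirm the pre-existing bucket before importing it; $S3_ENDPOINT is a placeholder.
aws --endpoint-url "$S3_ENDPOINT" s3api head-bucket --bucket brownfield-bucket \
  && kubectl apply -f cosi-examples/brownfield/bucket.yaml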
10 changes: 10 additions & 0 deletions cosi-examples/brownfield/bucketaccess.yaml
@@ -0,0 +1,10 @@
apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketAccess
metadata:
  name: brownfield-bucket-access
  namespace: scality-object-storage
spec:
  bucketAccessClassName: brownfield-bucket-access-class
  bucketClaimName: brownfield-bucket-claim
  credentialsSecretName: brownfield-bucket-secret
  protocol: s3
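Once this BucketAccess is provisioned, the connection details are written into the credentialsSecretName Secret as a Base64-encoded BucketInfo document, which can be inspected the same way the e2e test does:

# Decode the BucketInfo payload from the generated Secret (mirrors the e2e script in this commit).
kubectl get secret brownfield-bucket-secret --namespace scality-object-storage -o json \
  | jq -r '.data.BucketInfo' | base64 --decode | jq .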
10 changes: 10 additions & 0 deletions cosi-examples/brownfield/bucketaccessclass.yaml
@@ -0,0 +1,10 @@
kind: BucketAccessClass
apiVersion: objectstorage.k8s.io/v1alpha1
metadata:
  name: brownfield-bucket-access-class
  namespace: scality-object-storage
driverName: cosi.scality.com
authenticationType: KEY
parameters:
  objectStorageSecretName: s3-secret-for-cosi
  objectStorageSecretNamespace: default
10 changes: 10 additions & 0 deletions cosi-examples/brownfield/bucketclaim.yaml
@@ -0,0 +1,10 @@
apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketClaim
metadata:
  name: brownfield-bucket-claim
  namespace: scality-object-storage
spec:
  bucketClassName: brownfield-bucket-class
  existingBucketName: brownfield-bucket # name of Bucket object
  protocols:
    - S3
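The claim binds to the pre-created Bucket object through existingBucketName instead of asking the driver to provision a new bucket; a plain kubectl query is enough to check the binding (a sketch only, since the exact status fields depend on the COSI controller version):

# Inspect the claim and the Bucket it references; status field names may vary by COSI version.
kubectl get bucketclaim brownfield-bucket-claim -n scality-object-storage -o yaml
kubectl get bucket brownfield-bucket -o yaml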
10 changes: 10 additions & 0 deletions cosi-examples/brownfield/bucketclass.yaml
@@ -0,0 +1,10 @@
apiVersion: objectstorage.k8s.io/v1alpha1
kind: BucketClass
metadata:
  name: brownfield-bucket-class
  namespace: scality-object-storage
driverName: cosi.scality.com
deletionPolicy: Delete
parameters:
  objectStorageSecretName: s3-secret-for-cosi
  objectStorageSecretNamespace: default
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
2 changes: 1 addition & 1 deletion docs/driver-params.md
@@ -9,7 +9,7 @@ The table below details the configuration parameters for BucketClass, which dete
| `objectStorageSecretName` | The name of the Kubernetes secret containing S3 credentials and configuration. | `string` | Yes |
| `objectStorageSecretNamespace` | The namespace in which the secret is located (e.g., `default`). | `string` (e.g., `default`) | Yes |

[Example](../cosi-examples/bucketclass.yaml)
[Example](../cosi-examples/greenfield/bucketclass.yaml)

## Configuration Parameters for Kubernetes secret containing S3 credentials and configuration

6 changes: 0 additions & 6 deletions pkg/driver/provisioner_server_impl.go
@@ -129,16 +129,10 @@ func (s *ProvisionerServer) DriverCreateBucket(ctx context.Context,
	err = s3Client.CreateBucket(ctx, bucketName, *s3Params)
	if err != nil {
		var bucketAlreadyExists *s3types.BucketAlreadyExists
		var bucketOwnedByYou *s3types.BucketAlreadyOwnedByYou

		if errors.As(err, &bucketAlreadyExists) {
			klog.V(c.LvlInfo).InfoS("Bucket already exists", "bucketName", bucketName)
			return nil, status.Errorf(codes.AlreadyExists, "Bucket already exists: %s", bucketName)
		} else if errors.As(err, &bucketOwnedByYou) {
			klog.V(c.LvlInfo).InfoS("Bucket already exists and is owned by you", "bucketName", bucketName)
			return &cosiapi.DriverCreateBucketResponse{
				BucketId: bucketName,
			}, nil
		} else {
			klog.ErrorS(err, "Failed to create bucket", "bucketName", bucketName)
			return nil, status.Error(codes.Internal, "Failed to create bucket")
10 changes: 0 additions & 10 deletions pkg/driver/provisioner_server_impl_test.go
@@ -271,16 +271,6 @@ var _ = Describe("ProvisionerServer DriverCreateBucket", Ordered, func() {
		Expect(status.Code(err)).To(Equal(codes.AlreadyExists))
	})

	It("should return success if bucket already owned by you", func(ctx SpecContext) {
		mockS3.CreateBucketFunc = func(ctx context.Context, input *s3.CreateBucketInput, _ ...func(*s3.Options)) (*s3.CreateBucketOutput, error) {
			return nil, &types.BucketAlreadyOwnedByYou{}
		}

		resp, err := provisioner.DriverCreateBucket(ctx, request)
		Expect(err).To(BeNil())
		Expect(resp.BucketId).To(Equal(testBucketName))
	})

	It("should return Internal error for other S3 errors", func(ctx SpecContext) {
		mockS3.CreateBucketFunc = func(ctx context.Context, input *s3.CreateBucketInput, _ ...func(*s3.Options)) (*s3.CreateBucketOutput, error) {
			return nil, errors.New("SomeOtherError")
