diff --git a/config/crd/bases/psmdb.percona.com_perconaservermongodbs.yaml b/config/crd/bases/psmdb.percona.com_perconaservermongodbs.yaml index cf4029a9f..365055c8f 100644 --- a/config/crd/bases/psmdb.percona.com_perconaservermongodbs.yaml +++ b/config/crd/bases/psmdb.percona.com_perconaservermongodbs.yaml @@ -8226,6 +8226,64 @@ spec: - size type: object type: array + roles: + items: + properties: + authenticationRestrictions: + items: + properties: + clientSource: + items: + type: string + type: array + serverAddress: + items: + type: string + type: array + type: object + type: array + db: + type: string + privileges: + items: + properties: + actions: + items: + type: string + type: array + resource: + properties: + cluster: + type: boolean + collection: + type: string + db: + type: string + type: object + required: + - actions + type: object + type: array + role: + type: string + roles: + items: + properties: + db: + type: string + role: + type: string + required: + - db + - role + type: object + type: array + required: + - db + - privileges + - role + type: object + type: array schedulerName: type: string secrets: @@ -18485,14 +18543,10 @@ spec: key: type: string name: - default: "" type: string - optional: - type: boolean required: - - key + - name type: object - x-kubernetes-map-type: atomic roles: items: properties: @@ -18506,7 +18560,6 @@ spec: type: object type: array required: - - db - name - passwordSecretRef - roles diff --git a/deploy/bundle.yaml b/deploy/bundle.yaml index 7027a81ce..85a7dee0c 100644 --- a/deploy/bundle.yaml +++ b/deploy/bundle.yaml @@ -8908,6 +8908,64 @@ spec: - size type: object type: array + roles: + items: + properties: + authenticationRestrictions: + items: + properties: + clientSource: + items: + type: string + type: array + serverAddress: + items: + type: string + type: array + type: object + type: array + db: + type: string + privileges: + items: + properties: + actions: + items: + type: string + type: array + resource: + 
properties: + cluster: + type: boolean + collection: + type: string + db: + type: string + type: object + required: + - actions + type: object + type: array + role: + type: string + roles: + items: + properties: + db: + type: string + role: + type: string + required: + - db + - role + type: object + type: array + required: + - db + - privileges + - role + type: object + type: array schedulerName: type: string secrets: @@ -19167,14 +19225,10 @@ spec: key: type: string name: - default: "" type: string - optional: - type: boolean required: - - key + - name type: object - x-kubernetes-map-type: atomic roles: items: properties: @@ -19188,7 +19242,6 @@ spec: type: object type: array required: - - db - name - passwordSecretRef - roles diff --git a/deploy/cr.yaml b/deploy/cr.yaml index 0ff762b03..b44f32d0e 100644 --- a/deploy/cr.yaml +++ b/deploy/cr.yaml @@ -559,6 +559,39 @@ spec: # - "host1" # - "host2" +# roles: +# - role: myClusterwideAdmin +# db: admin +# privileges: +# - resource: +# cluster: true +# actions: +# - addShard +# - resource: +# db: config +# collection: '' +# actions: +# - find +# - update +# - insert +# - remove +# roles: +# - role: read +# db: admin +# - role: my-role +# db: myDb +# privileges: +# - resource: +# db: '' +# collection: '' +# actions: +# - find +# authenticationRestrictions: +# - clientSource: +# - 127.0.0.1 +# serverAddress: +# - 127.0.0.1 + # users: # - name: my-user # db: admin diff --git a/deploy/crd.yaml b/deploy/crd.yaml index 04b14a1f2..13b4aaa26 100644 --- a/deploy/crd.yaml +++ b/deploy/crd.yaml @@ -8908,6 +8908,64 @@ spec: - size type: object type: array + roles: + items: + properties: + authenticationRestrictions: + items: + properties: + clientSource: + items: + type: string + type: array + serverAddress: + items: + type: string + type: array + type: object + type: array + db: + type: string + privileges: + items: + properties: + actions: + items: + type: string + type: array + resource: + properties: + cluster: + type: boolean 
+ collection: + type: string + db: + type: string + type: object + required: + - actions + type: object + type: array + role: + type: string + roles: + items: + properties: + db: + type: string + role: + type: string + required: + - db + - role + type: object + type: array + required: + - db + - privileges + - role + type: object + type: array schedulerName: type: string secrets: @@ -19167,14 +19225,10 @@ spec: key: type: string name: - default: "" type: string - optional: - type: boolean required: - - key + - name type: object - x-kubernetes-map-type: atomic roles: items: properties: @@ -19188,7 +19242,6 @@ spec: type: object type: array required: - - db - name - passwordSecretRef - roles diff --git a/deploy/cw-bundle.yaml b/deploy/cw-bundle.yaml index 24f09f997..858f44456 100644 --- a/deploy/cw-bundle.yaml +++ b/deploy/cw-bundle.yaml @@ -8908,6 +8908,64 @@ spec: - size type: object type: array + roles: + items: + properties: + authenticationRestrictions: + items: + properties: + clientSource: + items: + type: string + type: array + serverAddress: + items: + type: string + type: array + type: object + type: array + db: + type: string + privileges: + items: + properties: + actions: + items: + type: string + type: array + resource: + properties: + cluster: + type: boolean + collection: + type: string + db: + type: string + type: object + required: + - actions + type: object + type: array + role: + type: string + roles: + items: + properties: + db: + type: string + role: + type: string + required: + - db + - role + type: object + type: array + required: + - db + - privileges + - role + type: object + type: array schedulerName: type: string secrets: @@ -19167,14 +19225,10 @@ spec: key: type: string name: - default: "" type: string - optional: - type: boolean required: - - key + - name type: object - x-kubernetes-map-type: atomic roles: items: properties: @@ -19188,7 +19242,6 @@ spec: type: object type: array required: - - db - name - passwordSecretRef - roles diff 
--git a/e2e-tests/custom-users-roles-sharded/compare/role-five.json b/e2e-tests/custom-users-roles-sharded/compare/role-five.json new file mode 100644 index 000000000..9117841ad --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/role-five.json @@ -0,0 +1,78 @@ +switched to db testAdmin2 +{ + "_id" : "testAdmin2.role-five", + "role" : "role-five", + "db" : "testAdmin2", + "privileges" : [ + { + "resource" : { + "db" : "testAdmin2", + "collection" : "" + }, + "actions" : [ + "find", + "listCollections", + "listIndexes" + ] + }, + { + "resource" : { + "db" : "testAdmin2", + "collection" : "system.profile" + }, + "actions" : [ + "collStats", + "dbStats", + "indexStats" + ] + }, + { + "resource" : { + "db" : "testAdmin2", + "collection" : "system.version" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ ], + "authenticationRestrictions" : [ ], + "inheritedRoles" : [ ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "testAdmin2", + "collection" : "" + }, + "actions" : [ + "find", + "listCollections", + "listIndexes" + ] + }, + { + "resource" : { + "db" : "testAdmin2", + "collection" : "system.profile" + }, + "actions" : [ + "collStats", + "dbStats", + "indexStats" + ] + }, + { + "resource" : { + "db" : "testAdmin2", + "collection" : "system.version" + }, + "actions" : [ + "find" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/role-four.json b/e2e-tests/custom-users-roles-sharded/compare/role-four.json new file mode 100644 index 000000000..f57e50bdd --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/role-four.json @@ -0,0 +1,78 @@ +switched to db testAdmin1 +{ + "_id" : "testAdmin1.role-four", + "role" : "role-four", + "db" : "testAdmin1", + "privileges" : [ + { + "resource" : { + "db" : "testAdmin1", + "collection" : "" + }, + "actions" : [ + "find", + "listCollections", + "listIndexes" + ] + }, + { + "resource" : { + "db" : 
"testAdmin1", + "collection" : "system.profile" + }, + "actions" : [ + "collStats", + "dbStats", + "indexStats" + ] + }, + { + "resource" : { + "db" : "testAdmin1", + "collection" : "system.version" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ ], + "authenticationRestrictions" : [ ], + "inheritedRoles" : [ ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "testAdmin1", + "collection" : "" + }, + "actions" : [ + "find", + "listCollections", + "listIndexes" + ] + }, + { + "resource" : { + "db" : "testAdmin1", + "collection" : "system.profile" + }, + "actions" : [ + "collStats", + "dbStats", + "indexStats" + ] + }, + { + "resource" : { + "db" : "testAdmin1", + "collection" : "system.version" + }, + "actions" : [ + "find" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/role-one.json b/e2e-tests/custom-users-roles-sharded/compare/role-one.json new file mode 100644 index 000000000..ec6492800 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/role-one.json @@ -0,0 +1,124 @@ +switched to db admin +{ + "_id" : "admin.role-one", + "role" : "role-one", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "cluster" : true + }, + "actions" : [ + "addShard" + ] + }, + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find", + "insert", + "remove", + "update" + ] + } + ], + "roles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "authenticationRestrictions" : [ + [ + { + "clientSource" : [ + "127.0.0.1" + ], + "serverAddress" : [ + "127.0.0.1" + ] + } + ] + ], + "inheritedRoles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "inheritedPrivileges" : [ + { + "resource" : { + "cluster" : true + }, + "actions" : [ + "addShard" + ] + }, + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find", + "insert", + "remove", + "update" + ] + }, + { + "resource" : { + "db" 
: "admin", + "collection" : "" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "listSearchIndexes", + "planCacheRead" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "system.js" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "listSearchIndexes", + "planCacheRead" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ + [ + { + "clientSource" : [ + "127.0.0.1" + ], + "serverAddress" : [ + "127.0.0.1" + ] + } + ] + ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/role-three.json b/e2e-tests/custom-users-roles-sharded/compare/role-three.json new file mode 100644 index 000000000..3d0403811 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/role-three.json @@ -0,0 +1,34 @@ +switched to db admin +{ + "_id" : "admin.role-three", + "role" : "role-three", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ ], + "authenticationRestrictions" : [ ], + "inheritedRoles" : [ ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/role-two-updated.json b/e2e-tests/custom-users-roles-sharded/compare/role-two-updated.json new file mode 100644 index 000000000..fa8ba47bb --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/role-two-updated.json @@ -0,0 +1,34 @@ +switched to db admin +{ + "_id" : "admin.role-two", + "role" : "role-two", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ ], + 
"authenticationRestrictions" : [ ], + "inheritedRoles" : [ ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/role-two.json b/e2e-tests/custom-users-roles-sharded/compare/role-two.json new file mode 100644 index 000000000..403c6e957 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/role-two.json @@ -0,0 +1,80 @@ +switched to db admin +{ + "_id" : "admin.role-two", + "role" : "role-two", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "authenticationRestrictions" : [ ], + "inheritedRoles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "listSearchIndexes", + "planCacheRead" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "system.js" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "listSearchIndexes", + "planCacheRead" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/run b/e2e-tests/custom-users-roles-sharded/compare/run new file mode 100755 index 000000000..d6cea068f --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/run @@ -0,0 +1,441 @@ +#!/bin/bash + +set -o errexit + +compare() { + local database="$1" + local command="$2" + 
local uri="$3" + local target="$4" + + run_mongos "use ${database}\n ${command}" "$uri" "mongodb" \ + | egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' \ + | $sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' \ + | sed '/"userId"/d' \ + >$tmp_dir/${target} + + diff ${test_dir}/compare/${target}.json $tmp_dir/${target} +} + +check_auth() { + local uri="$1" + + ping=$(run_mongos "db.runCommand({ ping: 1 }).ok" "$uri" "" "" "--quiet" | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:') + desc "ping return" + if [ "${ping}" != "1" ]; then + return 1 + fi +} + +test_dir=$(realpath $(dirname $0)) +. ${test_dir}/../functions +set_debug + +cluster="some-name" + +create_infra "$namespace" + +mongosUri="userAdmin:userAdmin123456@$cluster-mongos.$namespace" + +desc 'create secrets and start client' +kubectl_bin apply -f "${conf_dir}/client.yml" \ + -f "${conf_dir}/secrets.yml" \ + -f "${test_dir}/conf/app-user-secrets.yml" + + +apply_s3_storage_secrets +if version_gt "1.19" && [ $EKS -ne 1 ]; then + cat "$conf_dir/container-rc.yaml" | $sed 's/docker/runc/g' | kubectl_bin apply -f - +elif version_gt "1.24" && [ $EKS -eq 1 ]; then + cat "$conf_dir/container-rc.yaml" | $sed 's/docker/runc/g' | kubectl_bin apply -f - +else + kubectl_bin apply -f "$conf_dir/container-rc.yaml" +fi + +desc 'create first PSMDB cluster' + +apply_cluster "$test_dir/conf/$cluster-rs0.yml" + +desc 'check if all 3 Pods started' +wait_for_running $cluster-rs0 3 +wait_for_running $cluster-cfg 3 "false" +wait_for_running $cluster-mongos 3 +wait_cluster_consistency "${cluster}" + +desc 'check if service and statefulset created with expected config' +compare_kubectl statefulset/$cluster-rs0 
+compare_kubectl statefulset/$cluster-cfg +compare_kubectl statefulset/$cluster-mongos "" + +desc 'check user created on cluster creation' + +userOne="user-one" +userOnePass=$(getSecretData "user-one" "userOnePassKey") +compare 'admin' 'db.getUser("user-one")' "$mongosUri" "user-one" +check_auth "$userOne:$userOnePass@$cluster-mongos.$namespace" + +desc 'delete initial user from CR and create a new one' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-two", + "db":"admin", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"userAdminAnyDatabase"}, + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getUser("user-two")' "$mongosUri" "user-two" + +userTwo="user-two" +userTwoPass=$(getSecretData "user-two" "userTwoPassKey") + +# Both users should be in the DB, the operator should not delete the user removed from the CR +check_auth "$userTwo:$userTwoPass@$cluster-mongos.$namespace" +check_auth "$userOne:$userOnePass@$cluster-mongos.$namespace" + +desc 'check password change' +userTwoNewPass="new-user-two-password" +patch_secret "user-two" "userTwoPassKey" "$(echo -n "$userTwoNewPass" | base64)" +sleep 20 + +check_auth "$userTwo:$userTwoNewPass@$cluster-mongos.$namespace" + +desc 'check user roles update from CR' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-two", + "db":"admin", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getUser("user-two")' "$mongosUri" "user-two-update-roles" + +desc 'check user roles update from DB' + +run_mongos \ + 'use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})' \ + "$mongosUri" +sleep 15 +compare 'admin' 
'db.getUser("user-two")' "$mongosUri" "user-two-update-roles" + +desc 'check user recreated after deleted from DB' +run_mongos \ + 'use admin\n db.dropUser("user-two")' \ + "$mongosUri" +sleep 15 +compare 'admin' 'db.getUser("user-two")' "$mongosUri" "user-two-update-roles" + +desc 'check new user created after updated user name via CR' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-three", + "db":"admin", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getUser("user-three")' "$mongosUri" "user-three-admin-db" +compare 'admin' 'db.getUser("user-two")' "$mongosUri" "user-two-update-roles" + +# user-three and user-two should be in the DB +check_auth "$userTwo:$userTwoNewPass@$cluster-mongos.$namespace" +check_auth "user-three:$userTwoNewPass@$cluster-mongos.$namespace" + +desc 'check new user created after updated user db via CR' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-three", + "db":"newDb", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster-rs0 3 + +compare 'newDb' 'db.getUser("user-three")' "$mongosUri" "user-three-newDb-db" +compare 'admin' 'db.getUser("user-three")' "$mongosUri" "user-three-admin-db" + +desc 'check new user created with default db and secret password key' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-four", + "passwordSecretRef": { + "name": "user-two" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getUser("user-four")' "$mongosUri" "user-four" + +# ======================== Roles ======================== + +desc 'check user 
role on cluster initialization' +compare 'admin' 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-one" + +desc 'check role recreated after deleted from DB' +run_mongos \ + 'use admin\n db.dropRole("role-one")' \ + "$mongosUri" +sleep 15 +compare 'admin' 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-one" + +desc 'delete initial role from CR and create a new one' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"roles":[ + { + "role": "role-two", + "db": "admin", + "privileges": [ + { + "resource": { + "db": "config", + "collection": "" + }, + "actions": [ + "find" + ] + } + ], + "roles": [ + { + "role": "read", + "db": "admin" + } + ] + } + ] + }}' + +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-one" +compare 'admin' 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-two" + +desc 'check role update from CR' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"roles":[ + { + "role": "role-two", + "db": "admin", + "privileges": [ + { + "resource": { + "db": "config", + "collection": "" + }, + "actions": [ + "find" + ] + } + ] + } + ] + }}' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-two-updated" + +desc 'check role update from DB' +run_mongos \ + 'use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})' \ + "$mongosUri" +sleep 15 +compare 'admin' 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-two-updated" + +desc 'check new role created after updated role name via CR' +kubectl_bin patch 
psmdb ${cluster} --type=merge --patch '{ + "spec": {"roles":[ + { + "role": "role-three", + "db": "admin", + "privileges": [ + { + "resource": { + "db": "config", + "collection": "" + }, + "actions": [ + "find" + ] + } + ] + } + ] + }}' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-three" + +desc 'check creating multiple roles and the users in a single CR apply' +kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ + "spec": { + "roles": [ + { + "role": "role-four", + "db": "testAdmin1", + "privileges": [ + { + "resource": { + "db": "testAdmin1", + "collection": "" + }, + "actions": [ + "find", + "listIndexes", + "listCollections" + ] + }, + { + "resource": { + "db": "testAdmin1", + "collection": "system.profile" + }, + "actions": [ + "dbStats", + "collStats", + "indexStats" + ] + }, + { + "resource": { + "db": "testAdmin1", + "collection": "system.version" + }, + "actions": [ + "find" + ] + } + ] + }, + { + "role": "role-five", + "db": "testAdmin2", + "privileges": [ + { + "resource": { + "db": "testAdmin2", + "collection": "" + }, + "actions": [ + "find", + "listIndexes", + "listCollections" + ] + }, + { + "resource": { + "db": "testAdmin2", + "collection": "system.profile" + }, + "actions": [ + "dbStats", + "collStats", + "indexStats" + ] + }, + { + "resource": { + "db": "testAdmin2", + "collection": "system.version" + }, + "actions": [ + "find" + ] + } + ] + } + ], + "users": [ + { + "name": "user-five", + "db": "testAdmin", + "passwordSecretRef": { + "name": "user-one", + "key": "userOnePassKey" + }, + "roles": [ + { + "name": "role-four", + "db": "testAdmin1" + }, + { + "name": "role-five", + "db": "testAdmin2" + } + ] + }, + { + "name": "user-six", + "db": "testAdmin", + "passwordSecretRef": { + "name": "user-one", + "key": "userOnePassKey" + }, + "roles": [ + { + "name": "role-five", + "db": "testAdmin2" + } + ] + } + ] + }}' 
+wait_for_running $cluster-rs0 3 +compare 'testAdmin1' 'db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-four" +compare 'testAdmin2' 'db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-five" +compare 'testAdmin' 'db.getUser("user-five")' "$mongosUri" "user-five" +compare 'testAdmin' 'db.getUser("user-six")' "$mongosUri" "user-six" + +destroy $namespace + +desc 'test passed' diff --git a/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg-4-oc.yml b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg-4-oc.yml new file mode 100644 index 000000000..654bc8339 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg-4-oc.yml @@ -0,0 +1,219 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + name: some-name-cfg + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + serviceName: some-name-cfg + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: 
percona-server-mongodb + app.kubernetes.io/replset: cfg + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + topologyKey: kubernetes.io/hostname + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=cfg + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --configsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerIndexPrefixCompression=true + - --quiet + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: cfg + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: {} + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: 
/data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + - args: + - -c + - while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done + command: + - /bin/sh + imagePullPolicy: Always + name: cfg-sidecar-1 + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + runtimeClassName: container-rc + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + 
accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg-oc.yml b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg-oc.yml new file mode 100644 index 000000000..33319e5a4 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg-oc.yml @@ -0,0 +1,218 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + name: some-name-cfg + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + serviceName: some-name-cfg + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: 
percona-server-mongodb + app.kubernetes.io/replset: cfg + topologyKey: kubernetes.io/hostname + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=cfg + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --configsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerIndexPrefixCompression=true + - --quiet + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: cfg + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: {} + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - 
mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + - args: + - -c + - while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done + command: + - /bin/sh + imagePullPolicy: Always + name: cfg-sidecar-1 + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg.yml b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg.yml new file mode 100644 index 000000000..3ae11fcc7 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-cfg.yml @@ -0,0 +1,221 @@ +apiVersion: 
apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + name: some-name-cfg + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + serviceName: some-name-cfg + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: cfg + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: cfg + topologyKey: kubernetes.io/hostname + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=cfg + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --configsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - 
--wiredTigerIndexPrefixCompression=true + - --quiet + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: cfg + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + - args: + - -c + - while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done + command: + - /bin/sh + imagePullPolicy: Always + name: cfg-sidecar-1 + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + 
imagePullPolicy: Always + name: mongo-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + runtimeClassName: container-rc + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + status: + phase: Pending diff --git a/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos-4-oc.yml b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos-4-oc.yml new file mode 100644 index 000000000..ecd7fa2ad --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos-4-oc.yml @@ -0,0 +1,208 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + name: some-name-mongos 
+ ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + serviceName: "" + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + topologyKey: kubernetes.io/hostname + containers: + - args: + - mongos + - --bind_ip_all + - --port=27017 + - --sslAllowInvalidCertificates + - --configdb + - cfg/some-name-cfg-0.some-name-cfg.NAME_SPACE.svc.cluster.local:27017,some-name-cfg-1.some-name-cfg.NAME_SPACE.svc.cluster.local:27017,some-name-cfg-2.some-name-cfg.NAME_SPACE.svc.cluster.local:27017 + - --relaxPermChecks + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --config=/etc/mongos-config/mongos.conf + command: + - /opt/percona/ps-entry.sh + env: + - name: MONGODB_PORT + value: "27017" + envFrom: + - secretRef: + name: some-users + optional: false + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --component + - mongos + - --ssl + - --sslInsecure + - 
--sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "10" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongos + ports: + - containerPort: 27017 + name: mongos + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongos + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 1 + successThreshold: 1 + timeoutSeconds: 1 + resources: {} + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongos-config + name: config + - mountPath: /etc/users-secret + name: users-secret-file + readOnly: true + - mountPath: /opt/percona + name: bin + workingDir: /data/db + - args: + - -c + - while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done + command: + - /bin/sh + imagePullPolicy: Always + name: mongos-sidecar-1 + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + runtimeClassName: container-rc + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: 
default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - emptyDir: {} + name: mongod-data + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - configMap: + defaultMode: 420 + name: some-name-mongos + optional: true + name: config + - emptyDir: {} + name: bin + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate diff --git a/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos-oc.yml b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos-oc.yml new file mode 100644 index 000000000..f1e5fec54 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos-oc.yml @@ -0,0 +1,207 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + name: some-name-mongos + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + serviceName: "" + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + 
app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + topologyKey: kubernetes.io/hostname + containers: + - args: + - mongos + - --bind_ip_all + - --port=27017 + - --sslAllowInvalidCertificates + - --configdb + - cfg/some-name-cfg-0.some-name-cfg.NAME_SPACE.svc.cluster.local:27017,some-name-cfg-1.some-name-cfg.NAME_SPACE.svc.cluster.local:27017,some-name-cfg-2.some-name-cfg.NAME_SPACE.svc.cluster.local:27017 + - --relaxPermChecks + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --config=/etc/mongos-config/mongos.conf + command: + - /opt/percona/ps-entry.sh + env: + - name: MONGODB_PORT + value: "27017" + envFrom: + - secretRef: + name: some-users + optional: false + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --component + - mongos + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "10" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongos + ports: + - containerPort: 27017 + name: mongos + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongos + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + failureThreshold: 3 + initialDelaySeconds: 10 + 
periodSeconds: 1 + successThreshold: 1 + timeoutSeconds: 1 + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongos-config + name: config + - mountPath: /etc/users-secret + name: users-secret-file + readOnly: true + - mountPath: /opt/percona + name: bin + workingDir: /data/db + - args: + - -c + - while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done + command: + - /bin/sh + imagePullPolicy: Always + name: mongos-sidecar-1 + resources: {} + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - emptyDir: {} + name: mongod-data + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - configMap: + defaultMode: 420 + name: some-name-mongos + optional: true + name: config + - emptyDir: {} + 
name: bin + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate diff --git a/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos.yml b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos.yml new file mode 100644 index 000000000..5efe518ab --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-mongos.yml @@ -0,0 +1,210 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + name: some-name-mongos + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + serviceName: "" + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: mongos + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + topologyKey: kubernetes.io/hostname + containers: + - args: + - mongos + - --bind_ip_all + - 
--port=27017 + - --sslAllowInvalidCertificates + - --configdb + - cfg/some-name-cfg-0.some-name-cfg.NAME_SPACE.svc.cluster.local:27017,some-name-cfg-1.some-name-cfg.NAME_SPACE.svc.cluster.local:27017,some-name-cfg-2.some-name-cfg.NAME_SPACE.svc.cluster.local:27017 + - --relaxPermChecks + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --config=/etc/mongos-config/mongos.conf + command: + - /opt/percona/ps-entry.sh + env: + - name: MONGODB_PORT + value: "27017" + envFrom: + - secretRef: + name: some-users + optional: false + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --component + - mongos + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "10" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongos + ports: + - containerPort: 27017 + name: mongos + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongos + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 1 + successThreshold: 1 + timeoutSeconds: 1 + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongos-config + name: config + - mountPath: /etc/users-secret + name: users-secret-file + readOnly: true + - 
mountPath: /opt/percona + name: bin + workingDir: /data/db + - args: + - -c + - while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done + command: + - /bin/sh + imagePullPolicy: Always + name: mongos-sidecar-1 + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + runtimeClassName: container-rc + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - emptyDir: {} + name: mongod-data + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + - configMap: + defaultMode: 420 + name: some-name-mongos + optional: true + name: config + - emptyDir: {} + name: bin + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate diff --git a/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0-4-oc.yml b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0-4-oc.yml new file mode 100644 index 000000000..a433f7736 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0-4-oc.yml @@ -0,0 +1,228 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: mongod + 
app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - 
/etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + - args: + - -c + - while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done + command: + - /bin/sh + imagePullPolicy: Always + name: rs-sidecar-1 + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + runtimeClassName: container-rc + 
schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: + phase: Pending diff --git a/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0-oc.yml b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0-oc.yml new file mode 100644 index 000000000..83a28c098 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0-oc.yml @@ -0,0 +1,227 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + 
app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + 
periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + - args: + - -c + - while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done + command: + - /bin/sh + imagePullPolicy: Always + name: rs-sidecar-1 + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + 
secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: + phase: Pending diff --git a/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0.yml b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0.yml new file mode 100644 index 000000000..090128105 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/statefulset_some-name-rs0.yml @@ -0,0 +1,230 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: {} + generation: 1 + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + name: some-name-rs0 + ownerReferences: + - controller: true + kind: PerconaServerMongoDB + name: some-name +spec: + podManagementPolicy: OrderedReady + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + serviceName: some-name-rs0 + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/component: mongod + app.kubernetes.io/instance: some-name + app.kubernetes.io/managed-by: percona-server-mongodb-operator + 
app.kubernetes.io/name: percona-server-mongodb + app.kubernetes.io/part-of: percona-server-mongodb + app.kubernetes.io/replset: rs0 + spec: + containers: + - args: + - --bind_ip_all + - --auth + - --dbpath=/data/db + - --port=27017 + - --replSet=rs0 + - --storageEngine=wiredTiger + - --relaxPermChecks + - --sslAllowInvalidCertificates + - --clusterAuthMode=x509 + - --tlsMode=preferTLS + - --shardsvr + - --enableEncryption + - --encryptionKeyFile=/etc/mongodb-encryption/encryption-key + - --wiredTigerCacheSizeGB=0.25 + - --wiredTigerIndexPrefixCompression=true + - --config=/etc/mongodb-config/mongod.conf + - --quiet + command: + - /opt/percona/ps-entry.sh + env: + - name: SERVICE_NAME + value: some-name + - name: MONGODB_PORT + value: "27017" + - name: MONGODB_REPLSET + value: rs0 + envFrom: + - secretRef: + name: internal-some-name-users + optional: false + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - liveness + - --ssl + - --sslInsecure + - --sslCAFile + - /etc/mongodb-ssl/ca.crt + - --sslPEMKeyFile + - /tmp/tls.pem + - --startupDelaySeconds + - "7200" + failureThreshold: 4 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 + name: mongod + ports: + - containerPort: 27017 + name: mongodb + protocol: TCP + readinessProbe: + exec: + command: + - /opt/percona/mongodb-healthcheck + - k8s + - readiness + - --component + - mongod + failureThreshold: 8 + initialDelaySeconds: 10 + periodSeconds: 3 + successThreshold: 1 + timeoutSeconds: 2 + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + securityContext: + runAsNonRoot: true + runAsUser: 1001 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /etc/mongodb-secrets + name: some-name-mongodb-keyfile + readOnly: true + - mountPath: /etc/mongodb-ssl + name: ssl + readOnly: true + - 
mountPath: /etc/mongodb-ssl-internal + name: ssl-internal + readOnly: true + - mountPath: /etc/mongodb-config + name: config + - mountPath: /opt/percona + name: bin + - mountPath: /etc/mongodb-encryption + name: some-name-mongodb-encryption-key + readOnly: true + - mountPath: /etc/users-secret + name: users-secret-file + workingDir: /data/db + - args: + - -c + - while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done + command: + - /bin/sh + imagePullPolicy: Always + name: rs-sidecar-1 + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + initContainers: + - command: + - /init-entrypoint.sh + imagePullPolicy: Always + name: mongo-init + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 100M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /data/db + name: mongod-data + - mountPath: /opt/percona + name: bin + restartPolicy: Always + runtimeClassName: container-rc + schedulerName: default-scheduler + securityContext: + fsGroup: 1001 + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 60 + volumes: + - name: some-name-mongodb-keyfile + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-keyfile + - emptyDir: {} + name: bin + - configMap: + defaultMode: 420 + name: some-name-rs0-mongod + optional: true + name: config + - name: some-name-mongodb-encryption-key + secret: + defaultMode: 288 + optional: false + secretName: some-name-mongodb-encryption-key + - name: ssl + secret: + defaultMode: 288 + optional: false + secretName: some-name-ssl + - name: ssl-internal + secret: + defaultMode: 288 + optional: true + secretName: some-name-ssl-internal + - name: users-secret-file + secret: + defaultMode: 420 + secretName: internal-some-name-users + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - 
metadata: + name: mongod-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + status: + phase: Pending diff --git a/e2e-tests/custom-users-roles-sharded/compare/user-five.json b/e2e-tests/custom-users-roles-sharded/compare/user-five.json new file mode 100644 index 000000000..a25a7b96c --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/user-five.json @@ -0,0 +1,21 @@ +switched to db testAdmin +{ + "_id" : "testAdmin.user-five", + "user" : "user-five", + "db" : "testAdmin", + "roles" : [ + { + "role" : "role-four", + "db" : "testAdmin1" + }, + { + "role" : "role-five", + "db" : "testAdmin2" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/user-four.json b/e2e-tests/custom-users-roles-sharded/compare/user-four.json new file mode 100644 index 000000000..4c274f864 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/user-four.json @@ -0,0 +1,17 @@ +switched to db admin +{ + "_id" : "admin.user-four", + "user" : "user-four", + "db" : "admin", + "roles" : [ + { + "role" : "clusterAdmin", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/user-one.json b/e2e-tests/custom-users-roles-sharded/compare/user-one.json new file mode 100644 index 000000000..031eba77f --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/user-one.json @@ -0,0 +1,21 @@ +switched to db admin +{ + "_id" : "admin.user-one", + "user" : "user-one", + "db" : "admin", + "roles" : [ + { + "role" : "clusterAdmin", + "db" : "admin" + }, + { + "role" : "userAdminAnyDatabase", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/user-six.json b/e2e-tests/custom-users-roles-sharded/compare/user-six.json new file mode 100644 index 000000000..834527db5 --- /dev/null 
+++ b/e2e-tests/custom-users-roles-sharded/compare/user-six.json @@ -0,0 +1,17 @@ +switched to db testAdmin +{ + "_id" : "testAdmin.user-six", + "user" : "user-six", + "db" : "testAdmin", + "roles" : [ + { + "role" : "role-five", + "db" : "testAdmin2" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/user-three-admin-db.json b/e2e-tests/custom-users-roles-sharded/compare/user-three-admin-db.json new file mode 100644 index 000000000..dd31dd6e2 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/user-three-admin-db.json @@ -0,0 +1,17 @@ +switched to db admin +{ + "_id" : "admin.user-three", + "user" : "user-three", + "db" : "admin", + "roles" : [ + { + "role" : "clusterAdmin", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/user-three-newDb-db.json b/e2e-tests/custom-users-roles-sharded/compare/user-three-newDb-db.json new file mode 100644 index 000000000..ae5c718de --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/user-three-newDb-db.json @@ -0,0 +1,17 @@ +switched to db newDb +{ + "_id" : "newDb.user-three", + "user" : "user-three", + "db" : "newDb", + "roles" : [ + { + "role" : "clusterAdmin", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles-sharded/compare/user-two-update-roles.json b/e2e-tests/custom-users-roles-sharded/compare/user-two-update-roles.json new file mode 100644 index 000000000..d41edcf5f --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/user-two-update-roles.json @@ -0,0 +1,17 @@ +switched to db admin +{ + "_id" : "admin.user-two", + "user" : "user-two", + "db" : "admin", + "roles" : [ + { + "role" : "clusterAdmin", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git 
a/e2e-tests/custom-users-roles-sharded/compare/user-two.json b/e2e-tests/custom-users-roles-sharded/compare/user-two.json new file mode 100644 index 000000000..768466e38 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/compare/user-two.json @@ -0,0 +1,21 @@ +switched to db admin +{ + "_id" : "admin.user-two", + "user" : "user-two", + "db" : "admin", + "roles" : [ + { + "role" : "userAdminAnyDatabase", + "db" : "admin" + }, + { + "role" : "clusterAdmin", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/users/conf/custom-user-secrets.yml b/e2e-tests/custom-users-roles-sharded/conf/app-user-secrets.yml similarity index 87% rename from e2e-tests/users/conf/custom-user-secrets.yml rename to e2e-tests/custom-users-roles-sharded/conf/app-user-secrets.yml index b689ba174..17791358e 100644 --- a/e2e-tests/users/conf/custom-user-secrets.yml +++ b/e2e-tests/custom-users-roles-sharded/conf/app-user-secrets.yml @@ -13,3 +13,4 @@ metadata: type: Opaque data: userTwoPassKey: Y2x1c3Rlck1vbml0b3I= + password: Y2x1c3Rlck1vbml0b3I= diff --git a/e2e-tests/custom-users-roles-sharded/conf/secrets.yml b/e2e-tests/custom-users-roles-sharded/conf/secrets.yml new file mode 100644 index 000000000..b0d930794 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/conf/secrets.yml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: some-users +type: Opaque +stringData: + MONGODB_BACKUP_USER: backup + MONGODB_BACKUP_PASSWORD: backup123456 + MONGODB_DATABASE_ADMIN_USER: databaseAdmin + MONGODB_DATABASE_ADMIN_PASSWORD: databaseAdmin123456 + MONGODB_CLUSTER_ADMIN_USER: clusterAdmin + MONGODB_CLUSTER_ADMIN_PASSWORD: clusterAdmin123456 + MONGODB_CLUSTER_MONITOR_USER: clusterMonitor + MONGODB_CLUSTER_MONITOR_PASSWORD: clusterMonitor123456 + MONGODB_USER_ADMIN_USER: userAdmin + MONGODB_USER_ADMIN_PASSWORD: userAdmin123456 + PMM_SERVER_API_KEY: apikey \ No newline at end of file diff --git 
a/e2e-tests/custom-users-roles-sharded/conf/some-name-rs0.yml b/e2e-tests/custom-users-roles-sharded/conf/some-name-rs0.yml new file mode 100644 index 000000000..bf6682bd6 --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/conf/some-name-rs0.yml @@ -0,0 +1,173 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: some-name +spec: + #platform: openshift + image: + imagePullPolicy: Always + + roles: + - role: role-one + db: admin + privileges: + - resource: + cluster: true + actions: + - addShard + - resource: + db: config + collection: '' + actions: + - find + - update + - insert + - remove + roles: + - role: read + db: admin + authenticationRestrictions: + - clientSource: + - 127.0.0.1 + serverAddress: + - 127.0.0.1 + + users: + - name: user-one + db: admin + passwordSecretRef: + name: user-one + key: userOnePassKey + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + + backup: + enabled: false + image: perconalab/percona-server-mongodb-operator:1.1.0-backup + storages: + aws-s3: + type: s3 + s3: + credentialsSecret: aws-s3-secret + region: us-east-1 + bucket: operator-testing + prefix: psmdb-demand-backup-sharded + insecureSkipTLSVerify: false + minio: + type: s3 + s3: + credentialsSecret: minio-secret + region: us-east-1 + bucket: operator-testing + endpointUrl: http://minio-service:9000/ + insecureSkipTLSVerify: false + gcp-cs: + type: s3 + s3: + credentialsSecret: gcp-cs-secret + region: us-east-1 + bucket: operator-testing + prefix: psmdb-demand-backup-sharded + endpointUrl: https://storage.googleapis.com + insecureSkipTLSVerify: false + azure-blob: + type: azure + azure: + container: operator-testing + prefix: psmdb-demand-backup-sharded + credentialsSecret: azure-secret + + tasks: + - name: weekly + enabled: true + schedule: "0 0 * * 0" + compressionType: gzip + storageName: aws-s3 + runtimeClassName: container-rc + sharding: + enabled: true + + configsvrReplSet: + size: 3 + volumeSpec: 
+ persistentVolumeClaim: + resources: + requests: + storage: 3Gi + expose: + enabled: false + exposeType: ClusterIP + sidecars: + - image: busybox + command: ["/bin/sh"] + args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"] + name: cfg-sidecar-1 + runtimeClassName: container-rc + + mongos: + size: 3 + configuration: | + replication: + localPingThresholdMs: 15 + expose: + exposeType: ClusterIP + sidecars: + - image: busybox + command: ["/bin/sh"] + args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"] + name: mongos-sidecar-1 + runtimeClassName: container-rc + replsets: + - name: rs0 + affinity: + antiAffinityTopologyKey: none + expose: + enabled: false + exposeType: ClusterIP + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 1Gi + runtimeClassName: container-rc + size: 3 + configuration: | + net: + port: 27017 + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + sidecars: + - image: busybox + command: ["/bin/sh"] + args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"] + name: rs-sidecar-1 + secrets: + users: some-users diff --git a/e2e-tests/custom-users-roles-sharded/run b/e2e-tests/custom-users-roles-sharded/run new file mode 100755 index 000000000..78f8114bd --- /dev/null +++ b/e2e-tests/custom-users-roles-sharded/run @@ -0,0 +1,441 @@ +#!/bin/bash + +set -o errexit + +compare() { + local database="$1" + local command="$2" + local uri="$3" + local 
target="$4" + + run_mongos "use ${database}\n ${command}" "$uri" "mongodb" \ + | egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' \ + | $sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' \ + | sed '/"userId"/d' \ + >$tmp_dir/${target} + + diff ${test_dir}/compare/${target}.json $tmp_dir/${target} +} + +check_auth() { + local uri="$1" + + ping=$(run_mongos "db.runCommand({ ping: 1 }).ok" "$uri" "" "" "--quiet" | egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:') + desc "ping return" + if [ "${ping}" != "1" ]; then + return 1 + fi +} + +test_dir=$(realpath $(dirname $0)) +. ${test_dir}/../functions +set_debug + +cluster="some-name" + +create_infra "$namespace" + +mongosUri="userAdmin:userAdmin123456@$cluster-mongos.$namespace" + +desc 'create secrets and start client' +kubectl_bin apply -f "${conf_dir}/client.yml" \ + -f "${conf_dir}/secrets.yml" \ + -f "${test_dir}/conf/app-user-secrets.yml" + + +apply_s3_storage_secrets +if version_gt "1.19" && [ $EKS -ne 1 ]; then + cat "$conf_dir/container-rc.yaml" | $sed 's/docker/runc/g' | kubectl_bin apply -f - +elif version_gt "1.24" && [ $EKS -eq 1 ]; then + cat "$conf_dir/container-rc.yaml" | $sed 's/docker/runc/g' | kubectl_bin apply -f - +else + kubectl_bin apply -f "$conf_dir/container-rc.yaml" +fi + +desc 'create first PSMDB cluster' + +apply_cluster "$test_dir/conf/$cluster-rs0.yml" + +desc 'check if all 3 Pods started' +wait_for_running $cluster-rs0 3 +wait_for_running $cluster-cfg 3 "false" +wait_for_running $cluster-mongos 3 +wait_cluster_consistency "${cluster}" + +desc 'check if service and statefulset created with expected config' +compare_kubectl statefulset/$cluster-rs0 +compare_kubectl 
statefulset/$cluster-cfg +compare_kubectl statefulset/$cluster-mongos "" + +desc 'check user created on cluster creation' + +userOne="user-one" +userOnePass=$(getSecretData "user-one" "userOnePassKey") +compare 'admin' 'db.getUser("user-one")' "$mongosUri" "user-one" +check_auth "$userOne:$userOnePass@$cluster-mongos.$namespace" + +desc 'delete initial user from CR and create a new one' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-two", + "db":"admin", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"userAdminAnyDatabase"}, + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getUser("user-two")' "$mongosUri" "user-two" + +userTwo="user-two" +userTwoPass=$(getSecretData "user-two" "userTwoPassKey") + +# Both users should be in the DB, the operator should not delete the user removed from the CR +check_auth "$userTwo:$userTwoPass@$cluster-mongos.$namespace" +check_auth "$userOne:$userOnePass@$cluster-mongos.$namespace" + +desc 'check password change' +userTwoNewPass="new-user-two-password" +patch_secret "user-two" "userTwoPassKey" "$(echo -n "$userTwoNewPass" | base64)" +sleep 20 + +check_auth "$userTwo:$userTwoNewPass@$cluster-mongos.$namespace" + +desc 'check user roles update from CR' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-two", + "db":"admin", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getUser("user-two")' "$mongosUri" "user-two-update-roles" + +desc 'check user roles update from DB' + +run_mongos \ + 'use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})' \ + "$mongosUri" +sleep 15 +compare 'admin' 'db.getUser("user-two")' 
"$mongosUri" "user-two-update-roles" + +desc 'check user recreated after deleted from DB' +run_mongos \ + 'use admin\n db.dropUser("user-two")' \ + "$mongosUri" +sleep 15 +compare 'admin' 'db.getUser("user-two")' "$mongosUri" "user-two-update-roles" + +desc 'check new user created after updated user name via CR' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-three", + "db":"admin", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getUser("user-three")' "$mongosUri" "user-three-admin-db" +compare 'admin' 'db.getUser("user-two")' "$mongosUri" "user-two-update-roles" + +# user-three and user-two should be in the DB +check_auth "$userTwo:$userTwoNewPass@$cluster-mongos.$namespace" +check_auth "user-three:$userTwoNewPass@$cluster-mongos.$namespace" + +desc 'check new user created after updated user db via CR' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-three", + "db":"newDb", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster-rs0 3 + +compare 'newDb' 'db.getUser("user-three")' "$mongosUri" "user-three-newDb-db" +compare 'admin' 'db.getUser("user-three")' "$mongosUri" "user-three-admin-db" + +desc 'check new user created with default db and secret password key' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-four", + "passwordSecretRef": { + "name": "user-two" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getUser("user-four")' "$mongosUri" "user-four" + +# ======================== Roles ======================== + +desc 'check user role on cluster 
initialization' +compare 'admin' 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-one" + +desc 'check role recreated after deleted from DB' +run_mongos \ + 'use admin\n db.dropRole("role-one")' \ + "$mongosUri" +sleep 15 +compare 'admin' 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-one" + +desc 'delete initial role from CR and create a new one' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"roles":[ + { + "role": "role-two", + "db": "admin", + "privileges": [ + { + "resource": { + "db": "config", + "collection": "" + }, + "actions": [ + "find" + ] + } + ], + "roles": [ + { + "role": "read", + "db": "admin" + } + ] + } + ] + }}' + +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-one" +compare 'admin' 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-two" + +desc 'check role update from CR' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": {"roles":[ + { + "role": "role-two", + "db": "admin", + "privileges": [ + { + "resource": { + "db": "config", + "collection": "" + }, + "actions": [ + "find" + ] + } + ] + } + ] + }}' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-two-updated" + +desc 'check role update from DB' +run_mongos \ + 'use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})' \ + "$mongosUri" +sleep 15 +compare 'admin' 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-two-updated" + +desc 'check new role created after updated role name via CR' +kubectl_bin patch psmdb ${cluster} 
--type=merge --patch '{ + "spec": {"roles":[ + { + "role": "role-three", + "db": "admin", + "privileges": [ + { + "resource": { + "db": "config", + "collection": "" + }, + "actions": [ + "find" + ] + } + ] + } + ] + }}' +wait_for_running $cluster-rs0 3 + +compare 'admin' 'db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-three" + +desc 'check creating multiple roles and the users in a single CR apply' +kubectl_bin patch psmdb ${cluster} --type=merge --patch '{ + "spec": { + "roles": [ + { + "role": "role-four", + "db": "testAdmin1", + "privileges": [ + { + "resource": { + "db": "testAdmin1", + "collection": "" + }, + "actions": [ + "find", + "listIndexes", + "listCollections" + ] + }, + { + "resource": { + "db": "testAdmin1", + "collection": "system.profile" + }, + "actions": [ + "dbStats", + "collStats", + "indexStats" + ] + }, + { + "resource": { + "db": "testAdmin1", + "collection": "system.version" + }, + "actions": [ + "find" + ] + } + ] + }, + { + "role": "role-five", + "db": "testAdmin2", + "privileges": [ + { + "resource": { + "db": "testAdmin2", + "collection": "" + }, + "actions": [ + "find", + "listIndexes", + "listCollections" + ] + }, + { + "resource": { + "db": "testAdmin2", + "collection": "system.profile" + }, + "actions": [ + "dbStats", + "collStats", + "indexStats" + ] + }, + { + "resource": { + "db": "testAdmin2", + "collection": "system.version" + }, + "actions": [ + "find" + ] + } + ] + } + ], + "users": [ + { + "name": "user-five", + "db": "testAdmin", + "passwordSecretRef": { + "name": "user-one", + "key": "userOnePassKey" + }, + "roles": [ + { + "name": "role-four", + "db": "testAdmin1" + }, + { + "name": "role-five", + "db": "testAdmin2" + } + ] + }, + { + "name": "user-six", + "db": "testAdmin", + "passwordSecretRef": { + "name": "user-one", + "key": "userOnePassKey" + }, + "roles": [ + { + "name": "role-five", + "db": "testAdmin2" + } + ] + } + ] + }}' +wait_for_running 
$cluster-rs0 3 +compare 'testAdmin1' 'db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-four" +compare 'testAdmin2' 'db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongosUri" "role-five" +compare 'testAdmin' 'db.getUser("user-five")' "$mongosUri" "user-five" +compare 'testAdmin' 'db.getUser("user-six")' "$mongosUri" "user-six" + +destroy $namespace + +desc 'test passed' diff --git a/e2e-tests/custom-users-roles/compare/role-five.json b/e2e-tests/custom-users-roles/compare/role-five.json new file mode 100644 index 000000000..9117841ad --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/role-five.json @@ -0,0 +1,78 @@ +switched to db testAdmin2 +{ + "_id" : "testAdmin2.role-five", + "role" : "role-five", + "db" : "testAdmin2", + "privileges" : [ + { + "resource" : { + "db" : "testAdmin2", + "collection" : "" + }, + "actions" : [ + "find", + "listCollections", + "listIndexes" + ] + }, + { + "resource" : { + "db" : "testAdmin2", + "collection" : "system.profile" + }, + "actions" : [ + "collStats", + "dbStats", + "indexStats" + ] + }, + { + "resource" : { + "db" : "testAdmin2", + "collection" : "system.version" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ ], + "authenticationRestrictions" : [ ], + "inheritedRoles" : [ ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "testAdmin2", + "collection" : "" + }, + "actions" : [ + "find", + "listCollections", + "listIndexes" + ] + }, + { + "resource" : { + "db" : "testAdmin2", + "collection" : "system.profile" + }, + "actions" : [ + "collStats", + "dbStats", + "indexStats" + ] + }, + { + "resource" : { + "db" : "testAdmin2", + "collection" : "system.version" + }, + "actions" : [ + "find" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles/compare/role-four.json b/e2e-tests/custom-users-roles/compare/role-four.json new 
file mode 100644 index 000000000..f57e50bdd --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/role-four.json @@ -0,0 +1,78 @@ +switched to db testAdmin1 +{ + "_id" : "testAdmin1.role-four", + "role" : "role-four", + "db" : "testAdmin1", + "privileges" : [ + { + "resource" : { + "db" : "testAdmin1", + "collection" : "" + }, + "actions" : [ + "find", + "listCollections", + "listIndexes" + ] + }, + { + "resource" : { + "db" : "testAdmin1", + "collection" : "system.profile" + }, + "actions" : [ + "collStats", + "dbStats", + "indexStats" + ] + }, + { + "resource" : { + "db" : "testAdmin1", + "collection" : "system.version" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ ], + "authenticationRestrictions" : [ ], + "inheritedRoles" : [ ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "testAdmin1", + "collection" : "" + }, + "actions" : [ + "find", + "listCollections", + "listIndexes" + ] + }, + { + "resource" : { + "db" : "testAdmin1", + "collection" : "system.profile" + }, + "actions" : [ + "collStats", + "dbStats", + "indexStats" + ] + }, + { + "resource" : { + "db" : "testAdmin1", + "collection" : "system.version" + }, + "actions" : [ + "find" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles/compare/role-one.json b/e2e-tests/custom-users-roles/compare/role-one.json new file mode 100644 index 000000000..ec6492800 --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/role-one.json @@ -0,0 +1,124 @@ +switched to db admin +{ + "_id" : "admin.role-one", + "role" : "role-one", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "cluster" : true + }, + "actions" : [ + "addShard" + ] + }, + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find", + "insert", + "remove", + "update" + ] + } + ], + "roles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "authenticationRestrictions" : [ + [ + { + "clientSource" : [ + 
"127.0.0.1" + ], + "serverAddress" : [ + "127.0.0.1" + ] + } + ] + ], + "inheritedRoles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "inheritedPrivileges" : [ + { + "resource" : { + "cluster" : true + }, + "actions" : [ + "addShard" + ] + }, + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find", + "insert", + "remove", + "update" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "listSearchIndexes", + "planCacheRead" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "system.js" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "listSearchIndexes", + "planCacheRead" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ + [ + { + "clientSource" : [ + "127.0.0.1" + ], + "serverAddress" : [ + "127.0.0.1" + ] + } + ] + ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles/compare/role-three.json b/e2e-tests/custom-users-roles/compare/role-three.json new file mode 100644 index 000000000..3d0403811 --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/role-three.json @@ -0,0 +1,34 @@ +switched to db admin +{ + "_id" : "admin.role-three", + "role" : "role-three", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ ], + "authenticationRestrictions" : [ ], + "inheritedRoles" : [ ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles/compare/role-two-updated.json b/e2e-tests/custom-users-roles/compare/role-two-updated.json new file mode 
100644 index 000000000..fa8ba47bb --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/role-two-updated.json @@ -0,0 +1,34 @@ +switched to db admin +{ + "_id" : "admin.role-two", + "role" : "role-two", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ ], + "authenticationRestrictions" : [ ], + "inheritedRoles" : [ ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye diff --git a/e2e-tests/custom-users-roles/compare/role-two.json b/e2e-tests/custom-users-roles/compare/role-two.json new file mode 100644 index 000000000..403c6e957 --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/role-two.json @@ -0,0 +1,80 @@ +switched to db admin +{ + "_id" : "admin.role-two", + "role" : "role-two", + "db" : "admin", + "privileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + } + ], + "roles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "authenticationRestrictions" : [ ], + "inheritedRoles" : [ + { + "role" : "read", + "db" : "admin" + } + ], + "inheritedPrivileges" : [ + { + "resource" : { + "db" : "config", + "collection" : "" + }, + "actions" : [ + "find" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "listSearchIndexes", + "planCacheRead" + ] + }, + { + "resource" : { + "db" : "admin", + "collection" : "system.js" + }, + "actions" : [ + "changeStream", + "collStats", + "dbHash", + "dbStats", + "find", + "killCursors", + "listCollections", + "listIndexes", + "listSearchIndexes", + "planCacheRead" + ] + } + ], + "inheritedAuthenticationRestrictions" : [ ], + "isBuiltin" : false +} +bye 
diff --git a/e2e-tests/custom-users-roles/compare/user-five.json b/e2e-tests/custom-users-roles/compare/user-five.json new file mode 100644 index 000000000..a25a7b96c --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/user-five.json @@ -0,0 +1,21 @@ +switched to db testAdmin +{ + "_id" : "testAdmin.user-five", + "user" : "user-five", + "db" : "testAdmin", + "roles" : [ + { + "role" : "role-four", + "db" : "testAdmin1" + }, + { + "role" : "role-five", + "db" : "testAdmin2" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles/compare/user-four.json b/e2e-tests/custom-users-roles/compare/user-four.json new file mode 100644 index 000000000..4c274f864 --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/user-four.json @@ -0,0 +1,17 @@ +switched to db admin +{ + "_id" : "admin.user-four", + "user" : "user-four", + "db" : "admin", + "roles" : [ + { + "role" : "clusterAdmin", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles/compare/user-one.json b/e2e-tests/custom-users-roles/compare/user-one.json new file mode 100644 index 000000000..031eba77f --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/user-one.json @@ -0,0 +1,21 @@ +switched to db admin +{ + "_id" : "admin.user-one", + "user" : "user-one", + "db" : "admin", + "roles" : [ + { + "role" : "clusterAdmin", + "db" : "admin" + }, + { + "role" : "userAdminAnyDatabase", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles/compare/user-six.json b/e2e-tests/custom-users-roles/compare/user-six.json new file mode 100644 index 000000000..834527db5 --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/user-six.json @@ -0,0 +1,17 @@ +switched to db testAdmin +{ + "_id" : "testAdmin.user-six", + "user" : "user-six", + "db" : "testAdmin", + "roles" : [ + { + "role" : "role-five", + "db" 
: "testAdmin2" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles/compare/user-three-admin-db.json b/e2e-tests/custom-users-roles/compare/user-three-admin-db.json new file mode 100644 index 000000000..dd31dd6e2 --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/user-three-admin-db.json @@ -0,0 +1,17 @@ +switched to db admin +{ + "_id" : "admin.user-three", + "user" : "user-three", + "db" : "admin", + "roles" : [ + { + "role" : "clusterAdmin", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles/compare/user-three-newDb-db.json b/e2e-tests/custom-users-roles/compare/user-three-newDb-db.json new file mode 100644 index 000000000..ae5c718de --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/user-three-newDb-db.json @@ -0,0 +1,17 @@ +switched to db newDb +{ + "_id" : "newDb.user-three", + "user" : "user-three", + "db" : "newDb", + "roles" : [ + { + "role" : "clusterAdmin", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles/compare/user-two-update-roles.json b/e2e-tests/custom-users-roles/compare/user-two-update-roles.json new file mode 100644 index 000000000..d41edcf5f --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/user-two-update-roles.json @@ -0,0 +1,17 @@ +switched to db admin +{ + "_id" : "admin.user-two", + "user" : "user-two", + "db" : "admin", + "roles" : [ + { + "role" : "clusterAdmin", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles/compare/user-two.json b/e2e-tests/custom-users-roles/compare/user-two.json new file mode 100644 index 000000000..768466e38 --- /dev/null +++ b/e2e-tests/custom-users-roles/compare/user-two.json @@ -0,0 +1,21 @@ +switched to db admin +{ + "_id" : "admin.user-two", + "user" : "user-two", + "db" : 
"admin", + "roles" : [ + { + "role" : "userAdminAnyDatabase", + "db" : "admin" + }, + { + "role" : "clusterAdmin", + "db" : "admin" + } + ], + "mechanisms" : [ + "SCRAM-SHA-1", + "SCRAM-SHA-256" + ] +} +bye diff --git a/e2e-tests/custom-users-roles/conf/app-user-secrets.yml b/e2e-tests/custom-users-roles/conf/app-user-secrets.yml new file mode 100644 index 000000000..17791358e --- /dev/null +++ b/e2e-tests/custom-users-roles/conf/app-user-secrets.yml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Secret +metadata: + name: user-one +type: Opaque +data: + userOnePassKey: Y2x1c3Rlck1vbml0b3I= +--- +apiVersion: v1 +kind: Secret +metadata: + name: user-two +type: Opaque +data: + userTwoPassKey: Y2x1c3Rlck1vbml0b3I= + password: Y2x1c3Rlck1vbml0b3I= diff --git a/e2e-tests/custom-users-roles/conf/secrets.yml b/e2e-tests/custom-users-roles/conf/secrets.yml new file mode 100644 index 000000000..b0d930794 --- /dev/null +++ b/e2e-tests/custom-users-roles/conf/secrets.yml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: some-users +type: Opaque +stringData: + MONGODB_BACKUP_USER: backup + MONGODB_BACKUP_PASSWORD: backup123456 + MONGODB_DATABASE_ADMIN_USER: databaseAdmin + MONGODB_DATABASE_ADMIN_PASSWORD: databaseAdmin123456 + MONGODB_CLUSTER_ADMIN_USER: clusterAdmin + MONGODB_CLUSTER_ADMIN_PASSWORD: clusterAdmin123456 + MONGODB_CLUSTER_MONITOR_USER: clusterMonitor + MONGODB_CLUSTER_MONITOR_PASSWORD: clusterMonitor123456 + MONGODB_USER_ADMIN_USER: userAdmin + MONGODB_USER_ADMIN_PASSWORD: userAdmin123456 + PMM_SERVER_API_KEY: apikey \ No newline at end of file diff --git a/e2e-tests/custom-users-roles/conf/some-name-rs0.yml b/e2e-tests/custom-users-roles/conf/some-name-rs0.yml new file mode 100644 index 000000000..0002a19ce --- /dev/null +++ b/e2e-tests/custom-users-roles/conf/some-name-rs0.yml @@ -0,0 +1,120 @@ +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: some-name +spec: + #platform: openshift + image: + imagePullPolicy: Always + + 
roles: + - role: role-one + db: admin + privileges: + - resource: + cluster: true + actions: + - addShard + - resource: + db: config + collection: '' + actions: + - find + - update + - insert + - remove + roles: + - role: read + db: admin + authenticationRestrictions: + - clientSource: + - 127.0.0.1 + serverAddress: + - 127.0.0.1 + + users: + - name: user-one + db: admin + passwordSecretRef: + name: user-one + key: userOnePassKey + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + backup: + enabled: true + image: perconalab/percona-server-mongodb-operator:1.1.0-backup + storages: + aws-s3: + type: s3 + s3: + credentialsSecret: aws-s3-secret + region: us-east-1 + bucket: operator-testing + prefix: psmdb + insecureSkipTLSVerify: false + minio: + type: s3 + s3: + credentialsSecret: minio-secret + region: us-east-1 + bucket: operator-testing + endpointUrl: http://minio-service:9000/ + insecureSkipTLSVerify: false + gcp-cs: + type: s3 + s3: + credentialsSecret: gcp-cs-secret + region: us-east-1 + bucket: operator-testing + prefix: psmdb + endpointUrl: https://storage.googleapis.com + insecureSkipTLSVerify: false + tasks: + - name: weekly + enabled: true + schedule: "0 0 * * 0" + compressionType: gzip + storageName: aws-s3 + replsets: + - name: rs0 + affinity: + antiAffinityTopologyKey: none + configuration: | + operationProfiling: + mode: slowOp + slowOpThresholdMs: 100 + security: + enableEncryption: true + redactClientLogData: false + setParameter: + ttlMonitorSleepSecs: 60 + wiredTigerConcurrentReadTransactions: 128 + wiredTigerConcurrentWriteTransactions: 128 + storage: + engine: wiredTiger + wiredTiger: + collectionConfig: + blockCompressor: snappy + engineConfig: + directoryForIndexes: false + journalCompressor: snappy + indexConfig: + prefixCompression: true + resources: + limits: + cpu: 500m + memory: 1G + requests: + cpu: 100m + memory: 0.1G + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 1Gi + 
size: 3 + secrets: + users: some-users diff --git a/e2e-tests/custom-users-roles/run b/e2e-tests/custom-users-roles/run new file mode 100755 index 000000000..92dc628aa --- /dev/null +++ b/e2e-tests/custom-users-roles/run @@ -0,0 +1,406 @@ +#!/bin/bash + +set -o errexit + +compare() { + local database="$1" + local command="$2" + local uri="$3" + local target="$4" + + run_mongo "use ${database}\n ${command}" "$uri" "mongodb" \ + | egrep -v 'I NETWORK|W NETWORK|F NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:' \ + | $sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxxesvc/' \ + | sed '/"userId"/d' \ + >$tmp_dir/${target} + + diff ${test_dir}/compare/${target}.json $tmp_dir/${target} +} + +test_dir=$(realpath $(dirname $0)) +. ${test_dir}/../functions +set_debug + +psmdb="some-name" +cluster="some-name-rs0" + +create_infra $namespace + +desc 'create secrets and start client' +kubectl_bin apply -f "${conf_dir}/client.yml" \ + -f "${conf_dir}/secrets.yml" \ + -f "${test_dir}/conf/app-user-secrets.yml" + +mongoUri="userAdmin:userAdmin123456@$cluster.$namespace" + +desc "create first PSMDB cluster $cluster" +apply_cluster $test_dir/conf/$cluster.yml + +desc 'Check if all 3 Pods started' +wait_for_running $cluster 3 + +desc 'check user created on cluster creation' + +userOne="user-one" +userOnePass=$(getSecretData "user-one" "userOnePassKey") +compare 'admin' 'db.getUser("user-one")' "$mongoUri" "user-one" +check_mongo_auth "$userOne:$userOnePass@$cluster-0.$cluster.$namespace" + +desc 'delete initial user from CR and create a new one' +kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-two", + "db":"admin", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"userAdminAnyDatabase"}, + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + 
}' +wait_for_running $cluster 3 + +compare 'admin' 'db.getUser("user-two")' "$mongoUri" "user-two" + +userTwo="user-two" +userTwoPass=$(getSecretData "user-two" "userTwoPassKey") + +# Both users should be in the DB, the operator should not delete the user removed from the CR +check_mongo_auth "$userTwo:$userTwoPass@$cluster-0.$cluster.$namespace" +check_mongo_auth "$userOne:$userOnePass@$cluster-0.$cluster.$namespace" + +desc 'check password change' +userTwoNewPass="new-user-two-password" +patch_secret "user-two" "userTwoPassKey" "$(echo -n "$userTwoNewPass" | base64)" +sleep 20 + +check_mongo_auth "$userTwo:$userTwoNewPass@$cluster-0.$cluster.$namespace" + +desc 'check user roles update from CR' +kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-two", + "db":"admin", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster 3 +compare 'admin' 'db.getUser("user-two")' "$mongoUri" "user-two-update-roles" + +desc 'check user roles update from DB' + +run_mongo \ + 'use admin\n db.updateUser("user-two", { roles : [{ role : "userAdminAnyDatabase", db: "admin"}]})' \ + "$mongoUri" +sleep 15 +compare 'admin' 'db.getUser("user-two")' "$mongoUri" "user-two-update-roles" + +desc 'check user recreated after deleted from DB' +run_mongo \ + 'use admin\n db.dropUser("user-two")' \ + "$mongoUri" +sleep 15 +compare 'admin' 'db.getUser("user-two")' "$mongoUri" "user-two-update-roles" + +desc 'check new user created after updated user name via CR' +kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-three", + "db":"admin", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster 3 + +compare 'admin' 'db.getUser("user-three")' "$mongoUri" "user-three-admin-db" 
+compare 'admin' 'db.getUser("user-two")' "$mongoUri" "user-two-update-roles" + +# user-three and user-two should be in the DB +check_mongo_auth "$userTwo:$userTwoNewPass@$cluster-0.$cluster.$namespace" +check_mongo_auth "user-three:$userTwoNewPass@$cluster-0.$cluster.$namespace" + +desc 'check new user created after updated user db via CR' +kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-three", + "db":"newDb", + "passwordSecretRef": { + "name": "user-two", + "key": "userTwoPassKey" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster 3 +compare 'newDb' 'db.getUser("user-three")' "$mongoUri" "user-three-newDb-db" +compare 'admin' 'db.getUser("user-three")' "$mongoUri" "user-three-admin-db" + +desc 'check new user created with default db and secret password key' +kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ + "spec": {"users":[ + { + "name":"user-four", + "passwordSecretRef": { + "name": "user-two" + }, + "roles": [ + {"db":"admin","name":"clusterAdmin"} + ] + } + ]} + }' +wait_for_running $cluster 3 +compare 'admin' 'db.getUser("user-four")' "$mongoUri" "user-four" + +# ======================== Roles ======================== + +desc 'check user role on cluster initialization' +compare 'admin' 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongoUri" "role-one" + +desc 'check role recreated after deleted from DB' +run_mongo \ + 'use admin\n db.dropRole("role-one")' \ + "$mongoUri" +sleep 15 +compare 'admin' 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongoUri" "role-one" + +desc 'delete initial role from CR and create a new one' +kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ + "spec": {"roles":[ + { + "role": "role-two", + "db": "admin", + "privileges": [ + { + "resource": { + "db": "config", + "collection": "" + }, + "actions": [ + "find" + ] + } + ], + 
"roles": [ + { + "role": "read", + "db": "admin" + } + ] + } + ] + }}' +wait_for_running $cluster 3 +compare 'admin' 'db.getRole("role-one", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongoUri" "role-one" +compare 'admin' 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongoUri" "role-two" + +desc 'check role update from CR' +kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ + "spec": {"roles":[ + { + "role": "role-two", + "db": "admin", + "privileges": [ + { + "resource": { + "db": "config", + "collection": "" + }, + "actions": [ + "find" + ] + } + ] + } + ] + }}' +wait_for_running $cluster 3 +compare 'admin' 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongoUri" "role-two-updated" + +desc 'check role update from DB' +run_mongo \ + 'use admin\n db.updateRole( "role-two",{privileges:[{resource: {db:"config", collection:"" }, actions: ["find", "update"]}]})' \ + "$mongoUri" +sleep 15 +compare 'admin' 'db.getRole("role-two", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongoUri" "role-two-updated" + +desc 'check new role created after updated role name via CR' +kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ + "spec": {"roles":[ + { + "role": "role-three", + "db": "admin", + "privileges": [ + { + "resource": { + "db": "config", + "collection": "" + }, + "actions": [ + "find" + ] + } + ] + } + ] + }}' +wait_for_running $cluster 3 +compare 'admin' 'db.getRole("role-three", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongoUri" "role-three" + +desc 'check creating multiple roles and the users in a single CR apply' +kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ + "spec": { + "roles": [ + { + "role": "role-four", + "db": "testAdmin1", + "privileges": [ + { + "resource": { + "db": "testAdmin1", + "collection": "" + }, + "actions": [ + "find", + "listIndexes", + "listCollections" + ] 
+ }, + { + "resource": { + "db": "testAdmin1", + "collection": "system.profile" + }, + "actions": [ + "dbStats", + "collStats", + "indexStats" + ] + }, + { + "resource": { + "db": "testAdmin1", + "collection": "system.version" + }, + "actions": [ + "find" + ] + } + ] + }, + { + "role": "role-five", + "db": "testAdmin2", + "privileges": [ + { + "resource": { + "db": "testAdmin2", + "collection": "" + }, + "actions": [ + "find", + "listIndexes", + "listCollections" + ] + }, + { + "resource": { + "db": "testAdmin2", + "collection": "system.profile" + }, + "actions": [ + "dbStats", + "collStats", + "indexStats" + ] + }, + { + "resource": { + "db": "testAdmin2", + "collection": "system.version" + }, + "actions": [ + "find" + ] + } + ] + } + ], + "users": [ + { + "name": "user-five", + "db": "testAdmin", + "passwordSecretRef": { + "name": "user-one", + "key": "userOnePassKey" + }, + "roles": [ + { + "name": "role-four", + "db": "testAdmin1" + }, + { + "name": "role-five", + "db": "testAdmin2" + } + ] + }, + { + "name": "user-six", + "db": "testAdmin", + "passwordSecretRef": { + "name": "user-one", + "key": "userOnePassKey" + }, + "roles": [ + { + "name": "role-five", + "db": "testAdmin2" + } + ] + } + ] + }}' +wait_for_running $cluster 3 +compare 'testAdmin1' 'db.getRole("role-four", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongoUri" "role-four" +compare 'testAdmin2' 'db.getRole("role-five", {showPrivileges: true, showAuthenticationRestrictions: true})' \ + "$mongoUri" "role-five" +compare 'testAdmin' 'db.getUser("user-five")' "$mongoUri" "user-five" +compare 'testAdmin' 'db.getUser("user-six")' "$mongoUri" "user-six" + +destroy $namespace + +desc 'test passed' diff --git a/e2e-tests/run-distro.csv b/e2e-tests/run-distro.csv index da523925d..c94b70fb5 100644 --- a/e2e-tests/run-distro.csv +++ b/e2e-tests/run-distro.csv @@ -2,6 +2,8 @@ arbiter balancer custom-replset-name custom-tls +custom-users-roles +custom-users-roles-sharded 
data-at-rest-encryption data-sharded default-cr diff --git a/e2e-tests/run-pr.csv b/e2e-tests/run-pr.csv index 6f57365b1..ee1bda43d 100644 --- a/e2e-tests/run-pr.csv +++ b/e2e-tests/run-pr.csv @@ -2,6 +2,8 @@ arbiter balancer custom-replset-name custom-tls +custom-users-roles +custom-users-roles-sharded cross-site-sharded data-at-rest-encryption data-sharded diff --git a/e2e-tests/run-release.csv b/e2e-tests/run-release.csv index 44f094a4d..f94e50aed 100644 --- a/e2e-tests/run-release.csv +++ b/e2e-tests/run-release.csv @@ -2,6 +2,8 @@ arbiter balancer custom-replset-name custom-tls +custom-users-roles +custom-users-roles-sharded cross-site-sharded data-at-rest-encryption data-sharded diff --git a/e2e-tests/users/run b/e2e-tests/users/run index 27302bb29..bb61bd132 100755 --- a/e2e-tests/users/run +++ b/e2e-tests/users/run @@ -148,41 +148,6 @@ check_mongo_auth "$user:$pass@$cluster-0.$cluster.$namespace" check_mongo_auth "$user:$pass@$cluster-1.$cluster.$namespace" check_mongo_auth "$user:$pass@$cluster-2.$cluster.$namespace" -desc 'custom user defined users' -kubectl_bin apply -f "${test_dir}/conf/custom-user-secrets.yml" -sleep 25 - -userOne="user-one" -userOnePass=$(getSecretData "user-one" "userOnePassKey") -check_mongo_auth "$userOne:$userOnePass@$cluster-0.$cluster.$namespace" - -kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ - "spec": {"users":[ - { - "name":"user-two", - "db":"admin", - "passwordSecretRef": { - "name": "user-two", - "key": "userTwoPassKey" - }, - "roles": [{"db":"admin","name":"userAdminAnyDatabase"}] - } - ]} - }' -sleep 20 - -userTwo="user-two" -userTwoPass=$(getSecretData "user-two" "userTwoPassKey") -check_mongo_auth "$userTwo:$userTwoPass@$cluster-0.$cluster.$namespace" -check_mongo_auth "$userOne:$userOnePass@$cluster-0.$cluster.$namespace" - -desc 'update custom user defined user password' -userTwoNewPass="new-user-two-password" -patch_secret "user-two" "userTwoPassKey" "$(echo -n "$userTwoNewPass" | base64)" -sleep 25 - 
-check_mongo_auth "$userTwo:$userTwoNewPass@$cluster-0.$cluster.$namespace" - destroy $namespace desc 'test passed' diff --git a/e2e-tests/version-service/conf/crd.yaml b/e2e-tests/version-service/conf/crd.yaml index 04b14a1f2..13b4aaa26 100644 --- a/e2e-tests/version-service/conf/crd.yaml +++ b/e2e-tests/version-service/conf/crd.yaml @@ -8908,6 +8908,64 @@ spec: - size type: object type: array + roles: + items: + properties: + authenticationRestrictions: + items: + properties: + clientSource: + items: + type: string + type: array + serverAddress: + items: + type: string + type: array + type: object + type: array + db: + type: string + privileges: + items: + properties: + actions: + items: + type: string + type: array + resource: + properties: + cluster: + type: boolean + collection: + type: string + db: + type: string + type: object + required: + - actions + type: object + type: array + role: + type: string + roles: + items: + properties: + db: + type: string + role: + type: string + required: + - db + - role + type: object + type: array + required: + - db + - privileges + - role + type: object + type: array schedulerName: type: string secrets: @@ -19167,14 +19225,10 @@ spec: key: type: string name: - default: "" type: string - optional: - type: boolean required: - - key + - name type: object - x-kubernetes-map-type: atomic roles: items: properties: @@ -19188,7 +19242,6 @@ spec: type: object type: array required: - - db - name - passwordSecretRef - roles diff --git a/pkg/apis/psmdb/v1/psmdb_types.go b/pkg/apis/psmdb/v1/psmdb_types.go index 31f33a7ba..b505f0473 100644 --- a/pkg/apis/psmdb/v1/psmdb_types.go +++ b/pkg/apis/psmdb/v1/psmdb_types.go @@ -93,19 +93,58 @@ type PerconaServerMongoDBSpec struct { MultiCluster MultiCluster `json:"multiCluster,omitempty"` TLS *TLSSpec `json:"tls,omitempty"` Users []User `json:"users,omitempty"` + Roles []Role `json:"roles,omitempty"` VolumeExpansionEnabled bool `json:"enableVolumeExpansion,omitempty"` } +type UserRole struct { 
+ Name string `json:"name"` + DB string `json:"db"` +} + +type SecretKeySelector struct { + Name string `json:"name"` + Key string `json:"key,omitempty"` +} + type User struct { - Name string `json:"name"` - Db string `json:"db"` - PasswordSecretRef corev1.SecretKeySelector `json:"passwordSecretRef"` - Roles []Role `json:"roles"` + Name string `json:"name"` + DB string `json:"db,omitempty"` + PasswordSecretRef SecretKeySelector `json:"passwordSecretRef"` + Roles []UserRole `json:"roles"` +} + +func (u *User) UserID() string { + return u.DB + "." + u.Name +} + +type RoleAuthenticationRestriction struct { + ClientSource []string `json:"clientSource,omitempty"` + ServerAddress []string `json:"serverAddress,omitempty"` +} + +type RoleResource struct { + Collection string `json:"collection,omitempty"` + DB string `json:"db,omitempty"` + Cluster *bool `json:"cluster,omitempty"` +} + +type RolePrivilege struct { + Actions []string `json:"actions"` + Resource RoleResource `json:"resource,omitempty"` +} + +type InheritenceRole struct { + Role string `json:"role"` + DB string `json:"db"` } type Role struct { - Name string `json:"name"` - Db string `json:"db"` + Role string `json:"role"` + DB string `json:"db"` + Privileges []RolePrivilege `json:"privileges"` + AuthenticationRestrictions []RoleAuthenticationRestriction `json:"authenticationRestrictions,omitempty"` + Roles []InheritenceRole `json:"roles,omitempty"` } type UnsafeFlags struct { @@ -1071,14 +1110,14 @@ const ( EnvPMMServerAPIKey = PMMAPIKey ) -type UserRole string +type SystemUserRole string const ( - RoleDatabaseAdmin UserRole = "databaseAdmin" - RoleClusterAdmin UserRole = "clusterAdmin" - RoleUserAdmin UserRole = "userAdmin" - RoleClusterMonitor UserRole = "clusterMonitor" - RoleBackup UserRole = "backup" + RoleDatabaseAdmin SystemUserRole = "databaseAdmin" + RoleClusterAdmin SystemUserRole = "clusterAdmin" + RoleUserAdmin SystemUserRole = "userAdmin" + RoleClusterMonitor SystemUserRole = "clusterMonitor" + 
RoleBackup SystemUserRole = "backup" ) func InternalUserSecretName(cr *PerconaServerMongoDB) string { diff --git a/pkg/apis/psmdb/v1/zz_generated.deepcopy.go b/pkg/apis/psmdb/v1/zz_generated.deepcopy.go index 1cdfafec8..007038975 100644 --- a/pkg/apis/psmdb/v1/zz_generated.deepcopy.go +++ b/pkg/apis/psmdb/v1/zz_generated.deepcopy.go @@ -392,6 +392,21 @@ func (in HorizonsSpec) DeepCopy() HorizonsSpec { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InheritenceRole) DeepCopyInto(out *InheritenceRole) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InheritenceRole. +func (in *InheritenceRole) DeepCopy() *InheritenceRole { + if in == nil { + return nil + } + out := new(InheritenceRole) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LivenessProbeExtended) DeepCopyInto(out *LivenessProbeExtended) { *out = *in @@ -1292,6 +1307,13 @@ func (in *PerconaServerMongoDBSpec) DeepCopyInto(out *PerconaServerMongoDBSpec) (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerconaServerMongoDBSpec. @@ -1646,6 +1668,25 @@ func (in *Retryer) DeepCopy() *Retryer { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Role) DeepCopyInto(out *Role) { *out = *in + if in.Privileges != nil { + in, out := &in.Privileges, &out.Privileges + *out = make([]RolePrivilege, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AuthenticationRestrictions != nil { + in, out := &in.AuthenticationRestrictions, &out.AuthenticationRestrictions + *out = make([]RoleAuthenticationRestriction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]InheritenceRole, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. @@ -1658,6 +1699,72 @@ func (in *Role) DeepCopy() *Role { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleAuthenticationRestriction) DeepCopyInto(out *RoleAuthenticationRestriction) { + *out = *in + if in.ClientSource != nil { + in, out := &in.ClientSource, &out.ClientSource + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServerAddress != nil { + in, out := &in.ServerAddress, &out.ServerAddress + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleAuthenticationRestriction. +func (in *RoleAuthenticationRestriction) DeepCopy() *RoleAuthenticationRestriction { + if in == nil { + return nil + } + out := new(RoleAuthenticationRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolePrivilege) DeepCopyInto(out *RolePrivilege) { + *out = *in + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Resource.DeepCopyInto(&out.Resource) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolePrivilege. +func (in *RolePrivilege) DeepCopy() *RolePrivilege { + if in == nil { + return nil + } + out := new(RolePrivilege) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleResource) DeepCopyInto(out *RoleResource) { + *out = *in + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleResource. +func (in *RoleResource) DeepCopy() *RoleResource { + if in == nil { + return nil + } + out := new(RoleResource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *S3ServiceSideEncryption) DeepCopyInto(out *S3ServiceSideEncryption) { *out = *in @@ -1673,6 +1780,21 @@ func (in *S3ServiceSideEncryption) DeepCopy() *S3ServiceSideEncryption { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. +func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { + if in == nil { + return nil + } + out := new(SecretKeySelector) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SecretsSpec) DeepCopyInto(out *SecretsSpec) { *out = *in @@ -1809,10 +1931,10 @@ func (in *UpgradeOptions) DeepCopy() *UpgradeOptions { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *User) DeepCopyInto(out *User) { *out = *in - in.PasswordSecretRef.DeepCopyInto(&out.PasswordSecretRef) + out.PasswordSecretRef = in.PasswordSecretRef if in.Roles != nil { in, out := &in.Roles, &out.Roles - *out = make([]Role, len(*in)) + *out = make([]UserRole, len(*in)) copy(*out, *in) } } @@ -1827,6 +1949,21 @@ func (in *User) DeepCopy() *User { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserRole) DeepCopyInto(out *UserRole) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserRole. +func (in *UserRole) DeepCopy() *UserRole { + if in == nil { + return nil + } + out := new(UserRole) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VolumeSpec) DeepCopyInto(out *VolumeSpec) { *out = *in diff --git a/pkg/controller/perconaservermongodb/connections.go b/pkg/controller/perconaservermongodb/connections.go index 9875bfcdd..af4912955 100644 --- a/pkg/controller/perconaservermongodb/connections.go +++ b/pkg/controller/perconaservermongodb/connections.go @@ -13,9 +13,9 @@ import ( ) type MongoClientProvider interface { - Mongo(ctx context.Context, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec, role api.UserRole) (mongo.Client, error) - Mongos(ctx context.Context, cr *api.PerconaServerMongoDB, role api.UserRole) (mongo.Client, error) - Standalone(ctx context.Context, cr *api.PerconaServerMongoDB, role api.UserRole, host string, tlsEnabled bool) (mongo.Client, error) + Mongo(ctx context.Context, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec, role api.SystemUserRole) (mongo.Client, error) + Mongos(ctx context.Context, cr *api.PerconaServerMongoDB, role api.SystemUserRole) (mongo.Client, error) + Standalone(ctx context.Context, cr *api.PerconaServerMongoDB, role api.SystemUserRole, host string, tlsEnabled bool) (mongo.Client, error) } func (r *ReconcilePerconaServerMongoDB) MongoClientProvider() MongoClientProvider { @@ -29,7 +29,7 @@ type mongoClientProvider struct { k8sclient client.Client } -func (p *mongoClientProvider) Mongo(ctx context.Context, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec, role api.UserRole) (mongo.Client, error) { +func (p *mongoClientProvider) Mongo(ctx context.Context, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec, role api.SystemUserRole) (mongo.Client, error) { c, err := getInternalCredentials(ctx, p.k8sclient, cr, role) if err != nil { return nil, errors.Wrap(err, "failed to get credentials") @@ -38,7 +38,7 @@ func (p *mongoClientProvider) Mongo(ctx context.Context, cr *api.PerconaServerMo return psmdb.MongoClient(ctx, p.k8sclient, cr, rs, c) } -func (p *mongoClientProvider) Mongos(ctx context.Context, cr *api.PerconaServerMongoDB, role api.UserRole) 
(mongo.Client, error) { +func (p *mongoClientProvider) Mongos(ctx context.Context, cr *api.PerconaServerMongoDB, role api.SystemUserRole) (mongo.Client, error) { c, err := getInternalCredentials(ctx, p.k8sclient, cr, role) if err != nil { return nil, errors.Wrap(err, "failed to get credentials") @@ -47,7 +47,7 @@ func (p *mongoClientProvider) Mongos(ctx context.Context, cr *api.PerconaServerM return psmdb.MongosClient(ctx, p.k8sclient, cr, c) } -func (p *mongoClientProvider) Standalone(ctx context.Context, cr *api.PerconaServerMongoDB, role api.UserRole, host string, tlsEnabled bool) (mongo.Client, error) { +func (p *mongoClientProvider) Standalone(ctx context.Context, cr *api.PerconaServerMongoDB, role api.SystemUserRole, host string, tlsEnabled bool) (mongo.Client, error) { c, err := getInternalCredentials(ctx, p.k8sclient, cr, role) if err != nil { return nil, errors.Wrap(err, "failed to get credentials") @@ -56,15 +56,15 @@ func (p *mongoClientProvider) Standalone(ctx context.Context, cr *api.PerconaSer return psmdb.StandaloneClient(ctx, p.k8sclient, cr, c, host, tlsEnabled) } -func (r *ReconcilePerconaServerMongoDB) mongoClientWithRole(ctx context.Context, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec, role api.UserRole) (mongo.Client, error) { +func (r *ReconcilePerconaServerMongoDB) mongoClientWithRole(ctx context.Context, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec, role api.SystemUserRole) (mongo.Client, error) { return r.MongoClientProvider().Mongo(ctx, cr, rs, role) } -func (r *ReconcilePerconaServerMongoDB) mongosClientWithRole(ctx context.Context, cr *api.PerconaServerMongoDB, role api.UserRole) (mongo.Client, error) { +func (r *ReconcilePerconaServerMongoDB) mongosClientWithRole(ctx context.Context, cr *api.PerconaServerMongoDB, role api.SystemUserRole) (mongo.Client, error) { return r.MongoClientProvider().Mongos(ctx, cr, role) } -func (r *ReconcilePerconaServerMongoDB) standaloneClientWithRole(ctx context.Context, cr 
*api.PerconaServerMongoDB, rs *api.ReplsetSpec, role api.UserRole, pod corev1.Pod) (mongo.Client, error) { +func (r *ReconcilePerconaServerMongoDB) standaloneClientWithRole(ctx context.Context, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec, role api.SystemUserRole, pod corev1.Pod) (mongo.Client, error) { host, err := psmdb.MongoHost(ctx, r.client, cr, cr.Spec.ClusterServiceDNSMode, rs, rs.Expose.Enabled, pod) if err != nil { return nil, errors.Wrap(err, "failed to get mongo host") diff --git a/pkg/controller/perconaservermongodb/connections_test.go b/pkg/controller/perconaservermongodb/connections_test.go index f619ff1a6..af86b6334 100644 --- a/pkg/controller/perconaservermongodb/connections_test.go +++ b/pkg/controller/perconaservermongodb/connections_test.go @@ -365,21 +365,21 @@ type fakeMongoClientProvider struct { connectionCount *int } -func (g *fakeMongoClientProvider) Mongo(ctx context.Context, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec, role api.UserRole) (mongo.Client, error) { +func (g *fakeMongoClientProvider) Mongo(ctx context.Context, cr *api.PerconaServerMongoDB, rs *api.ReplsetSpec, role api.SystemUserRole) (mongo.Client, error) { *g.connectionCount++ fakeClient := mongoFake.NewClient() return &fakeMongoClient{pods: g.pods, cr: g.cr, connectionCount: g.connectionCount, Client: fakeClient}, nil } -func (g *fakeMongoClientProvider) Mongos(ctx context.Context, cr *api.PerconaServerMongoDB, role api.UserRole) (mongo.Client, error) { +func (g *fakeMongoClientProvider) Mongos(ctx context.Context, cr *api.PerconaServerMongoDB, role api.SystemUserRole) (mongo.Client, error) { *g.connectionCount++ fakeClient := mongoFake.NewClient() return &fakeMongoClient{pods: g.pods, cr: g.cr, connectionCount: g.connectionCount, Client: fakeClient}, nil } -func (g *fakeMongoClientProvider) Standalone(ctx context.Context, cr *api.PerconaServerMongoDB, role api.UserRole, host string, tlsEnabled bool) (mongo.Client, error) { +func (g *fakeMongoClientProvider) 
Standalone(ctx context.Context, cr *api.PerconaServerMongoDB, role api.SystemUserRole, host string, tlsEnabled bool) (mongo.Client, error) { *g.connectionCount++ fakeClient := mongoFake.NewClient() @@ -403,13 +403,13 @@ func (c *fakeMongoClient) GetFCV(ctx context.Context) (string, error) { return "4.0", nil } -func (c *fakeMongoClient) GetRole(ctx context.Context, role string) (*mongo.Role, error) { +func (c *fakeMongoClient) GetRole(ctx context.Context, db, role string) (*mongo.Role, error) { return &mongo.Role{ Role: string(api.RoleClusterAdmin), }, nil } -func (c *fakeMongoClient) GetUserInfo(ctx context.Context, username string) (*mongo.User, error) { +func (c *fakeMongoClient) GetUserInfo(ctx context.Context, username, db string) (*mongo.User, error) { return &mongo.User{ Roles: []map[string]interface{}{}, }, nil diff --git a/pkg/controller/perconaservermongodb/custom_users.go b/pkg/controller/perconaservermongodb/custom_users.go index c59daf2e4..78ad52ba4 100644 --- a/pkg/controller/perconaservermongodb/custom_users.go +++ b/pkg/controller/perconaservermongodb/custom_users.go @@ -10,6 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" api "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1" @@ -17,7 +18,7 @@ import ( ) func (r *ReconcilePerconaServerMongoDB) reconcileCustomUsers(ctx context.Context, cr *api.PerconaServerMongoDB) error { - if cr.Spec.Users == nil || len(cr.Spec.Users) == 0 { + if cr.Spec.Users == nil && len(cr.Spec.Users) == 0 && cr.Spec.Roles == nil && len(cr.Spec.Roles) == 0 { return nil } @@ -44,6 +45,15 @@ func (r *ReconcilePerconaServerMongoDB) reconcileCustomUsers(ctx context.Context } }() + err = handleRoles(ctx, cr, cli) + if err != nil { + return errors.Wrap(err, "handle roles") + } + + if len(cr.Spec.Users) == 0 { + return nil + } + sysUsersSecret := 
corev1.Secret{} err = r.client.Get(ctx, types.NamespacedName{ @@ -64,84 +74,152 @@ func (r *ReconcilePerconaServerMongoDB) reconcileCustomUsers(ctx context.Context continue } + if user.DB == "" { + user.DB = "admin" + } + + if user.PasswordSecretRef.Key == "" { + user.PasswordSecretRef.Key = "password" + } + sec, err := getUserSecret(ctx, r.client, cr, user.PasswordSecretRef.Name) if err != nil { log.Error(err, "failed to get user secret", "user", user) continue } - annotationKey := fmt.Sprintf("percona.com/%s-hash", user.Name) - - newHash := sha256Hash(sec.Data[user.PasswordSecretRef.Key]) + userInfo, err := cli.GetUserInfo(ctx, user.Name, user.DB) + if err != nil { + log.Error(err, "get user info") + continue + } - hash, ok := sec.Annotations[annotationKey] - if ok && hash == newHash { + if userInfo == nil { + err = createUser(ctx, r.client, cli, &user, &sec) + if err != nil { + return errors.Wrapf(err, "create user %s", user.Name) + } continue } - if sec.Annotations == nil { - sec.Annotations = make(map[string]string) + err = updatePass(ctx, r.client, cli, &user, userInfo, &sec, cr.Name) + if err != nil { + log.Error(err, "update user pass", "user", user.Name) + continue } - userInfo, err := cli.GetUserInfo(ctx, user.Name) + err = updateRoles(ctx, cli, &user, userInfo) if err != nil { - log.Error(err, "get user info") + log.Error(err, "update user roles", "user", user.Name) continue } + } - if userInfo != nil && hash != newHash { - log.Info("User password changed, updating it.", "user", user.Name) - err := cli.UpdateUserPass(ctx, user.Db, user.Name, string(sec.Data[user.PasswordSecretRef.Key])) - if err != nil { - log.Error(err, "failed to update user pass", "user", user.Name) - continue - } - sec.Annotations[annotationKey] = string(newHash) - if err := r.client.Update(ctx, &sec); err != nil { - log.Error(err, "update user secret", "user", user.Name, "secret", sec.Name) - continue - } - log.Info("User updated", "user", user.Name) + return nil +} + +func 
handleRoles(ctx context.Context, cr *api.PerconaServerMongoDB, cli mongo.Client) error { + if len(cr.Spec.Roles) == 0 { + return nil + } + + for _, role := range cr.Spec.Roles { + roleInfo, err := cli.GetRole(ctx, role.DB, role.Role) + if err != nil { + return errors.Wrap(err, "mongo get role") } - roles := make([]map[string]interface{}, 0) - for _, role := range user.Roles { - roles = append(roles, map[string]interface{}{ - "role": role.Name, - "db": role.Db, - }) + mr, err := toMongoRoleModel(role) + if err != nil { + return err } - if userInfo != nil && !reflect.DeepEqual(userInfo.Roles, roles) { - log.Info("User roles changed, updating them.", "user", user.Name) - err := cli.UpdateUserRoles(ctx, user.Db, user.Name, roles) + if roleInfo == nil { + err := cli.CreateRole(ctx, role.DB, *mr) if err != nil { - log.Error(err, "failed to update user roles", "user", user.Name) - continue + return errors.Wrapf(err, "create role %s", role.Role) } + continue } - if userInfo != nil { - continue + if !compareRole(mr, roleInfo) { + err := cli.UpdateRole(ctx, role.DB, *mr) + if err != nil { + return errors.Wrapf(err, "update role %s", role.Role) + } } + } - log.Info("Creating user", "user", user.Name) - err = cli.CreateUser(ctx, user.Db, user.Name, string(sec.Data[user.PasswordSecretRef.Key]), roles...) 
- if err != nil { - log.Error(err, "failed to create user", "user", user.Name) - continue + return nil +} + +func compareRole(r1, r2 *mongo.Role) bool { + if !comparePrivileges(r1.Privileges, r2.Privileges) { + return false + } + + if len(r1.AuthenticationRestrictions) != len(r2.AuthenticationRestrictions) { + return false + } + if !reflect.DeepEqual(r1.AuthenticationRestrictions, r2.AuthenticationRestrictions) { + return false + } + + if len(r1.Roles) != len(r2.Roles) { + return false + } + if !reflect.DeepEqual(r1.Roles, r2.Roles) { + return false + } + + return true +} + +func toMongoRoleModel(role api.Role) (*mongo.Role, error) { + mr := &mongo.Role{ + Role: role.Role, + DB: role.DB, + } + + for _, r := range role.Roles { + mr.Roles = append(mr.Roles, mongo.InheritenceRole{ + Role: r.Role, + DB: r.DB, + }) + } + + for _, p := range role.Privileges { + if p.Resource.Cluster != nil && (p.Resource.DB != "" || p.Resource.Collection != "") { + return nil, errors.New("field role.privilege.resource must have exactly db and collection set, or have only cluster set") } - sec.Annotations[annotationKey] = string(newHash) - if err := r.client.Update(ctx, &sec); err != nil { - log.Error(err, "update user secret", "user", user.Name, "secret", sec.Name) - continue + rp := mongo.RolePrivilege{ + Actions: p.Actions, + Resource: make(map[string]interface{}, 3), } - log.Info("User created", "user", user.Name) + if p.Resource.Cluster != nil { + rp.Resource["cluster"] = p.Resource.Cluster + } else { + rp.Resource["db"] = p.Resource.DB + rp.Resource["collection"] = p.Resource.Collection + } + + mr.Privileges = append(mr.Privileges, rp) } - return nil + if role.AuthenticationRestrictions != nil { + for _, ar := range role.AuthenticationRestrictions { + mr.AuthenticationRestrictions = append(mr.AuthenticationRestrictions, mongo.RoleAuthenticationRestriction{ + ClientSource: ar.ClientSource, + ServerAddress: ar.ServerAddress, + }) + } + } else { + mr.AuthenticationRestrictions = nil + 
} + + return mr, nil } // sysUserNames returns a set of system user names from the sysUsersSecret. @@ -154,3 +232,112 @@ func sysUserNames(sysUsersSecret corev1.Secret) map[string]struct{} { } return sysUserNames } + +func updatePass( + ctx context.Context, + cli client.Client, + mongoCli mongo.Client, + user *api.User, + userInfo *mongo.User, + secret *corev1.Secret, + cluster string) error { + log := logf.FromContext(ctx) + + if userInfo == nil { + return nil + } + + annotationKey := fmt.Sprintf("percona.com/%s-%s-hash", cluster, user.Name) + + newHash := sha256Hash(secret.Data[user.PasswordSecretRef.Key]) + + hash, ok := secret.Annotations[annotationKey] + if ok && hash == newHash { + return nil + } + + if secret.Annotations == nil { + secret.Annotations = make(map[string]string) + } + + log.Info("User password changed, updating it.", "user", user.UserID()) + + err := mongoCli.UpdateUserPass(ctx, user.DB, user.Name, string(secret.Data[user.PasswordSecretRef.Key])) + if err != nil { + return errors.Wrapf(err, "update user %s password", user.Name) + } + + secret.Annotations[annotationKey] = string(newHash) + if err := cli.Update(ctx, secret); err != nil { + return errors.Wrapf(err, "update secret %s", secret.Name) + } + + log.Info("User updated", "user", user.UserID()) + + return nil +} + +func updateRoles( + ctx context.Context, + mongoCli mongo.Client, + user *api.User, + userInfo *mongo.User) error { + log := logf.FromContext(ctx) + + if userInfo == nil { + return nil + } + + roles := make([]map[string]interface{}, 0) + for _, role := range user.Roles { + roles = append(roles, map[string]interface{}{ + "role": role.Name, + "db": role.DB, + }) + } + + if reflect.DeepEqual(userInfo.Roles, roles) { + return nil + } + + log.Info("User roles changed, updating them.", "user", user.UserID()) + err := mongoCli.UpdateUserRoles(ctx, user.DB, user.Name, roles) + if err != nil { + return err + } + + return nil +} + +func createUser( + ctx context.Context, + cli 
client.Client, + mongoCli mongo.Client, + user *api.User, + secret *corev1.Secret) error { + log := logf.FromContext(ctx) + + annotationKey := fmt.Sprintf("percona.com/%s-hash", user.Name) + + roles := make([]map[string]interface{}, 0) + for _, role := range user.Roles { + roles = append(roles, map[string]interface{}{ + "role": role.Name, + "db": role.DB, + }) + } + + log.Info("Creating user", "user", user.UserID()) + err := mongoCli.CreateUser(ctx, user.DB, user.Name, string(secret.Data[user.PasswordSecretRef.Key]), roles...) + if err != nil { + return err + } + + secret.Annotations[annotationKey] = string(sha256Hash(secret.Data[user.PasswordSecretRef.Key])) + if err := cli.Update(ctx, secret); err != nil { + return err + } + + log.Info("User created", "user", user.UserID()) + return nil +} diff --git a/pkg/controller/perconaservermongodb/mgo.go b/pkg/controller/perconaservermongodb/mgo.go index 3124f5d03..b88d12749 100644 --- a/pkg/controller/perconaservermongodb/mgo.go +++ b/pkg/controller/perconaservermongodb/mgo.go @@ -748,7 +748,7 @@ func (r *ReconcilePerconaServerMongoDB) handleReplicaSetNoPrimary(ctx context.Co return errNoRunningMongodContainers } -func getRoles(cr *api.PerconaServerMongoDB, role api.UserRole) []map[string]interface{} { +func getRoles(cr *api.PerconaServerMongoDB, role api.SystemUserRole) []map[string]interface{} { roles := make([]map[string]interface{}, 0) switch role { case api.RoleDatabaseAdmin: @@ -836,16 +836,23 @@ func compareTags(tags mongo.ReplsetTags, selector api.PrimaryPreferTagSelectorSp } func (r *ReconcilePerconaServerMongoDB) createOrUpdateSystemRoles(ctx context.Context, cli mongo.Client, role string, privileges []mongo.RolePrivilege) error { - roleInfo, err := cli.GetRole(ctx, role) + roleInfo, err := cli.GetRole(ctx, "admin", role) if err != nil { return errors.Wrap(err, "mongo get role") } + + mo := mongo.Role{ + Role: role, + Privileges: privileges, + Roles: []mongo.InheritenceRole{}, + } + if roleInfo == nil { - err = 
cli.CreateRole(ctx, role, privileges, []interface{}{}) + err = cli.CreateRole(ctx, "admin", mo) return errors.Wrapf(err, "create role %s", role) } if !comparePrivileges(privileges, roleInfo.Privileges) { - err = cli.UpdateRole(ctx, role, privileges, []interface{}{}) + err = cli.UpdateRole(ctx, "admin", mo) return errors.Wrapf(err, "update role") } return nil @@ -952,7 +959,7 @@ func (r *ReconcilePerconaServerMongoDB) createOrUpdateSystemUsers(ctx context.Co return errors.Wrap(err, "create or update system role") } - users := []api.UserRole{api.RoleClusterAdmin, api.RoleClusterMonitor, api.RoleBackup} + users := []api.SystemUserRole{api.RoleClusterAdmin, api.RoleClusterMonitor, api.RoleBackup} if cr.CompareVersion("1.13.0") >= 0 { users = append(users, api.RoleDatabaseAdmin) } @@ -963,7 +970,7 @@ func (r *ReconcilePerconaServerMongoDB) createOrUpdateSystemUsers(ctx context.Co log.Error(err, "failed to get credentials", "role", role) continue } - user, err := cli.GetUserInfo(ctx, creds.Username) + user, err := cli.GetUserInfo(ctx, creds.Username, "admin") if err != nil { return errors.Wrap(err, "get user info") } diff --git a/pkg/controller/perconaservermongodb/secrets.go b/pkg/controller/perconaservermongodb/secrets.go index ef43e5c50..c618b6508 100644 --- a/pkg/controller/perconaservermongodb/secrets.go +++ b/pkg/controller/perconaservermongodb/secrets.go @@ -24,11 +24,11 @@ func getUserSecret(ctx context.Context, cl client.Reader, cr *api.PerconaServerM return secrets, errors.Wrap(err, "get user secrets") } -func getInternalCredentials(ctx context.Context, cl client.Reader, cr *api.PerconaServerMongoDB, role api.UserRole) (psmdb.Credentials, error) { +func getInternalCredentials(ctx context.Context, cl client.Reader, cr *api.PerconaServerMongoDB, role api.SystemUserRole) (psmdb.Credentials, error) { return getCredentials(ctx, cl, cr, api.UserSecretName(cr), role) } -func getCredentials(ctx context.Context, cl client.Reader, cr *api.PerconaServerMongoDB, name 
// CreateRole is a no-op in the fake client; it only satisfies the
// mongo.Client interface for tests.
func (c *fakeMongoClient) CreateRole(ctx context.Context, db string, role mongo.Role) error {
	return nil
}

// UpdateRole is a no-op in the fake client; it only satisfies the
// mongo.Client interface for tests.
func (c *fakeMongoClient) UpdateRole(ctx context.Context, db string, role mongo.Role) error {
	return nil
}
string) (*mongo.Role, error) { +func (c *fakeMongoClient) GetRole(ctx context.Context, db, role string) (*mongo.Role, error) { return nil, nil } @@ -134,7 +134,7 @@ func (c *fakeMongoClient) IsMaster(ctx context.Context) (*mongo.IsMasterResp, er return nil, nil } -func (c *fakeMongoClient) GetUserInfo(ctx context.Context, username string) (*mongo.User, error) { +func (c *fakeMongoClient) GetUserInfo(ctx context.Context, username string, db string) (*mongo.User, error) { return nil, nil } diff --git a/pkg/psmdb/mongo/models.go b/pkg/psmdb/mongo/models.go index 03cb2a713..6da1718b4 100644 --- a/pkg/psmdb/mongo/models.go +++ b/pkg/psmdb/mongo/models.go @@ -235,23 +235,44 @@ func (s *Status) Primary() *Member { return nil } +type RoleAuthenticationRestriction struct { + ClientSource []string `bson:"clientSource" json:"clientSource"` + ServerAddress []string `bson:"serverAddress" json:"serverAddress"` +} + type RolePrivilege struct { Resource map[string]interface{} `bson:"resource" json:"resource"` Actions []string `bson:"actions" json:"actions"` } +type InheritenceRole struct { + Role string `bson:"role" json:"role"` + DB string `bson:"db" json:"db"` +} + type Role struct { - Role string `bson:"role" json:"role"` - Roles []map[string]interface{} `bson:"roles" json:"roles"` - Privileges []RolePrivilege `bson:"privileges" json:"privileges"` + Role string `bson:"role" json:"role"` + DB string `bson:"db" json:"db"` + Roles []InheritenceRole `bson:"roles" json:"roles"` + Privileges []RolePrivilege `bson:"privileges" json:"privileges"` + AuthenticationRestrictions []RoleAuthenticationRestriction `bson:"authenticationRestrictions" json:"authenticationRestrictions"` +} + +type GetRoleResult struct { + Role string `bson:"role" json:"role"` + DB string `bson:"db" json:"db"` + Roles []InheritenceRole `bson:"roles" json:"roles"` + Privileges []RolePrivilege `bson:"privileges" json:"privileges"` + AuthenticationRestrictions [][]RoleAuthenticationRestriction 
`bson:"authenticationRestrictions" json:"authenticationRestrictions"` } type RoleInfo struct { - Roles []Role `bson:"roles" json:"roles"` + Roles []GetRoleResult `bson:"roles" json:"roles"` OKResponse `bson:",inline"` } type User struct { + DB string `bson:"db" json:"db"` Roles []map[string]interface{} `bson:"roles" json:"roles"` } diff --git a/pkg/psmdb/mongo/mongo.go b/pkg/psmdb/mongo/mongo.go index 153efe965..60268605d 100644 --- a/pkg/psmdb/mongo/mongo.go +++ b/pkg/psmdb/mongo/mongo.go @@ -34,9 +34,9 @@ type Client interface { SetDefaultRWConcern(ctx context.Context, readConcern, writeConcern string) error ReadConfig(ctx context.Context) (RSConfig, error) - CreateRole(ctx context.Context, role string, privileges []RolePrivilege, roles []interface{}) error - UpdateRole(ctx context.Context, role string, privileges []RolePrivilege, roles []interface{}) error - GetRole(ctx context.Context, role string) (*Role, error) + CreateRole(ctx context.Context, db string, role Role) error + UpdateRole(ctx context.Context, db string, role Role) error + GetRole(ctx context.Context, db, role string) (*Role, error) CreateUser(ctx context.Context, db, user, pwd string, roles ...map[string]interface{}) error AddShard(ctx context.Context, rsName, host string) error WriteConfig(ctx context.Context, cfg RSConfig, force bool) error @@ -53,7 +53,7 @@ type Client interface { StepDown(ctx context.Context, seconds int, force bool) error Freeze(ctx context.Context, seconds int) error IsMaster(ctx context.Context) (*IsMasterResp, error) - GetUserInfo(ctx context.Context, username string) (*User, error) + GetUserInfo(ctx context.Context, username, db string) (*User, error) UpdateUserRoles(ctx context.Context, db, username string, roles []map[string]interface{}) error UpdateUserPass(ctx context.Context, db, name, pass string) error UpdateUser(ctx context.Context, currName, newName, pass string) error @@ -158,26 +158,32 @@ func (client *mongoClient) ReadConfig(ctx context.Context) (RSConfig, 
error) { return *resp.Config, nil } -func (client *mongoClient) CreateRole(ctx context.Context, role string, privileges []RolePrivilege, roles []interface{}) error { +func (client *mongoClient) CreateRole(ctx context.Context, db string, role Role) error { resp := OKResponse{} privilegesArr := bson.A{} - for _, p := range privileges { + for _, p := range role.Privileges { privilegesArr = append(privilegesArr, p) } rolesArr := bson.A{} - for _, r := range roles { + for _, r := range role.Roles { rolesArr = append(rolesArr, r) } + authRestrictionsArr := bson.A{} + for _, r := range role.AuthenticationRestrictions { + authRestrictionsArr = append(authRestrictionsArr, r) + } + m := bson.D{ - {Key: "createRole", Value: role}, + {Key: "createRole", Value: role.Role}, {Key: "privileges", Value: privilegesArr}, {Key: "roles", Value: rolesArr}, + {Key: "authenticationRestrictions", Value: authRestrictionsArr}, } - res := client.Database("admin").RunCommand(ctx, m) + res := client.Database(db).RunCommand(ctx, m) if res.Err() != nil { return errors.Wrap(res.Err(), "failed to create role") } @@ -194,26 +200,32 @@ func (client *mongoClient) CreateRole(ctx context.Context, role string, privileg return nil } -func (client *mongoClient) UpdateRole(ctx context.Context, role string, privileges []RolePrivilege, roles []interface{}) error { +func (client *mongoClient) UpdateRole(ctx context.Context, db string, role Role) error { resp := OKResponse{} privilegesArr := bson.A{} - for _, p := range privileges { + for _, p := range role.Privileges { privilegesArr = append(privilegesArr, p) } rolesArr := bson.A{} - for _, r := range roles { + for _, r := range role.Roles { rolesArr = append(rolesArr, r) } + authRestrictionsArr := bson.A{} + for _, r := range role.AuthenticationRestrictions { + authRestrictionsArr = append(authRestrictionsArr, r) + } + m := bson.D{ - {Key: "updateRole", Value: role}, + {Key: "updateRole", Value: role.Role}, {Key: "privileges", Value: privilegesArr}, {Key: 
"roles", Value: rolesArr}, + {Key: "authenticationRestrictions", Value: authRestrictionsArr}, } - res := client.Database("admin").RunCommand(ctx, m) + res := client.Database(db).RunCommand(ctx, m) if res.Err() != nil { return errors.Wrap(res.Err(), "failed to create role") } @@ -228,14 +240,16 @@ func (client *mongoClient) UpdateRole(ctx context.Context, role string, privileg } return nil + } -func (client *mongoClient) GetRole(ctx context.Context, role string) (*Role, error) { +func (client *mongoClient) GetRole(ctx context.Context, db, role string) (*Role, error) { resp := RoleInfo{} - res := client.Database("admin").RunCommand(ctx, bson.D{ + res := client.Database(db).RunCommand(ctx, bson.D{ {Key: "rolesInfo", Value: role}, {Key: "showPrivileges", Value: true}, + {Key: "showAuthenticationRestrictions", Value: true}, }) if res.Err() != nil { return nil, errors.Wrap(res.Err(), "run command") @@ -251,7 +265,19 @@ func (client *mongoClient) GetRole(ctx context.Context, role string) (*Role, err if len(resp.Roles) == 0 { return nil, nil } - return &resp.Roles[0], nil + + r := &Role{ + Role: role, + DB: resp.Roles[0].DB, + Roles: resp.Roles[0].Roles, + Privileges: resp.Roles[0].Privileges, + } + + if len(resp.Roles[0].AuthenticationRestrictions) > 0 { + r.AuthenticationRestrictions = resp.Roles[0].AuthenticationRestrictions[0] + } + + return r, nil } func (client *mongoClient) CreateUser(ctx context.Context, db, user, pwd string, roles ...map[string]interface{}) error { @@ -570,9 +596,9 @@ func (client *mongoClient) IsMaster(ctx context.Context) (*IsMasterResp, error) return &resp, nil } -func (client *mongoClient) GetUserInfo(ctx context.Context, username string) (*User, error) { +func (client *mongoClient) GetUserInfo(ctx context.Context, username, db string) (*User, error) { resp := UsersInfo{} - res := client.Database("admin").RunCommand(ctx, bson.D{{Key: "usersInfo", Value: username}}) + res := client.Database(db).RunCommand(ctx, bson.D{{Key: "usersInfo", Value: 
username}}) if res.Err() != nil { return nil, errors.Wrap(res.Err(), "run command") }