diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b671bc362a7b..fd6cbda5b073 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -139,6 +139,8 @@ jobs: arch: x86 - platform: darwin arch: x86_64 + - platform: darwin + arch: aarch64 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/templates/release.yml.jinja b/.github/workflows/templates/release.yml.jinja index ae1216ccbf05..b020138baff8 100644 --- a/.github/workflows/templates/release.yml.jinja +++ b/.github/workflows/templates/release.yml.jinja @@ -173,6 +173,8 @@ permissions: arch: x86 - platform: darwin arch: x86_64 + - platform: darwin + arch: aarch64 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/test-packages-action-macos.yml b/.github/workflows/test-packages-action-macos.yml index b027f62bca6d..4dac7599e348 100644 --- a/.github/workflows/test-packages-action-macos.yml +++ b/.github/workflows/test-packages-action-macos.yml @@ -122,7 +122,7 @@ jobs: uses: actions/download-artifact@v3 with: name: salt-${{ inputs.salt-version }}-${{ inputs.arch }}-${{ inputs.pkg-type }} - path: pkg/artifacts/ + path: artifacts/pkg/ - name: Install System Dependencies run: | @@ -130,7 +130,7 @@ jobs: - name: List Packages run: | - tree pkg/artifacts + tree artifacts/pkg/ - name: Download Onedir Tarball as an Artifact uses: actions/download-artifact@v3 @@ -214,6 +214,7 @@ jobs: name: pkg-testrun-artifacts-${{ inputs.distro-slug }}-${{ matrix.tests-chunk }} path: | artifacts + !artifacts/pkg/* !artifacts/salt/* !artifacts/salt-*.tar.* diff --git a/.github/workflows/test-packages-action.yml b/.github/workflows/test-packages-action.yml index 726565cc5689..fb77b855693c 100644 --- a/.github/workflows/test-packages-action.yml +++ b/.github/workflows/test-packages-action.yml @@ -133,7 +133,7 @@ jobs: uses: actions/download-artifact@v3 with: name: ${{ inputs.package-name }}-${{ inputs.salt-version }}-${{ inputs.arch }}-${{ inputs.pkg-type }} - 
path: pkg/artifacts/ + path: artifacts/pkg/ - name: Download Onedir Tarball as an Artifact uses: actions/download-artifact@v3 @@ -150,7 +150,7 @@ jobs: - name: List Packages run: | - tree pkg/artifacts + tree artifacts/pkg/ - name: Download cached nox.${{ inputs.distro-slug }}.tar.* for session ${{ inputs.nox-session }} uses: actions/cache@v3.3.1 @@ -230,6 +230,7 @@ jobs: name: pkg-testrun-artifacts-${{ inputs.distro-slug }}-${{ matrix.tests-chunk }} path: | artifacts + !artifacts/pkg/* !artifacts/salt/* !artifacts/salt-*.tar.* diff --git a/.gitignore b/.gitignore index 2cfa8e93c6e4..154a3decc157 100644 --- a/.gitignore +++ b/.gitignore @@ -119,7 +119,6 @@ kitchen.local.yml .bundle/ Gemfile.lock /artifacts/ -/pkg/artifacts/ requirements/static/*/py*/*.log # Vim's default session file diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b6fbc953aa8..0fd88914af8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,83 @@ Versions are `MAJOR.PATCH`. # Changelog +## 3006.5 (2023-12-12) + + +### Removed + +- Tech Debt - support for pysss removed due to functionality addition in Python 3.3 [#65029](https://github.com/saltstack/salt/issues/65029) + + +### Fixed + +- Improved error message when state arguments are accidentally passed as a string [#38098](https://github.com/saltstack/salt/issues/38098) +- Allow `pip.install` to create a log file that is passed in if the parent directory is writeable [#44722](https://github.com/saltstack/salt/issues/44722) +- Fixed merging of complex pillar overrides with salt-ssh states [#59802](https://github.com/saltstack/salt/issues/59802) +- Fixed gpg pillar rendering with salt-ssh [#60002](https://github.com/saltstack/salt/issues/60002) +- Made salt-ssh states not re-render pillars unnecessarily [#62230](https://github.com/saltstack/salt/issues/62230) +- Made Salt maintain options in Debian package repo definitions [#64130](https://github.com/saltstack/salt/issues/64130) +- Migrated all [`invoke`](https://www.pyinvoke.org/) tasks to 
[`python-tools-scripts`](https://github.com/s0undt3ch/python-tools-scripts). + + * `tasks/docs.py` -> `tools/precommit/docs.py` + * `tasks/docstrings.py` -> `tools/precommit/docstrings.py` + * `tasks/loader.py` -> `tools/precommit/loader.py` + * `tasks/filemap.py` -> `tools/precommit/filemap.py` [#64374](https://github.com/saltstack/salt/issues/64374) +- Fix salt user login shell path in Debian packages [#64377](https://github.com/saltstack/salt/issues/64377) +- Fill out lsb_distrib_xxxx (best estimate) grains if problems with retrieving lsb_release data [#64473](https://github.com/saltstack/salt/issues/64473) +- Fixed an issue in the ``file.directory`` state where the ``children_only`` keyword + argument was not being respected. [#64497](https://github.com/saltstack/salt/issues/64497) +- Move salt.ufw to correct location /etc/ufw/applications.d/ [#64572](https://github.com/saltstack/salt/issues/64572) +- Fixed salt-ssh stacktrace when retcode is not an integer [#64575](https://github.com/saltstack/salt/issues/64575) +- Fixed SSH shell seldomly fails to report any exit code [#64588](https://github.com/saltstack/salt/issues/64588) +- Fixed some issues in x509_v2 execution module private key functions [#64597](https://github.com/saltstack/salt/issues/64597) +- Fixed grp.getgrall() in utils/user.py causing performance issues [#64888](https://github.com/saltstack/salt/issues/64888) +- Fix user.list_groups omits remote groups via sssd, etc. 
[#64953](https://github.com/saltstack/salt/issues/64953) +- Ensure sync from _grains occurs before attempting pillar compilation in case custom grain used in pillar file [#65027](https://github.com/saltstack/salt/issues/65027) +- Moved gitfs locks to salt working dir to avoid lock wipes [#65086](https://github.com/saltstack/salt/issues/65086) +- Only attempt to create a keys directory when `--gen-keys` is passed to the `salt-key` CLI [#65093](https://github.com/saltstack/salt/issues/65093) +- Fix nonce verification, request server replies do not stomp on eachother. [#65114](https://github.com/saltstack/salt/issues/65114) +- speed up yumpkg list_pkgs by not requiring digest or signature verification on lookup. [#65152](https://github.com/saltstack/salt/issues/65152) +- Fix pkg.latest failing on windows for winrepo packages where the package is already up to date [#65165](https://github.com/saltstack/salt/issues/65165) +- Ensure __kwarg__ is preserved when checking for kwargs. This change affects proxy minions when used with Deltaproxy, which had kwargs popped when targeting multiple minions id. [#65179](https://github.com/saltstack/salt/issues/65179) +- Fixes traceback when state id is an int in a reactor SLS file. [#65210](https://github.com/saltstack/salt/issues/65210) +- Install logrotate config as /etc/logrotate.d/salt-common for Debian packages + Remove broken /etc/logrotate.d/salt directory from 3006.3 if it exists. [#65231](https://github.com/saltstack/salt/issues/65231) +- Use ``sha256`` as the default ``hash_type``. It has been the default since Salt v2016.9 [#65287](https://github.com/saltstack/salt/issues/65287) +- Preserve ownership on log rotation [#65288](https://github.com/saltstack/salt/issues/65288) +- Ensure that the correct value of jid_inclue is passed if the argument is included in the passed keyword arguments. [#65302](https://github.com/saltstack/salt/issues/65302) +- Uprade relenv to 0.14.2 + - Update openssl to address CVE-2023-5363. 
+ - Fix bug in openssl setup when openssl binary can't be found. + - Add M1 mac support. [#65316](https://github.com/saltstack/salt/issues/65316) +- Fix regex for filespec adding/deleting fcontext policy in selinux [#65340](https://github.com/saltstack/salt/issues/65340) +- Ensure CLI options take priority over Saltfile options [#65358](https://github.com/saltstack/salt/issues/65358) +- Test mode for state function `saltmod.wheel` no longer set's `result` to `(None,)` [#65372](https://github.com/saltstack/salt/issues/65372) +- Client only process events which tag conforms to an event return. [#65400](https://github.com/saltstack/salt/issues/65400) +- Fixes an issue setting user or machine policy on Windows when the Group Policy + directory is missing [#65411](https://github.com/saltstack/salt/issues/65411) +- Fix regression in file module which was not re-using a file client. [#65450](https://github.com/saltstack/salt/issues/65450) +- pip.installed state will now properly fail when a specified user does not exists [#65458](https://github.com/saltstack/salt/issues/65458) +- Publish channel connect callback method properly closes it's request channel. [#65464](https://github.com/saltstack/salt/issues/65464) +- Ensured the pillar in SSH wrapper modules is the same as the one used in template rendering when overrides are passed [#65483](https://github.com/saltstack/salt/issues/65483) +- Fix file.comment ignore_missing not working with multiline char [#65501](https://github.com/saltstack/salt/issues/65501) +- Warn when an un-closed transport client is being garbage collected. [#65554](https://github.com/saltstack/salt/issues/65554) +- Only generate the HMAC's for ``libssl.so.1.1`` and ``libcrypto.so.1.1`` if those files exist. 
[#65581](https://github.com/saltstack/salt/issues/65581) +- Fixed an issue where Salt Cloud would fail if it could not delete lingering + PAexec binaries [#65584](https://github.com/saltstack/salt/issues/65584) + + +### Added + +- Added Salt support for Debian 12 [#64223](https://github.com/saltstack/salt/issues/64223) +- Added Salt support for Amazon Linux 2023 [#64455](https://github.com/saltstack/salt/issues/64455) + + +### Security + +- Bump to `cryptography==41.0.4` due to https://github.com/advisories/GHSA-v8gr-m533-ghj9 [#65268](https://github.com/saltstack/salt/issues/65268) +- Bump to `cryptography==41.0.7` due to https://github.com/advisories/GHSA-jfhm-5ghh-2f97 [#65643](https://github.com/saltstack/salt/issues/65643) + ## 3006.4 (2023-10-16) diff --git a/changelog/38098.fixed.md b/changelog/38098.fixed.md deleted file mode 100644 index b40a693e456d..000000000000 --- a/changelog/38098.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Improved error message when state arguments are accidentally passed as a string diff --git a/changelog/44722.fixed.md b/changelog/44722.fixed.md deleted file mode 100644 index ad790a6d44f2..000000000000 --- a/changelog/44722.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Allow `pip.install` to create a log file that is passed in if the parent directory is writeable diff --git a/changelog/59802.fixed.md b/changelog/59802.fixed.md deleted file mode 100644 index e83222951c7d..000000000000 --- a/changelog/59802.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fixed merging of complex pillar overrides with salt-ssh states diff --git a/changelog/60002.fixed.md b/changelog/60002.fixed.md deleted file mode 100644 index 8d3869b7a3b7..000000000000 --- a/changelog/60002.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fixed gpg pillar rendering with salt-ssh diff --git a/changelog/62230.fixed.md b/changelog/62230.fixed.md deleted file mode 100644 index 8c83287a76fb..000000000000 --- a/changelog/62230.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Made salt-ssh states not re-render 
pillars unnecessarily diff --git a/changelog/64130.fixed.md b/changelog/64130.fixed.md deleted file mode 100644 index 3f99dd59f4a7..000000000000 --- a/changelog/64130.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Made Salt maintain options in Debian package repo definitions diff --git a/changelog/64223.added.md b/changelog/64223.added.md deleted file mode 100644 index a68be1cfe150..000000000000 --- a/changelog/64223.added.md +++ /dev/null @@ -1 +0,0 @@ -Added Salt support for Debian 12 diff --git a/changelog/64374.fixed.md b/changelog/64374.fixed.md deleted file mode 100644 index 31dfc9b1b1dd..000000000000 --- a/changelog/64374.fixed.md +++ /dev/null @@ -1,6 +0,0 @@ -Migrated all [`invoke`](https://www.pyinvoke.org/) tasks to [`python-tools-scripts`](https://github.com/s0undt3ch/python-tools-scripts). - -* `tasks/docs.py` -> `tools/precommit/docs.py` -* `tasks/docstrings.py` -> `tools/precommit/docstrings.py` -* `tasks/loader.py` -> `tools/precommit/loader.py` -* `tasks/filemap.py` -> `tools/precommit/filemap.py` diff --git a/changelog/64377.fixed.md b/changelog/64377.fixed.md deleted file mode 100644 index aa88ae79649c..000000000000 --- a/changelog/64377.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fix salt user login shell path in Debian packages diff --git a/changelog/64455.added.md b/changelog/64455.added.md deleted file mode 100644 index 8885a93e59f1..000000000000 --- a/changelog/64455.added.md +++ /dev/null @@ -1 +0,0 @@ -Added Salt support for Amazon Linux 2023 diff --git a/changelog/64473.fixed.md b/changelog/64473.fixed.md deleted file mode 100644 index 411d90bf9be3..000000000000 --- a/changelog/64473.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fill out lsb_distrib_xxxx (best estimate) grains if problems with retrieving lsb_release data diff --git a/changelog/64497.fixed.md b/changelog/64497.fixed.md deleted file mode 100644 index 2d90737562df..000000000000 --- a/changelog/64497.fixed.md +++ /dev/null @@ -1,2 +0,0 @@ -Fixed an issue in the ``file.directory`` state where the 
``children_only`` keyword -argument was not being respected. diff --git a/changelog/64572.fixed.md b/changelog/64572.fixed.md deleted file mode 100644 index d9916bb29faf..000000000000 --- a/changelog/64572.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Move salt.ufw to correct location /etc/ufw/applications.d/ diff --git a/changelog/64575.fixed.md b/changelog/64575.fixed.md deleted file mode 100644 index 71ff76ea9d41..000000000000 --- a/changelog/64575.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fixed salt-ssh stacktrace when retcode is not an integer diff --git a/changelog/64588.fixed.md b/changelog/64588.fixed.md deleted file mode 100644 index bf9def4eb4e4..000000000000 --- a/changelog/64588.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fixed SSH shell seldomly fails to report any exit code diff --git a/changelog/64597.fixed.md b/changelog/64597.fixed.md deleted file mode 100644 index 1810ce5ebb8f..000000000000 --- a/changelog/64597.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fixed some issues in x509_v2 execution module private key functions diff --git a/changelog/64888.fixed.md b/changelog/64888.fixed.md deleted file mode 100644 index 08b2efd0424c..000000000000 --- a/changelog/64888.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fixed grp.getgrall() in utils/user.py causing performance issues diff --git a/changelog/64953.fixed.md b/changelog/64953.fixed.md deleted file mode 100644 index f0b1ed46f19f..000000000000 --- a/changelog/64953.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fix user.list_groups omits remote groups via sssd, etc. 
diff --git a/changelog/65027.fixed.md b/changelog/65027.fixed.md deleted file mode 100644 index 43289c688b28..000000000000 --- a/changelog/65027.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Ensure sync from _grains occurs before attempting pillar compilation in case custom grain used in pillar file diff --git a/changelog/65029.removed.md b/changelog/65029.removed.md deleted file mode 100644 index d09f10b4ba31..000000000000 --- a/changelog/65029.removed.md +++ /dev/null @@ -1 +0,0 @@ -Tech Debt - support for pysss removed due to functionality addition in Python 3.3 diff --git a/changelog/65086.fixed.md b/changelog/65086.fixed.md deleted file mode 100644 index 292930f0fd46..000000000000 --- a/changelog/65086.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Moved gitfs locks to salt working dir to avoid lock wipes diff --git a/changelog/65093.fixed.md b/changelog/65093.fixed.md deleted file mode 100644 index 55a5308689ce..000000000000 --- a/changelog/65093.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Only attempt to create a keys directory when `--gen-keys` is passed to the `salt-key` CLI diff --git a/changelog/65114.fixed.md b/changelog/65114.fixed.md deleted file mode 100644 index fb4f2ab15361..000000000000 --- a/changelog/65114.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fix nonce verification, request server replies do not stomp on eachother. diff --git a/changelog/65152.fixed.md b/changelog/65152.fixed.md deleted file mode 100644 index dfa2dac34622..000000000000 --- a/changelog/65152.fixed.md +++ /dev/null @@ -1 +0,0 @@ -speed up yumpkg list_pkgs by not requiring digest or signature verification on lookup. 
diff --git a/changelog/65165.fixed.md b/changelog/65165.fixed.md deleted file mode 100644 index 7b981a517f80..000000000000 --- a/changelog/65165.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fix pkg.latest failing on windows for winrepo packages where the package is already up to date diff --git a/changelog/65179.fixed.md b/changelog/65179.fixed.md deleted file mode 100644 index 0e3239465500..000000000000 --- a/changelog/65179.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Ensure __kwarg__ is preserved when checking for kwargs. This change affects proxy minions when used with Deltaproxy, which had kwargs popped when targeting multiple minions id. diff --git a/changelog/65193.fixed.md b/changelog/65193.fixed.md new file mode 100644 index 000000000000..48a7e76e461d --- /dev/null +++ b/changelog/65193.fixed.md @@ -0,0 +1,2 @@ +Fix issue with openscap when the error was outside the expected scope. It now +returns failed with the error code and the error diff --git a/changelog/65210.fixed.md b/changelog/65210.fixed.md deleted file mode 100644 index 7fe7b21dcbe3..000000000000 --- a/changelog/65210.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fixes traceback when state id is an int in a reactor SLS file. diff --git a/changelog/65231.fixed.md b/changelog/65231.fixed.md deleted file mode 100644 index 50d225e7452f..000000000000 --- a/changelog/65231.fixed.md +++ /dev/null @@ -1,2 +0,0 @@ -Install logrotate config as /etc/logrotate.d/salt-common for Debian packages -Remove broken /etc/logrotate.d/salt directory from 3006.3 if it exists. 
diff --git a/changelog/65268.security.md b/changelog/65268.security.md deleted file mode 100644 index 15588570ad69..000000000000 --- a/changelog/65268.security.md +++ /dev/null @@ -1 +0,0 @@ -Bump to `cryptography==41.0.4` due to https://github.com/advisories/GHSA-v8gr-m533-ghj9 diff --git a/changelog/65287.fixed.md b/changelog/65287.fixed.md deleted file mode 100644 index e075d251820c..000000000000 --- a/changelog/65287.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Use ``sha256`` as the default ``hash_type``. It has been the default since Salt v2016.9 diff --git a/changelog/65288.fixed.md b/changelog/65288.fixed.md deleted file mode 100644 index 885812433826..000000000000 --- a/changelog/65288.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Preserve ownership on log rotation diff --git a/changelog/65302.fixed.md b/changelog/65302.fixed.md deleted file mode 100644 index 087bd27314e7..000000000000 --- a/changelog/65302.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Ensure that the correct value of jid_inclue is passed if the argument is included in the passed keyword arguments. diff --git a/changelog/65316.fixed.md b/changelog/65316.fixed.md deleted file mode 100644 index f5f9e197e300..000000000000 --- a/changelog/65316.fixed.md +++ /dev/null @@ -1,4 +0,0 @@ -Uprade relenv to 0.14.2 - - Update openssl to address CVE-2023-5363. - - Fix bug in openssl setup when openssl binary can't be found. - - Add M1 mac support. 
diff --git a/changelog/65340.fixed.md b/changelog/65340.fixed.md deleted file mode 100644 index ed26da9f3cd5..000000000000 --- a/changelog/65340.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fix regex for filespec adding/deleting fcontext policy in selinux diff --git a/changelog/65358.fixed.md b/changelog/65358.fixed.md deleted file mode 100644 index 9a9acc31b4de..000000000000 --- a/changelog/65358.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Ensure CLI options take priority over Saltfile options diff --git a/changelog/65372.fixed.md b/changelog/65372.fixed.md deleted file mode 100644 index 3ccf3d6578e2..000000000000 --- a/changelog/65372.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Test mode for state function `saltmod.wheel` no longer set's `result` to `(None,)` diff --git a/changelog/65400.fixed.md b/changelog/65400.fixed.md deleted file mode 100644 index ae21abac9fe0..000000000000 --- a/changelog/65400.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Client only process events which tag conforms to an event return. diff --git a/changelog/65411.fixed.md b/changelog/65411.fixed.md deleted file mode 100644 index 0500a7364ee1..000000000000 --- a/changelog/65411.fixed.md +++ /dev/null @@ -1,2 +0,0 @@ -Fixes an issue setting user or machine policy on Windows when the Group Policy -directory is missing diff --git a/changelog/65450.fixed.md b/changelog/65450.fixed.md deleted file mode 100644 index c680d37692ce..000000000000 --- a/changelog/65450.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fix regression in file module which was not re-using a file client. 
diff --git a/changelog/65458.fixed.md b/changelog/65458.fixed.md deleted file mode 100644 index 61cc57df9ca7..000000000000 --- a/changelog/65458.fixed.md +++ /dev/null @@ -1 +0,0 @@ -pip.installed state will now properly fail when a specified user does not exists diff --git a/changelog/65464.fixed.md b/changelog/65464.fixed.md deleted file mode 100644 index a931b6a64456..000000000000 --- a/changelog/65464.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Publish channel connect callback method properly closes it's request channel. diff --git a/changelog/65483.fixed.md b/changelog/65483.fixed.md deleted file mode 100644 index 8092c6072d34..000000000000 --- a/changelog/65483.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Ensured the pillar in SSH wrapper modules is the same as the one used in template rendering when overrides are passed diff --git a/changelog/65501.fixed.md b/changelog/65501.fixed.md deleted file mode 100644 index 31592c67e701..000000000000 --- a/changelog/65501.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Fix file.comment ignore_missing not working with multiline char diff --git a/changelog/65554.fixed.md b/changelog/65554.fixed.md deleted file mode 100644 index 6d1598217e3b..000000000000 --- a/changelog/65554.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Warn when an un-closed transport client is being garbage collected. diff --git a/changelog/65581.fixed.md b/changelog/65581.fixed.md deleted file mode 100644 index 3ac7427b6981..000000000000 --- a/changelog/65581.fixed.md +++ /dev/null @@ -1 +0,0 @@ -Only generate the HMAC's for ``libssl.so.1.1`` and ``libcrypto.so.1.1`` if those files exist. 
diff --git a/changelog/65584.fixed.md b/changelog/65584.fixed.md deleted file mode 100644 index 1da48b32bb04..000000000000 --- a/changelog/65584.fixed.md +++ /dev/null @@ -1,2 +0,0 @@ -Fixed an issue where Salt Cloud would fail if it could not delete lingering -PAexec binaries diff --git a/changelog/65643.security.md b/changelog/65643.security.md deleted file mode 100644 index 19ee102d1d2f..000000000000 --- a/changelog/65643.security.md +++ /dev/null @@ -1 +0,0 @@ -Bump to `cryptography==41.0.7` due to https://github.com/advisories/GHSA-jfhm-5ghh-2f97 diff --git a/changelog/65670.fixed.md b/changelog/65670.fixed.md new file mode 100644 index 000000000000..54728d69d43a --- /dev/null +++ b/changelog/65670.fixed.md @@ -0,0 +1 @@ +Fixed Salt-SSH pillar rendering and state rendering with nested SSH calls when called via saltutil.cmd or in an orchestration diff --git a/doc/man/salt-api.1 b/doc/man/salt-api.1 index cfbe64d172ad..214422b243a4 100644 --- a/doc/man/salt-api.1 +++ b/doc/man/salt-api.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT-API" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT-API" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." "3006.5" "Salt" .SH NAME salt-api \- salt-api Command .sp diff --git a/doc/man/salt-call.1 b/doc/man/salt-call.1 index 2a964330511f..2e6698b8d4ac 100644 --- a/doc/man/salt-call.1 +++ b/doc/man/salt-call.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT-CALL" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT-CALL" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." 
"3006.5" "Salt" .SH NAME salt-call \- salt-call Documentation .SH SYNOPSIS diff --git a/doc/man/salt-cloud.1 b/doc/man/salt-cloud.1 index e3866a63e31e..7dc0450cc5cd 100644 --- a/doc/man/salt-cloud.1 +++ b/doc/man/salt-cloud.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT-CLOUD" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT-CLOUD" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." "3006.5" "Salt" .SH NAME salt-cloud \- Salt Cloud Command .sp diff --git a/doc/man/salt-cp.1 b/doc/man/salt-cp.1 index 0ad964aaf7b5..015e1b3461ce 100644 --- a/doc/man/salt-cp.1 +++ b/doc/man/salt-cp.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT-CP" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT-CP" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." "3006.5" "Salt" .SH NAME salt-cp \- salt-cp Documentation .sp diff --git a/doc/man/salt-key.1 b/doc/man/salt-key.1 index 913c2cf5b9ed..f346507ed58d 100644 --- a/doc/man/salt-key.1 +++ b/doc/man/salt-key.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT-KEY" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT-KEY" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." 
"3006.5" "Salt" .SH NAME salt-key \- salt-key Documentation .SH SYNOPSIS diff --git a/doc/man/salt-master.1 b/doc/man/salt-master.1 index b8bd9056ff33..1b54882a9b2e 100644 --- a/doc/man/salt-master.1 +++ b/doc/man/salt-master.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT-MASTER" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT-MASTER" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." "3006.5" "Salt" .SH NAME salt-master \- salt-master Documentation .sp diff --git a/doc/man/salt-minion.1 b/doc/man/salt-minion.1 index 5fb106bf3f29..d02a0bfa2e54 100644 --- a/doc/man/salt-minion.1 +++ b/doc/man/salt-minion.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT-MINION" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT-MINION" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." "3006.5" "Salt" .SH NAME salt-minion \- salt-minion Documentation .sp diff --git a/doc/man/salt-proxy.1 b/doc/man/salt-proxy.1 index 97fdfa6c6ae5..0f5b3b8e40e3 100644 --- a/doc/man/salt-proxy.1 +++ b/doc/man/salt-proxy.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT-PROXY" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT-PROXY" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." 
"3006.5" "Salt" .SH NAME salt-proxy \- salt-proxy Documentation .sp diff --git a/doc/man/salt-run.1 b/doc/man/salt-run.1 index 6954234ae0c0..1535cbde86c5 100644 --- a/doc/man/salt-run.1 +++ b/doc/man/salt-run.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT-RUN" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT-RUN" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." "3006.5" "Salt" .SH NAME salt-run \- salt-run Documentation .sp diff --git a/doc/man/salt-ssh.1 b/doc/man/salt-ssh.1 index b085a6a6cde6..5771453ad143 100644 --- a/doc/man/salt-ssh.1 +++ b/doc/man/salt-ssh.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT-SSH" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT-SSH" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." "3006.5" "Salt" .SH NAME salt-ssh \- salt-ssh Documentation .SH SYNOPSIS diff --git a/doc/man/salt-syndic.1 b/doc/man/salt-syndic.1 index 5e26b223e349..7a42e21c7676 100644 --- a/doc/man/salt-syndic.1 +++ b/doc/man/salt-syndic.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT-SYNDIC" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT-SYNDIC" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." 
"3006.5" "Salt" .SH NAME salt-syndic \- salt-syndic Documentation .sp diff --git a/doc/man/salt.1 b/doc/man/salt.1 index 852508174cc9..127fce42145c 100644 --- a/doc/man/salt.1 +++ b/doc/man/salt.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." "3006.5" "Salt" .SH NAME salt \- salt .SH SYNOPSIS diff --git a/doc/man/salt.7 b/doc/man/salt.7 index 7457d9dd5f48..a2232d27d5ae 100644 --- a/doc/man/salt.7 +++ b/doc/man/salt.7 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SALT" "7" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SALT" "7" "Generated on December 12, 2023 at 05:54:17 PM UTC." "3006.5" "Salt" .SH NAME salt \- Salt Documentation .SH SALT PROJECT @@ -1404,6 +1404,9 @@ If someone isn\(aqt an expert in this area, what will they need to know? .sp This will also help you out, because when you go to create the PR it will automatically insert the body of your commit messages. +.sp +See the \fI\%changelog\fP +docs for more information. .SS Pull request time! .sp Once you\(aqve done all your dev work and tested locally, you should check @@ -7880,6 +7883,13 @@ log_level: warning .fi .UNINDENT .UNINDENT +.sp +Any log level below the \fIinfo\fP level is INSECURE and may log sensitive data. This currently includes: +#. profile +#. debug +#. trace +#. garbage +#. all .SS \fBlog_level_logfile\fP .sp Default: \fBwarning\fP @@ -7897,6 +7907,13 @@ log_level_logfile: warning .fi .UNINDENT .UNINDENT +.sp +Any log level below the \fIinfo\fP level is INSECURE and may log sensitive data. This currently includes: +#. profile +#. 
debug +#. trace +#. garbage +#. all .SS \fBlog_datefmt\fP .sp Default: \fB%H:%M:%S\fP @@ -12644,6 +12661,13 @@ log_level: warning .fi .UNINDENT .UNINDENT +.sp +Any log level below the \fIinfo\fP level is INSECURE and may log sensitive data. This currently includes: +#. profile +#. debug +#. trace +#. garbage +#. all .SS \fBlog_level_logfile\fP .sp Default: \fBwarning\fP @@ -12661,6 +12685,13 @@ log_level_logfile: warning .fi .UNINDENT .UNINDENT +.sp +Any log level below the \fIinfo\fP level is INSECURE and may log sensitive data. This currently includes: +#. profile +#. debug +#. trace +#. garbage +#. all .SS \fBlog_datefmt\fP .sp Default: \fB%H:%M:%S\fP @@ -15144,7 +15175,7 @@ For reference, see: # One of \(aqgarbage\(aq, \(aqtrace\(aq, \(aqdebug\(aq, info\(aq, \(aqwarning\(aq, \(aqerror\(aq, \(aqcritical\(aq. # # The following log levels are considered INSECURE and may log sensitive data: -# [\(aqgarbage\(aq, \(aqtrace\(aq, \(aqdebug\(aq] +# [\(aqprofile\(aq, \(aqgarbage\(aq, \(aqtrace\(aq, \(aqdebug\(aq, \(aqall\(aq] # #log_level: warning @@ -16125,7 +16156,7 @@ For reference, see: # One of \(aqgarbage\(aq, \(aqtrace\(aq, \(aqdebug\(aq, \(aqinfo\(aq, \(aqwarning\(aq, \(aqerror\(aq, \(aqcritical\(aq. # # The following log levels are considered INSECURE and may log sensitive data: -# [\(aqgarbage\(aq, \(aqtrace\(aq, \(aqdebug\(aq] +# [\(aqprofile\(aq, \(aqgarbage\(aq, \(aqtrace\(aq, \(aqdebug\(aq, \(aqall\(aq] # # Default: \(aqwarning\(aq #log_level: warning @@ -16836,7 +16867,7 @@ For reference, see: # One of \(aqgarbage\(aq, \(aqtrace\(aq, \(aqdebug\(aq, \(aqinfo\(aq, \(aqwarning\(aq, \(aqerror\(aq, \(aqcritical\(aq. 
# # The following log levels are considered INSECURE and may log sensitive data: -# [\(aqgarbage\(aq, \(aqtrace\(aq, \(aqdebug\(aq] +# [\(aqprofile\(aq, \(aqgarbage\(aq, \(aqtrace\(aq, \(aqdebug\(aq, \(aqall\(aq] # # Default: \(aqwarning\(aq #log_level: warning @@ -19003,6 +19034,13 @@ Everything T} _ .TE +.sp +Any log level below the \fIinfo\fP level is INSECURE and may log sensitive data. This currently includes: +#. profile +#. debug +#. trace +#. garbage +#. all .SS Available Configuration Settings .SS \fBlog_file\fP .sp @@ -23710,7 +23748,7 @@ most secure setup, only connect syndics directly to master of masters. .INDENT 0.0 .TP .B email -\fI\%saltproject-security.pdl@broadcom.com\fP +\fI\%saltproject\-security.pdl@broadcom.com\fP .TP .B gpg key ID 4EA0793D @@ -23831,7 +23869,7 @@ fwPKmQ2cKnCBs5ASj1DkgUcz2c8DTUPVqg== .UNINDENT .UNINDENT .sp -The SaltStack Security Team is available at \fI\%saltproject-security.pdl@broadcom.com\fP for +The SaltStack Security Team is available at \fI\%saltproject\-security.pdl@broadcom.com\fP for security\-related bug reports or questions. .sp We request the disclosure of any security\-related bugs or issues be reported @@ -23845,7 +23883,7 @@ seriously. Our disclosure policy is intended to resolve security issues as quickly and safely as is possible. .INDENT 0.0 .IP 1. 3 -A security report sent to \fI\%saltproject-security.pdl@broadcom.com\fP is assigned to a team +A security report sent to \fI\%saltproject\-security.pdl@broadcom.com\fP is assigned to a team member. This person is the primary contact for questions and will coordinate the fix, release, and announcement. .IP 2. 3 @@ -57834,7 +57872,7 @@ jim_nologin: .UNINDENT .UNINDENT -.SS Creates +.SS creates .sp New in version 3001. @@ -85544,6 +85582,243 @@ tools pkg build windows \-\-salt\-version \-\-arch .sp The python library is available in the install directory of the onedir package. 
For example on linux the default location would be \fB/opt/saltstack/salt/bin/python3\fP\&. +.SS Testing the packages +.sp +If you want to test your built packages, or any other collection of salt packages post 3006.0, follow \fI\%this guide\fP +.SS Testing packages +.SS The package test suite +.sp +The salt repo provides a test suite for testing basic functionality of our +packages at \fB/pkg/tests/\fP\&. You can run the install, upgrade, and +downgrade tests. These tests run automatically on most PRs that are submitted +against Salt. +.sp +\fBWARNING:\fP +.INDENT 0.0 +.INDENT 3.5 +These tests make destructive changes to your system because they install the +built packages onto the system. They may also install older versions in the +case of upgrades or downgrades. To prevent destructive changes, run the +tests in an isolated system, preferably a virtual machine. +.UNINDENT +.UNINDENT +.SS Setup +.sp +In order to run the package tests, the \fI\%relenv\fP onedir and +built packages need to be placed in the correct locations. +.INDENT 0.0 +.IP \(bu 2 +Place all salt packages for the applicable testing version in +\fB/pkg/artifacts/\fP\&. +.IP \(bu 2 +The onedir must be located under \fB/artifacts/\fP\&. +.IP \(bu 2 +Additionally, to ensure complete parity with Salt\(aqs CI/CD suite, place the +\fBnox\fP virtual environment in \fB/.nox/test\-pkgs\-onedir\fP\&. +.UNINDENT +.sp +The following are a few ways this can be accomplished easily. +.sp +You can ensure parity by installing the package test suite through a few +possible methods: +.INDENT 0.0 +.IP \(bu 2 +Using \fBtools\fP +.IP \(bu 2 +Downloading individually +.UNINDENT +.SS Using \fBtools\fP +.sp +Salt has preliminary support for setting up the package test suite in the +\fBtools\fP command suite that is located under \fB/tools/testsuite/\fP\&. +This method requires the Github CLI tool \fBgh\fP (\fI\%https://cli.github.com/\fP) to be properly configured for +interaction with the salt repo. +.INDENT 0.0 +.IP 1. 
3 +Install the dependencies using this command: +.INDENT 3.0 +.INDENT 3.5 +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pip install \-r requirements/static/ci/py{python_version}/tools.txt +.ft P +.fi +.UNINDENT +.UNINDENT +.UNINDENT +.UNINDENT +.IP 2. 3 +Download and extract the artifacts with this \fBtools\fP command: +.INDENT 3.0 +.INDENT 3.5 +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +tools ts setup \-\-platform {linux|darwin|windows} \-\-slug + \-\-pr \-\-pkg +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +The most common use case is to test the packages built on a CI/CD run for a +given PR. To see the possible options for each argument, and other ways to +utilize this command, use the following: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +tools ts setup \-h +.ft P +.fi +.UNINDENT +.UNINDENT +.UNINDENT +.UNINDENT +.UNINDENT +.sp +\fBWARNING:\fP +.INDENT 0.0 +.INDENT 3.5 +You can only download artifacts from finished workflow runs. This is something +imposed by the GitHub API. +To download artifacts from a running workflow run, you either have to wait for +the finish or cancel it. +.UNINDENT +.UNINDENT +.SS Downloading individually +.sp +If the \fBtools ts setup\fP command doesn\(aqt work, you can download, unzip, and +place the artifacts in the correct locations manually. Typically, you want to +test packages built on a CI/CD run for a given PR. This guide explains how to +set up for running the package tests using those artifacts. An analogous process +can be performed for artifacts from nightly builds. +.INDENT 0.0 +.IP 1. 3 +Find and download the artifacts: +.INDENT 3.0 +.INDENT 3.5 +Under the summary page for the most recent actions run for that PR, there is +a list of available artifacts from that run that can be downloaded. Download +the package artifacts by finding +\fBsalt\-.+.\-\-\fP\&. For example, the +amd64 deb packages might look like: +\fBsalt\-3006.2+123.01234567890\-x86_64\-deb\fP\&. 
+.sp +The onedir artifact will look like +\fBsalt\-.+.\-onedir\-\-.tar.xz\fP\&. For +instance, the macos x86_64 onedir may have the name +\fBsalt\-3006.2+123.01234567890\-onedir\-darwin\-x86_64.tar.xz\fP\&. +.sp +\fBNOTE:\fP +.INDENT 0.0 +.INDENT 3.5 +Windows onedir artifacts have \fB\&.zip\fP extensions instead of \fBtar.xz\fP +.UNINDENT +.UNINDENT +.sp +While it is optional, it is recommended to download the \fBnox\fP session +artifact as well. This will have the form of +\fBnox\-\-test\-pkgs\-onedir\-\fP\&. The amd64 Ubuntu 20.04 nox +artifact may look like \fBnox\-ubuntu\-20.04\-test\-pkgs\-onedir\-x86_64\fP\&. +.UNINDENT +.UNINDENT +.IP 2. 3 +Place the artifacts in the correct location: +.INDENT 3.0 +.INDENT 3.5 +Unzip the packages and place them in \fB/pkg/artifacts/\fP\&. +.sp +You must unzip and untar the onedir packages and place them in +\fB/artifacts/\fP\&. Windows onedir requires an additional unzip +action. If you set it up correctly, the \fB/artifacts/salt\fP +directory then contains the uncompressed onedir files. +.sp +Additionally, decompress the \fBnox\fP artifact and place it under +\fB/.nox/\fP\&. +.UNINDENT +.UNINDENT +.UNINDENT +.SS Running the tests +.sp +You can run the test suite run if all the artifacts are in the correct location. +.sp +\fBNOTE:\fP +.INDENT 0.0 +.INDENT 3.5 +You need root access to run the test artifacts. Run all nox commands at the +root of the salt repo and as the root user. +.UNINDENT +.UNINDENT +.INDENT 0.0 +.IP 1. 3 +Install \fBnox\fP: +.INDENT 3.0 +.INDENT 3.5 +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pip install nox +.ft P +.fi +.UNINDENT +.UNINDENT +.UNINDENT +.UNINDENT +.IP 2. 3 +Run the install tests: +.INDENT 3.0 +.INDENT 3.5 +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +nox \-e test\-pkgs\-onedir \-\- install +.ft P +.fi +.UNINDENT +.UNINDENT +.UNINDENT +.UNINDENT +.IP 3. 
3 +Run the upgrade or downgrade tests: +.INDENT 3.0 +.INDENT 3.5 +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +nox \-e test\-pkgs\-onedir \-\- upgrade \-\-prev\-version +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +You can run the downgrade tests in the same way, replacing \fBupgrade\fP with +\fBdowngrade\fP\&. +.sp +\fBNOTE:\fP +.INDENT 0.0 +.INDENT 3.5 +If you are testing upgrades or downgrades and classic packages are +available for your system, replace \fBupgrade\fP or +\fBdowngrade\fP with \fBupgrade\-classic\fP or \fBdowngrade\-classic\fP +respectively to test against those versions. +.UNINDENT +.UNINDENT +.UNINDENT +.UNINDENT +.UNINDENT .SH COMMAND LINE REFERENCE .SS salt\-api .SS \fBsalt\-api\fP @@ -88275,19 +88550,6 @@ The Python interface to PAM does not support authenticating as \fBroot\fP\&. \fBNOTE:\fP .INDENT 0.0 .INDENT 3.5 -Using PAM groups with SSSD groups on python2. -.sp -To use sssd with the PAM eauth module and groups the \fIpysss\fP module is -needed. On RedHat/CentOS this is \fIpython\-sss\fP\&. -.sp -This should not be needed with python >= 3.3, because the \fIos\fP modules has the -\fIgetgrouplist\fP function. -.UNINDENT -.UNINDENT -.sp -\fBNOTE:\fP -.INDENT 0.0 -.INDENT 3.5 This module executes itself in a subprocess in order to user the system python and pam libraries. We do this to avoid openssl version conflicts when running under a salt onedir build. 
@@ -194128,7 +194390,7 @@ Passes through all the parameters described in the \fI\%utils.http.query function\fP: .INDENT 7.0 .TP -.B salt.utils.http.query(url, method=\(aqGET\(aq, params=None, data=None, data_file=None, header_dict=None, header_list=None, header_file=None, username=None, password=None, auth=None, decode=False, decode_type=\(aqauto\(aq, status=False, headers=False, text=False, cookies=None, cookie_jar=None, cookie_format=\(aqlwp\(aq, persist_session=False, session_cookie_jar=None, data_render=False, data_renderer=None, header_render=False, header_renderer=None, template_dict=None, test=False, test_url=None, node=\(aqminion\(aq, port=80, opts=None, backend=None, ca_bundle=None, verify_ssl=None, cert=None, text_out=None, headers_out=None, decode_out=None, stream=False, streaming_callback=None, header_callback=None, handle=False, agent=\(aqSalt/3006.4\(aq, hide_fields=None, raise_error=True, formdata=False, formdata_fieldname=None, formdata_filename=None, decode_body=True, **kwargs) +.B salt.utils.http.query(url, method=\(aqGET\(aq, params=None, data=None, data_file=None, header_dict=None, header_list=None, header_file=None, username=None, password=None, auth=None, decode=False, decode_type=\(aqauto\(aq, status=False, headers=False, text=False, cookies=None, cookie_jar=None, cookie_format=\(aqlwp\(aq, persist_session=False, session_cookie_jar=None, data_render=False, data_renderer=None, header_render=False, header_renderer=None, template_dict=None, test=False, test_url=None, node=\(aqminion\(aq, port=80, opts=None, backend=None, ca_bundle=None, verify_ssl=None, cert=None, text_out=None, headers_out=None, decode_out=None, stream=False, streaming_callback=None, header_callback=None, handle=False, agent=\(aqSalt/3006.5\(aq, hide_fields=None, raise_error=True, formdata=False, formdata_fieldname=None, formdata_filename=None, decode_body=True, **kwargs) Query a resource, and decode the return data .UNINDENT .INDENT 7.0 @@ -261773,7 +262035,9 @@ Accepts either 
:all: to disable all binary packages, :none: to empty the set, or one or more package names with commas between them .TP .B log -Log file where a complete (maximum verbosity) record will be kept +Log file where a complete (maximum verbosity) record will be kept. +If this file doesn\(aqt exist and the parent directory is writeable, +it will be created. .TP .B proxy Specify a proxy in the form \fBuser:passwd@proxy.server:port\fP\&. Note @@ -320458,7 +320722,7 @@ CLI Example: .sp .nf .ft C -salt \(aq*\(aq file.chpgrp c:\etemp\etest.txt administrators +salt \(aq*\(aq file.chgrp c:\etemp\etest.txt administrators .ft P .fi .UNINDENT @@ -337948,8 +338212,8 @@ When encoding a certificate as \fBpkcs12\fP, a name for the certificate can be i Instead of returning the certificate, write it to this file path. .TP .B overwrite -If \fBpath\fP is specified and the file exists, do not overwrite it. -Defaults to false. +If \fBpath\fP is specified and the file exists, overwrite it. +Defaults to true. .TP .B raw Return the encoded raw bytes instead of a string. Defaults to false. @@ -338406,7 +338670,7 @@ Available: \fBrsa\fP, \fBec\fP, \fBed25519\fP, \fBed448\fP\&. Defaults to \fBrsa .B keysize For \fBrsa\fP, specifies the bitlength of the private key (2048, 3072, 4096). For \fBec\fP, specifies the NIST curve to use (256, 384, 521). -Irrelevant for Edwards\-curve schemes (\fIed25519\(ga\fP, \fBed448\fP). +Irrelevant for Edwards\-curve schemes (\fBed25519\fP, \fBed448\fP). Defaults to 2048 for RSA and 256 for EC. .TP .B passphrase @@ -338559,7 +338823,7 @@ Return the encoded raw bytes instead of a string. Defaults to false. 
.UNINDENT .INDENT 0.0 .TP -.B salt.modules.x509_v2.encode_private_key(private_key, encoding=\(aqpem\(aq, passphrase=None, pkcs12_encryption_compat=False, raw=False) +.B salt.modules.x509_v2.encode_private_key(private_key, encoding=\(aqpem\(aq, passphrase=None, private_key_passphrase=None, pkcs12_encryption_compat=False, raw=False) Create an encoded representation of a private key. .sp CLI Example: @@ -338575,7 +338839,7 @@ salt \(aq*\(aq x509.encode_private_key /etc/pki/my.key der .UNINDENT .INDENT 7.0 .TP -.B csr +.B private_key The private key to encode. .TP .B encoding @@ -338583,6 +338847,24 @@ Specify the encoding of the resulting private key. It can be returned as a \fBpem\fP string, base64\-encoded \fBder\fP and base64\-encoded \fBpkcs12\fP\&. Defaults to \fBpem\fP\&. .TP +.B passphrase +If this is specified, the private key will be encrypted using this +passphrase. The encryption algorithm cannot be selected, it will be +determined automatically as the best available one. +.TP +.B private_key_passphrase +New in version 3006.2. + +.sp +If the current \fBprivate_key\fP is encrypted, the passphrase to +decrypt it. +.TP +.B pkcs12_encryption_compat +Some operating systems are incompatible with the encryption defaults +for PKCS12 used since OpenSSL v3. This switch triggers a fallback to +\fBPBESv1SHA1And3KeyTripleDESCBC\fP\&. +Please consider the \fI\%notes on PKCS12 encryption\fP\&. +.TP .B raw Return the encoded raw bytes instead of a string. Defaults to false. .UNINDENT @@ -354046,7 +354328,7 @@ curl \-sSi localhost:8000/minions \e POST /minions HTTP/1.1 Host: localhost:8000 Accept: application/x\-yaml -Content\-Type: application/json +Content\-Type: application/x\-www\-form\-urlencoded tgt=*&fun=status.diskusage .ft P @@ -406792,6 +407074,9 @@ specified either using \fBrepo:tag\fP notation, or just the repo name (in which case a tag of \fBlatest\fP is assumed). .INDENT 7.0 .TP +.B name +The name of the docker image. 
+.TP .B images Run this state on more than one image at a time. The following two examples accomplish the same thing: @@ -406831,7 +407116,6 @@ all the deletions in a single run, rather than executing the state separately on each image (as it would in the first example). .TP .B force -False Salt will fail to remove any images currently in use by a container. Set this option to true to remove the image even if it is already present. @@ -406908,6 +407192,9 @@ myuser/myimage: .UNINDENT .INDENT 7.0 .TP +.B name +The name of the docker image. +.TP .B tag Tag name for the image. Required when using \fBbuild\fP, \fBload\fP, or \fBsls\fP to create the image, but optional if pulling from a repository. @@ -406967,10 +407254,13 @@ Changed in version 2018.3.0: The \fBtag\fP must be manually specified using the .TP .B force -False Set this parameter to \fBTrue\fP to force Salt to pull/build/load the image even if it is already present. .TP +.B insecure_registry +If \fBTrue\fP, the Docker client will permit the use of insecure +(non\-HTTPS) registries. +.TP .B client_timeout Timeout in seconds for the Docker client. This is not a timeout for the state, but for receiving a response from the API. @@ -407049,6 +407339,10 @@ Values passed this way will override Pillar values set via .sp New in version 2018.3.0. +.TP +.B kwargs +Additional keyword arguments to pass to +\fI\%docker.build\fP .UNINDENT .UNINDENT .SS salt.states.docker_network @@ -455896,7 +456190,7 @@ Create CA private key: \- keysize: 4096 \- backup: true \- require: - \- file: /etc/pki + \- file: /etc/pki/issued_certs Create self\-signed CA certificate: x509.certificate_managed: @@ -456375,7 +456669,7 @@ Available: \fBrsa\fP, \fBec\fP, \fBed25519\fP, \fBed448\fP\&. Defaults to \fBrsa .B keysize For \fBrsa\fP, specifies the bitlength of the private key (2048, 3072, 4096). For \fBec\fP, specifies the NIST curve to use (256, 384, 521). -Irrelevant for Edwards\-curve schemes (\fIed25519\(ga\fP, \fBed448\fP). 
+Irrelevant for Edwards\-curve schemes (\fBed25519\fP, \fBed448\fP). Defaults to 2048 for RSA and 256 for EC. .TP .B passphrase @@ -457626,7 +457920,7 @@ installed2 .UNINDENT .INDENT 0.0 .TP -.B salt.states.zcbuildout.installed(name, config=\(aqbuildout.cfg\(aq, quiet=False, parts=None, user=None, env=(), buildout_ver=None, test_release=False, distribute=None, new_st=None, offline=False, newest=False, python=\(aq/opt/actions\-runner/_work/salt\-priv/salt\-priv/.tools\-venvs/py3.10/docs/bin/python\(aq, debug=False, verbose=False, unless=None, onlyif=None, use_vt=False, loglevel=\(aqdebug\(aq, **kwargs) +.B salt.states.zcbuildout.installed(name, config=\(aqbuildout.cfg\(aq, quiet=False, parts=None, user=None, env=(), buildout_ver=None, test_release=False, distribute=None, new_st=None, offline=False, newest=False, python=\(aq/opt/actions\-runner/_work/salt/salt/.tools\-venvs/py3.10/docs/bin/python\(aq, debug=False, verbose=False, unless=None, onlyif=None, use_vt=False, loglevel=\(aqdebug\(aq, **kwargs) Install buildout in a specific directory .sp It is a thin wrapper to modules.buildout.buildout @@ -461812,7 +462106,7 @@ to execute those modules instead. Each module type has a corresponding loader function. 
.INDENT 0.0 .TP -.B salt.loader.minion_mods(opts, context=None, utils=None, whitelist=None, initial_load=False, loaded_base_name=None, notify=False, static_modules=None, proxy=None) +.B salt.loader.minion_mods(opts, context=None, utils=None, whitelist=None, initial_load=False, loaded_base_name=None, notify=False, static_modules=None, proxy=None, file_client=None) Load execution modules .sp Returns a dictionary of execution modules appropriate for the current @@ -461905,7 +462199,7 @@ testmod[\(aqtest.ping\(aq]() .UNINDENT .INDENT 0.0 .TP -.B salt.loader.states(opts, functions, utils, serializers, whitelist=None, proxy=None, context=None, loaded_base_name=None) +.B salt.loader.states(opts, functions, utils, serializers, whitelist=None, proxy=None, context=None, loaded_base_name=None, file_client=None) Returns the state modules .INDENT 7.0 .TP @@ -467324,6 +467618,9 @@ If someone isn\(aqt an expert in this area, what will they need to know? .sp This will also help you out, because when you go to create the PR it will automatically insert the body of your commit messages. +.sp +See the \fI\%changelog\fP +docs for more information. .SS Pull request time! .sp Once you\(aqve done all your dev work and tested locally, you should check @@ -469623,13 +469920,9 @@ The following dunder dictionaries are always defined, but may be empty .UNINDENT .SS __opts__ .sp -\&..versionchanged:: 3006.0 -.INDENT 0.0 -.INDENT 3.5 -The \fB__opts__\fP dictionary can now be accessed via +Changed in version 3006.0: The \fB__opts__\fP dictionary can now be accessed via \fBcontext\(ga\fP\&. -.UNINDENT -.UNINDENT + .sp Defined in: All modules .sp @@ -469723,13 +470016,6 @@ When running an execution module \fB__context__\fP persists across all module executions until the modules are refreshed; such as when \fI\%saltutil.sync_all\fP or \fI\%state.apply\fP are executed. -.sp -A great place to see how to use \fB__context__\fP is in the cp.py module in -salt/modules/cp.py. 
The fileclient authenticates with the master when it is -instantiated and then is used to copy files to the minion. Rather than create a -new fileclient for each file that is to be copied down, one instance of the -fileclient is instantiated in the \fB__context__\fP dictionary and is reused for -each file. Here is an example from salt/modules/cp.py: .INDENT 0.0 .INDENT 3.5 .sp @@ -469784,6 +470070,14 @@ Defined in: State .SS __sdb__ .sp Defined in: SDB +.SS __file_client__ +.sp +Changed in version 3006.5. + +.sp +The \fB__file_client__\fP dunder was added to states and execution modules. This +enables the use of a file client without having to instantiate one in +the module. .SS Configuration Options .sp A number of configuration options can affect the load process. This is a quick @@ -477575,6 +477869,137 @@ Bump to \fBurllib3==1.26.17\fP or \fBurllib3==2.0.6\fP due to \fI\%https://githu .IP \(bu 2 +Bump to \fBgitpython==3.1.37\fP due to \fI\%https://github.com/advisories/GHSA\-cwvm\-v4w8\-q58c\fP \fI\%#65383\fP .UNINDENT +(release\-3006.5)= +.SS Salt 3006.5 release notes +.SS Changelog +.SS Removed +.INDENT 0.0 +.IP \(bu 2 +Tech Debt \- support for pysss removed due to functionality addition in Python 3.3 \fI\%#65029\fP +.UNINDENT +.SS Fixed +.INDENT 0.0 +.IP \(bu 2 +Improved error message when state arguments are accidentally passed as a string \fI\%#38098\fP +.IP \(bu 2 +Allow \fBpip.install\fP to create a log file that is passed in if the parent directory is writeable \fI\%#44722\fP +.IP \(bu 2 +Fixed merging of complex pillar overrides with salt\-ssh states \fI\%#59802\fP +.IP \(bu 2 +Fixed gpg pillar rendering with salt\-ssh \fI\%#60002\fP +.IP \(bu 2 +Made salt\-ssh states not re\-render pillars unnecessarily \fI\%#62230\fP +.IP \(bu 2 +Made Salt maintain options in Debian package repo definitions \fI\%#64130\fP +.IP \(bu 2 +Migrated all \fI\%invoke\fP tasks to \fI\%python\-tools\-scripts\fP\&. 
+.INDENT 2.0 +.IP \(bu 2 +\fBtasks/docs.py\fP \-> \fBtools/precommit/docs.py\fP +.IP \(bu 2 +\fBtasks/docstrings.py\fP \-> \fBtools/precommit/docstrings.py\fP +.IP \(bu 2 +\fBtasks/loader.py\fP \-> \fBtools/precommit/loader.py\fP +.IP \(bu 2 +\fBtasks/filemap.py\fP \-> \fBtools/precommit/filemap.py\fP \fI\%#64374\fP +.UNINDENT +.IP \(bu 2 +Fix salt user login shell path in Debian packages \fI\%#64377\fP +.IP \(bu 2 +Fill out lsb_distrib_xxxx (best estimate) grains if problems with retrieving lsb_release data \fI\%#64473\fP +.IP \(bu 2 +Fixed an issue in the \fBfile.directory\fP state where the \fBchildren_only\fP keyword +argument was not being respected. \fI\%#64497\fP +.IP \(bu 2 +Move salt.ufw to correct location /etc/ufw/applications.d/ \fI\%#64572\fP +.IP \(bu 2 +Fixed salt\-ssh stacktrace when retcode is not an integer \fI\%#64575\fP +.IP \(bu 2 +Fixed SSH shell seldomly fails to report any exit code \fI\%#64588\fP +.IP \(bu 2 +Fixed some issues in x509_v2 execution module private key functions \fI\%#64597\fP +.IP \(bu 2 +Fixed grp.getgrall() in utils/user.py causing performance issues \fI\%#64888\fP +.IP \(bu 2 +Fix user.list_groups omits remote groups via sssd, etc. \fI\%#64953\fP +.IP \(bu 2 +Ensure sync from _grains occurs before attempting pillar compilation in case custom grain used in pillar file \fI\%#65027\fP +.IP \(bu 2 +Moved gitfs locks to salt working dir to avoid lock wipes \fI\%#65086\fP +.IP \(bu 2 +Only attempt to create a keys directory when \fB\-\-gen\-keys\fP is passed to the \fBsalt\-key\fP CLI \fI\%#65093\fP +.IP \(bu 2 +Fix nonce verification, request server replies do not stomp on eachother. \fI\%#65114\fP +.IP \(bu 2 +speed up yumpkg list_pkgs by not requiring digest or signature verification on lookup. \fI\%#65152\fP +.IP \(bu 2 +Fix pkg.latest failing on windows for winrepo packages where the package is already up to date \fI\%#65165\fP +.IP \(bu 2 +Ensure \fBkwarg\fP is preserved when checking for kwargs. 
This change affects proxy minions when used with Deltaproxy, which had kwargs popped when targeting multiple minions id. \fI\%#65179\fP +.IP \(bu 2 +Fixes traceback when state id is an int in a reactor SLS file. \fI\%#65210\fP +.IP \(bu 2 +Install logrotate config as /etc/logrotate.d/salt\-common for Debian packages +Remove broken /etc/logrotate.d/salt directory from 3006.3 if it exists. \fI\%#65231\fP +.IP \(bu 2 +Use \fBsha256\fP as the default \fBhash_type\fP\&. It has been the default since Salt v2016.9 \fI\%#65287\fP +.IP \(bu 2 +Preserve ownership on log rotation \fI\%#65288\fP +.IP \(bu 2 +Ensure that the correct value of jid_include is passed if the argument is included in the passed keyword arguments. \fI\%#65302\fP +.IP \(bu 2 +Upgrade relenv to 0.14.2 +.INDENT 2.0 +.IP \(bu 2 +Update openssl to address CVE\-2023\-5363. +.IP \(bu 2 +Fix bug in openssl setup when openssl binary can\(aqt be found. +.IP \(bu 2 +Add M1 mac support. \fI\%#65316\fP +.UNINDENT +.IP \(bu 2 +Fix regex for filespec adding/deleting fcontext policy in selinux \fI\%#65340\fP +.IP \(bu 2 +Ensure CLI options take priority over Saltfile options \fI\%#65358\fP +.IP \(bu 2 +Test mode for state function \fBsaltmod.wheel\fP no longer sets \fBresult\fP to \fB(None,)\fP \fI\%#65372\fP +.IP \(bu 2 +Client only process events which tag conforms to an event return. \fI\%#65400\fP +.IP \(bu 2 +Fixes an issue setting user or machine policy on Windows when the Group Policy +directory is missing \fI\%#65411\fP +.IP \(bu 2 +Fix regression in file module which was not re\-using a file client. \fI\%#65450\fP +.IP \(bu 2 +pip.installed state will now properly fail when a specified user does not exist \fI\%#65458\fP +.IP \(bu 2 +Publish channel connect callback method properly closes its request channel. 
\fI\%#65464\fP +.IP \(bu 2 +Ensured the pillar in SSH wrapper modules is the same as the one used in template rendering when overrides are passed \fI\%#65483\fP +.IP \(bu 2 +Fix file.comment ignore_missing not working with multiline char \fI\%#65501\fP +.IP \(bu 2 +Warn when an un\-closed transport client is being garbage collected. \fI\%#65554\fP +.IP \(bu 2 +Only generate the HMAC\(aqs for \fBlibssl.so.1.1\fP and \fBlibcrypto.so.1.1\fP if those files exist. \fI\%#65581\fP +.IP \(bu 2 +Fixed an issue where Salt Cloud would fail if it could not delete lingering +PAexec binaries \fI\%#65584\fP +.UNINDENT +.SS Added +.INDENT 0.0 +.IP \(bu 2 +Added Salt support for Debian 12 \fI\%#64223\fP +.IP \(bu 2 +Added Salt support for Amazon Linux 2023 \fI\%#64455\fP +.UNINDENT +.SS Security +.INDENT 0.0 +.IP \(bu 2 +Bump to \fBcryptography==41.0.4\fP due to \fI\%https://github.com/advisories/GHSA\-v8gr\-m533\-ghj9\fP \fI\%#65268\fP +.IP \(bu 2 +Bump to \fBcryptography==41.0.7\fP due to \fI\%https://github.com/advisories/GHSA\-jfhm\-5ghh\-2f97\fP \fI\%#65643\fP +.UNINDENT .sp See \fI\%Install a release candidate\fP for more information about installing an RC when one is available. @@ -478536,6 +478961,34 @@ Bump to \fIcertifi==2023.07.22\fP due to \fI\%https://github.com/advisories/GHSA .sp Python 3.5 cannot get the updated requirements since certifi no longer supports this python version (#64720) .UNINDENT +.SS Salt 3005.3 Release Notes +.sp +Version 3005.3 is a Bug fix release for \fI\%3005\fP\&. +.SS Changed +.INDENT 0.0 +.IP \(bu 2 +Fix __env__ and improve cache cleaning see more info at pull #65017. (#65002) +.UNINDENT +.SS Security +.INDENT 0.0 +.IP \(bu 2 +Update to \fIgitpython>=3.1.35\fP due to \fI\%https://github.com/advisories/GHSA\-wfm5\-v35h\-vwf4\fP and \fI\%https://github.com/advisories/GHSA\-cwvm\-v4w8\-q58c\fP (#65167) +.UNINDENT +.SS Salt 3005.4 Release Notes +.sp +Version 3005.4 is a CVE security fix release for \fI\%3005\fP\&. 
+.SS Security +.INDENT 0.0 +.IP \(bu 2 +Fix CVE\-2023\-34049 by ensuring we do not use a predictable name for the script and correctly check returncode of scp command. +This only impacts salt\-ssh users using the pre\-flight option. (cve\-2023\-34049) +.IP \(bu 2 +Bump to \fIcryptography==41.0.4\fP due to \fI\%https://github.com/advisories/GHSA\-v8gr\-m533\-ghj9\fP (#65267) +.IP \(bu 2 +Bump to \fIurllib3==1.26.17\fP or \fIurllib3==2.0.6\fP due to \fI\%https://github.com/advisories/GHSA\-v845\-jxx5\-vc9f\fP (#65334) +.IP \(bu 2 +Bump to \fIgitpython==3.1.37\fP due to \fI\%https://github.com/advisories/GHSA\-cwvm\-v4w8\-q58c\fP (#65383) +.UNINDENT .SS Salt 3004 Release Notes \- Codename Silicon .SS New Features .SS Transactional System Support (MicroOS) diff --git a/doc/man/spm.1 b/doc/man/spm.1 index 686ce512eeba..5e715ca48299 100644 --- a/doc/man/spm.1 +++ b/doc/man/spm.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SPM" "1" "Generated on October 16, 2023 at 05:24:47 PM UTC." "3006.4" "Salt" +.TH "SPM" "1" "Generated on December 12, 2023 at 05:54:17 PM UTC." "3006.5" "Salt" .SH NAME spm \- Salt Package Manager Command .sp diff --git a/doc/topics/packaging/testing.rst b/doc/topics/packaging/testing.rst index a5cad0ac3939..371b59af8558 100644 --- a/doc/topics/packaging/testing.rst +++ b/doc/topics/packaging/testing.rst @@ -27,7 +27,7 @@ In order to run the package tests, the `relenv built packages need to be placed in the correct locations. * Place all salt packages for the applicable testing version in - ``/pkg/artifacts/``. + ``/artifacts/pkg/``. * The onedir must be located under ``/artifacts/``. * Additionally, to ensure complete parity with Salt's CI/CD suite, place the ``nox`` virtual environment in ``/.nox/test-pkgs-onedir``. @@ -109,7 +109,7 @@ can be performed for artifacts from nightly builds. #. 
Place the artifacts in the correct location: - Unzip the packages and place them in ``/pkg/artifacts/``. + Unzip the packages and place them in ``/artifacts/pkg/``. You must unzip and untar the onedir packages and place them in ``/artifacts/``. Windows onedir requires an additional unzip diff --git a/doc/topics/releases/3006.5.md b/doc/topics/releases/3006.5.md new file mode 100644 index 000000000000..9e97f72382dd --- /dev/null +++ b/doc/topics/releases/3006.5.md @@ -0,0 +1,93 @@ +(release-3006.5)= +# Salt 3006.5 release notes + + + + + + + +## Changelog + +### Removed + +- Tech Debt - support for pysss removed due to functionality addition in Python 3.3 [#65029](https://github.com/saltstack/salt/issues/65029) + + +### Fixed + +- Improved error message when state arguments are accidentally passed as a string [#38098](https://github.com/saltstack/salt/issues/38098) +- Allow `pip.install` to create a log file that is passed in if the parent directory is writeable [#44722](https://github.com/saltstack/salt/issues/44722) +- Fixed merging of complex pillar overrides with salt-ssh states [#59802](https://github.com/saltstack/salt/issues/59802) +- Fixed gpg pillar rendering with salt-ssh [#60002](https://github.com/saltstack/salt/issues/60002) +- Made salt-ssh states not re-render pillars unnecessarily [#62230](https://github.com/saltstack/salt/issues/62230) +- Made Salt maintain options in Debian package repo definitions [#64130](https://github.com/saltstack/salt/issues/64130) +- Migrated all [`invoke`](https://www.pyinvoke.org/) tasks to [`python-tools-scripts`](https://github.com/s0undt3ch/python-tools-scripts). 
+ + * `tasks/docs.py` -> `tools/precommit/docs.py` + * `tasks/docstrings.py` -> `tools/precommit/docstrings.py` + * `tasks/loader.py` -> `tools/precommit/loader.py` + * `tasks/filemap.py` -> `tools/precommit/filemap.py` [#64374](https://github.com/saltstack/salt/issues/64374) +- Fix salt user login shell path in Debian packages [#64377](https://github.com/saltstack/salt/issues/64377) +- Fill out lsb_distrib_xxxx (best estimate) grains if problems with retrieving lsb_release data [#64473](https://github.com/saltstack/salt/issues/64473) +- Fixed an issue in the ``file.directory`` state where the ``children_only`` keyword + argument was not being respected. [#64497](https://github.com/saltstack/salt/issues/64497) +- Move salt.ufw to correct location /etc/ufw/applications.d/ [#64572](https://github.com/saltstack/salt/issues/64572) +- Fixed salt-ssh stacktrace when retcode is not an integer [#64575](https://github.com/saltstack/salt/issues/64575) +- Fixed SSH shell seldomly fails to report any exit code [#64588](https://github.com/saltstack/salt/issues/64588) +- Fixed some issues in x509_v2 execution module private key functions [#64597](https://github.com/saltstack/salt/issues/64597) +- Fixed grp.getgrall() in utils/user.py causing performance issues [#64888](https://github.com/saltstack/salt/issues/64888) +- Fix user.list_groups omits remote groups via sssd, etc. [#64953](https://github.com/saltstack/salt/issues/64953) +- Ensure sync from _grains occurs before attempting pillar compilation in case custom grain used in pillar file [#65027](https://github.com/saltstack/salt/issues/65027) +- Moved gitfs locks to salt working dir to avoid lock wipes [#65086](https://github.com/saltstack/salt/issues/65086) +- Only attempt to create a keys directory when `--gen-keys` is passed to the `salt-key` CLI [#65093](https://github.com/saltstack/salt/issues/65093) +- Fix nonce verification, request server replies do not stomp on eachother. 
[#65114](https://github.com/saltstack/salt/issues/65114) +- speed up yumpkg list_pkgs by not requiring digest or signature verification on lookup. [#65152](https://github.com/saltstack/salt/issues/65152) +- Fix pkg.latest failing on windows for winrepo packages where the package is already up to date [#65165](https://github.com/saltstack/salt/issues/65165) +- Ensure __kwarg__ is preserved when checking for kwargs. This change affects proxy minions when used with Deltaproxy, which had kwargs popped when targeting multiple minions id. [#65179](https://github.com/saltstack/salt/issues/65179) +- Fixes traceback when state id is an int in a reactor SLS file. [#65210](https://github.com/saltstack/salt/issues/65210) +- Install logrotate config as /etc/logrotate.d/salt-common for Debian packages + Remove broken /etc/logrotate.d/salt directory from 3006.3 if it exists. [#65231](https://github.com/saltstack/salt/issues/65231) +- Use ``sha256`` as the default ``hash_type``. It has been the default since Salt v2016.9 [#65287](https://github.com/saltstack/salt/issues/65287) +- Preserve ownership on log rotation [#65288](https://github.com/saltstack/salt/issues/65288) +- Ensure that the correct value of jid_inclue is passed if the argument is included in the passed keyword arguments. [#65302](https://github.com/saltstack/salt/issues/65302) +- Uprade relenv to 0.14.2 + - Update openssl to address CVE-2023-5363. + - Fix bug in openssl setup when openssl binary can't be found. + - Add M1 mac support. 
[#65316](https://github.com/saltstack/salt/issues/65316) +- Fix regex for filespec adding/deleting fcontext policy in selinux [#65340](https://github.com/saltstack/salt/issues/65340) +- Ensure CLI options take priority over Saltfile options [#65358](https://github.com/saltstack/salt/issues/65358) +- Test mode for state function `saltmod.wheel` no longer set's `result` to `(None,)` [#65372](https://github.com/saltstack/salt/issues/65372) +- Client only process events which tag conforms to an event return. [#65400](https://github.com/saltstack/salt/issues/65400) +- Fixes an issue setting user or machine policy on Windows when the Group Policy + directory is missing [#65411](https://github.com/saltstack/salt/issues/65411) +- Fix regression in file module which was not re-using a file client. [#65450](https://github.com/saltstack/salt/issues/65450) +- pip.installed state will now properly fail when a specified user does not exists [#65458](https://github.com/saltstack/salt/issues/65458) +- Publish channel connect callback method properly closes it's request channel. [#65464](https://github.com/saltstack/salt/issues/65464) +- Ensured the pillar in SSH wrapper modules is the same as the one used in template rendering when overrides are passed [#65483](https://github.com/saltstack/salt/issues/65483) +- Fix file.comment ignore_missing not working with multiline char [#65501](https://github.com/saltstack/salt/issues/65501) +- Warn when an un-closed transport client is being garbage collected. [#65554](https://github.com/saltstack/salt/issues/65554) +- Only generate the HMAC's for ``libssl.so.1.1`` and ``libcrypto.so.1.1`` if those files exist. 
[#65581](https://github.com/saltstack/salt/issues/65581) +- Fixed an issue where Salt Cloud would fail if it could not delete lingering + PAexec binaries [#65584](https://github.com/saltstack/salt/issues/65584) + + +### Added + +- Added Salt support for Debian 12 [#64223](https://github.com/saltstack/salt/issues/64223) +- Added Salt support for Amazon Linux 2023 [#64455](https://github.com/saltstack/salt/issues/64455) + + +### Security + +- Bump to `cryptography==41.0.4` due to https://github.com/advisories/GHSA-v8gr-m533-ghj9 [#65268](https://github.com/saltstack/salt/issues/65268) +- Bump to `cryptography==41.0.7` due to https://github.com/advisories/GHSA-jfhm-5ghh-2f97 [#65643](https://github.com/saltstack/salt/issues/65643) diff --git a/doc/topics/releases/templates/3006.5.md.template b/doc/topics/releases/templates/3006.5.md.template new file mode 100644 index 000000000000..1750e9a85172 --- /dev/null +++ b/doc/topics/releases/templates/3006.5.md.template @@ -0,0 +1,14 @@ +(release-3006.5)= +# Salt 3006.5 release notes{{ unreleased }} +{{ warning }} + + + + +## Changelog +{{ changelog }} diff --git a/noxfile.py b/noxfile.py index 44f6409b276c..19a1d5f43c33 100644 --- a/noxfile.py +++ b/noxfile.py @@ -461,7 +461,7 @@ def _report_coverage( xml_coverage_file = COVERAGE_OUTPUT_DIR.relative_to(REPO_ROOT) / "salt.xml" html_coverage_dir = COVERAGE_OUTPUT_DIR.relative_to(REPO_ROOT) / "html" / "salt" cmd_args = [ - "--omit=tests/*,pkg/tests/*", + "--omit=tests/*,tests/pytests/pkg/*", "--include=salt/*", ] @@ -473,7 +473,7 @@ def _report_coverage( ) cmd_args = [ "--omit=salt/*", - "--include=tests/*,pkg/tests/*", + "--include=tests/*,tests/pytests/pkg/*", ] else: json_coverage_file = ( @@ -482,7 +482,7 @@ def _report_coverage( xml_coverage_file = COVERAGE_OUTPUT_DIR.relative_to(REPO_ROOT) / "coverage.xml" html_coverage_dir = COVERAGE_OUTPUT_DIR.relative_to(REPO_ROOT) / "html" / "full" cmd_args = [ - "--include=salt/*,tests/*,pkg/tests/*", + 
"--include=salt/*,tests/*,tests/pytests/pkg/*", ] if cli_report: @@ -1063,6 +1063,9 @@ def _ci_test(session, transport, onedir=False): if onedir: env["ONEDIR_TESTRUN"] = "1" chunks = { + "pkg": [ + "tests/pytests/pkg", + ], "unit": [ "tests/unit", "tests/pytests/unit", @@ -1070,7 +1073,9 @@ def _ci_test(session, transport, onedir=False): "functional": [ "tests/pytests/functional", ], - "scenarios": ["tests/pytests/scenarios"], + "scenarios": [ + "tests/pytests/scenarios", + ], } test_group_number = os.environ.get("TEST_GROUP") or "1" @@ -1857,31 +1862,41 @@ def ci_test_onedir_pkgs(session): ) ) + common_pytest_args = [ + "--color=yes", + "--sys-stats", + "--run-destructive", + f"--output-columns={os.environ.get('OUTPUT_COLUMNS') or 120}", + "--pkg-system-service", + ] + chunks = { - "install": ["pkg/tests/"], + "install": [ + "tests/pytests/pkg/", + ], "upgrade": [ "--upgrade", "--no-uninstall", - "pkg/tests/upgrade/", + "tests/pytests/pkg/upgrade/", ], "upgrade-classic": [ "--upgrade", "--no-uninstall", - "pkg/tests/upgrade/", + "tests/pytests/pkg/upgrade/", ], "downgrade": [ "--downgrade", "--no-uninstall", - "pkg/tests/downgrade/", + "tests/pytests/pkg/downgrade/", ], "downgrade-classic": [ "--downgrade", "--no-uninstall", - "pkg/tests/downgrade/", + "tests/pytests/pkg/downgrade/", ], "download-pkgs": [ "--download-pkgs", - "pkg/tests/download/", + "tests/pytests/pkg/download/", ], } @@ -1909,10 +1924,9 @@ def ci_test_onedir_pkgs(session): cmd_args.append("--classic") pytest_args = ( - cmd_args[:] + common_pytest_args[:] + + cmd_args[:] + [ - "-c", - str(REPO_ROOT / "pkg-tests-pytest.ini"), f"--junitxml=artifacts/xml-unittests-output/test-results-{chunk}.xml", f"--log-file=artifacts/logs/runtests-{chunk}.log", ] @@ -1921,6 +1935,9 @@ def ci_test_onedir_pkgs(session): try: _pytest(session, coverage=False, cmd_args=pytest_args, env=env) except CommandFailed: + if os.environ.get("RERUN_FAILURES", "0") == "0": + # Don't rerun on failures + return # Don't print the 
system information, not the test selection on reruns global PRINT_TEST_SELECTION @@ -1929,10 +1946,9 @@ def ci_test_onedir_pkgs(session): PRINT_SYSTEM_INFO = False pytest_args = ( - cmd_args[:] + common_pytest_args[:] + + cmd_args[:] + [ - "-c", - str(REPO_ROOT / "pkg-tests-pytest.ini"), f"--junitxml=artifacts/xml-unittests-output/test-results-{chunk}-rerun.xml", f"--log-file=artifacts/logs/runtests-{chunk}-rerun.log", "--lf", @@ -1950,10 +1966,9 @@ def ci_test_onedir_pkgs(session): if chunk not in ("install", "download-pkgs"): cmd_args = chunks["install"] pytest_args = ( - cmd_args[:] + common_pytest_args[:] + + cmd_args[:] + [ - "-c", - str(REPO_ROOT / "pkg-tests-pytest.ini"), "--no-install", f"--junitxml=artifacts/xml-unittests-output/test-results-install.xml", f"--log-file=artifacts/logs/runtests-install.log", @@ -1969,10 +1984,9 @@ def ci_test_onedir_pkgs(session): except CommandFailed: cmd_args = chunks["install"] pytest_args = ( - cmd_args[:] + common_pytest_args[:] + + cmd_args[:] + [ - "-c", - str(REPO_ROOT / "pkg-tests-pytest.ini"), "--no-install", f"--junitxml=artifacts/xml-unittests-output/test-results-install-rerun.xml", f"--log-file=artifacts/logs/runtests-install-rerun.log", diff --git a/pkg-tests-pytest.ini b/pkg-tests-pytest.ini deleted file mode 100644 index 390c029f863a..000000000000 --- a/pkg-tests-pytest.ini +++ /dev/null @@ -1,10 +0,0 @@ -[pytest] -log_date_format=%H:%M:%S -log_cli_format=%(asctime)s,%(msecs)03.0f [%(name)-5s:%(lineno)-4d][%(levelname)-8s][%(processName)s(%(process)s)] %(message)s -log_file_format=%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(processName)s(%(process)d)] %(message)s -norecursedirs=templates tests/ -testpaths=pkg/tests -python_files=test_*.py -python_classes=Test* -python_functions = test_* -junit_family=xunit2 diff --git a/pkg/debian/changelog b/pkg/debian/changelog index e45d8665d162..12f388a11280 100644 --- a/pkg/debian/changelog +++ b/pkg/debian/changelog @@ -1,3 +1,80 @@ +salt 
(3006.5) stable; urgency=medium + + + # Removed + + * Tech Debt - support for pysss removed due to functionality addition in Python 3.3 [#65029](https://github.com/saltstack/salt/issues/65029) + + # Fixed + + * Improved error message when state arguments are accidentally passed as a string [#38098](https://github.com/saltstack/salt/issues/38098) + * Allow `pip.install` to create a log file that is passed in if the parent directory is writeable [#44722](https://github.com/saltstack/salt/issues/44722) + * Fixed merging of complex pillar overrides with salt-ssh states [#59802](https://github.com/saltstack/salt/issues/59802) + * Fixed gpg pillar rendering with salt-ssh [#60002](https://github.com/saltstack/salt/issues/60002) + * Made salt-ssh states not re-render pillars unnecessarily [#62230](https://github.com/saltstack/salt/issues/62230) + * Made Salt maintain options in Debian package repo definitions [#64130](https://github.com/saltstack/salt/issues/64130) + * Migrated all [`invoke`](https://www.pyinvoke.org/) tasks to [`python-tools-scripts`](https://github.com/s0undt3ch/python-tools-scripts). + + * `tasks/docs.py` *> `tools/precommit/docs.py` + * `tasks/docstrings.py` *> `tools/precommit/docstrings.py` + * `tasks/loader.py` *> `tools/precommit/loader.py` + * `tasks/filemap.py` *> `tools/precommit/filemap.py` [#64374](https://github.com/saltstack/salt/issues/64374) + * Fix salt user login shell path in Debian packages [#64377](https://github.com/saltstack/salt/issues/64377) + * Fill out lsb_distrib_xxxx (best estimate) grains if problems with retrieving lsb_release data [#64473](https://github.com/saltstack/salt/issues/64473) + * Fixed an issue in the ``file.directory`` state where the ``children_only`` keyword + argument was not being respected. 
[#64497](https://github.com/saltstack/salt/issues/64497) + * Move salt.ufw to correct location /etc/ufw/applications.d/ [#64572](https://github.com/saltstack/salt/issues/64572) + * Fixed salt-ssh stacktrace when retcode is not an integer [#64575](https://github.com/saltstack/salt/issues/64575) + * Fixed SSH shell seldomly fails to report any exit code [#64588](https://github.com/saltstack/salt/issues/64588) + * Fixed some issues in x509_v2 execution module private key functions [#64597](https://github.com/saltstack/salt/issues/64597) + * Fixed grp.getgrall() in utils/user.py causing performance issues [#64888](https://github.com/saltstack/salt/issues/64888) + * Fix user.list_groups omits remote groups via sssd, etc. [#64953](https://github.com/saltstack/salt/issues/64953) + * Ensure sync from _grains occurs before attempting pillar compilation in case custom grain used in pillar file [#65027](https://github.com/saltstack/salt/issues/65027) + * Moved gitfs locks to salt working dir to avoid lock wipes [#65086](https://github.com/saltstack/salt/issues/65086) + * Only attempt to create a keys directory when `--gen-keys` is passed to the `salt-key` CLI [#65093](https://github.com/saltstack/salt/issues/65093) + * Fix nonce verification, request server replies do not stomp on eachother. [#65114](https://github.com/saltstack/salt/issues/65114) + * speed up yumpkg list_pkgs by not requiring digest or signature verification on lookup. [#65152](https://github.com/saltstack/salt/issues/65152) + * Fix pkg.latest failing on windows for winrepo packages where the package is already up to date [#65165](https://github.com/saltstack/salt/issues/65165) + * Ensure __kwarg__ is preserved when checking for kwargs. This change affects proxy minions when used with Deltaproxy, which had kwargs popped when targeting multiple minions id. [#65179](https://github.com/saltstack/salt/issues/65179) + * Fixes traceback when state id is an int in a reactor SLS file. 
[#65210](https://github.com/saltstack/salt/issues/65210) + * Install logrotate config as /etc/logrotate.d/salt-common for Debian packages + Remove broken /etc/logrotate.d/salt directory from 3006.3 if it exists. [#65231](https://github.com/saltstack/salt/issues/65231) + * Use ``sha256`` as the default ``hash_type``. It has been the default since Salt v2016.9 [#65287](https://github.com/saltstack/salt/issues/65287) + * Preserve ownership on log rotation [#65288](https://github.com/saltstack/salt/issues/65288) + * Ensure that the correct value of jid_inclue is passed if the argument is included in the passed keyword arguments. [#65302](https://github.com/saltstack/salt/issues/65302) + * Uprade relenv to 0.14.2 + * Update openssl to address CVE-2023-5363. + * Fix bug in openssl setup when openssl binary can't be found. + * Add M1 mac support. [#65316](https://github.com/saltstack/salt/issues/65316) + * Fix regex for filespec adding/deleting fcontext policy in selinux [#65340](https://github.com/saltstack/salt/issues/65340) + * Ensure CLI options take priority over Saltfile options [#65358](https://github.com/saltstack/salt/issues/65358) + * Test mode for state function `saltmod.wheel` no longer set's `result` to `(None,)` [#65372](https://github.com/saltstack/salt/issues/65372) + * Client only process events which tag conforms to an event return. [#65400](https://github.com/saltstack/salt/issues/65400) + * Fixes an issue setting user or machine policy on Windows when the Group Policy + directory is missing [#65411](https://github.com/saltstack/salt/issues/65411) + * Fix regression in file module which was not re-using a file client. [#65450](https://github.com/saltstack/salt/issues/65450) + * pip.installed state will now properly fail when a specified user does not exists [#65458](https://github.com/saltstack/salt/issues/65458) + * Publish channel connect callback method properly closes it's request channel. 
[#65464](https://github.com/saltstack/salt/issues/65464) + * Ensured the pillar in SSH wrapper modules is the same as the one used in template rendering when overrides are passed [#65483](https://github.com/saltstack/salt/issues/65483) + * Fix file.comment ignore_missing not working with multiline char [#65501](https://github.com/saltstack/salt/issues/65501) + * Warn when an un-closed transport client is being garbage collected. [#65554](https://github.com/saltstack/salt/issues/65554) + * Only generate the HMAC's for ``libssl.so.1.1`` and ``libcrypto.so.1.1`` if those files exist. [#65581](https://github.com/saltstack/salt/issues/65581) + * Fixed an issue where Salt Cloud would fail if it could not delete lingering + PAexec binaries [#65584](https://github.com/saltstack/salt/issues/65584) + + # Added + + * Added Salt support for Debian 12 [#64223](https://github.com/saltstack/salt/issues/64223) + * Added Salt support for Amazon Linux 2023 [#64455](https://github.com/saltstack/salt/issues/64455) + + # Security + + * Bump to `cryptography==41.0.4` due to https://github.com/advisories/GHSA-v8gr-m533-ghj9 [#65268](https://github.com/saltstack/salt/issues/65268) + * Bump to `cryptography==41.0.7` due to https://github.com/advisories/GHSA-jfhm-5ghh-2f97 [#65643](https://github.com/saltstack/salt/issues/65643) + + + -- Salt Project Packaging Tue, 12 Dec 2023 17:52:33 +0000 + salt (3006.4) stable; urgency=medium diff --git a/pkg/rpm/salt.spec b/pkg/rpm/salt.spec index 54da61d50dd0..9bb75580e03a 100644 --- a/pkg/rpm/salt.spec +++ b/pkg/rpm/salt.spec @@ -31,7 +31,7 @@ %define fish_dir %{_datadir}/fish/vendor_functions.d Name: salt -Version: 3006.4 +Version: 3006.5 Release: 0 Summary: A parallel remote execution system Group: System Environment/Daemons @@ -583,6 +583,80 @@ fi %changelog +* Tue Dec 12 2023 Salt Project Packaging - 3006.5 + +# Removed + +- Tech Debt - support for pysss removed due to functionality addition in Python 3.3 
[#65029](https://github.com/saltstack/salt/issues/65029) + +# Fixed + +- Improved error message when state arguments are accidentally passed as a string [#38098](https://github.com/saltstack/salt/issues/38098) +- Allow `pip.install` to create a log file that is passed in if the parent directory is writeable [#44722](https://github.com/saltstack/salt/issues/44722) +- Fixed merging of complex pillar overrides with salt-ssh states [#59802](https://github.com/saltstack/salt/issues/59802) +- Fixed gpg pillar rendering with salt-ssh [#60002](https://github.com/saltstack/salt/issues/60002) +- Made salt-ssh states not re-render pillars unnecessarily [#62230](https://github.com/saltstack/salt/issues/62230) +- Made Salt maintain options in Debian package repo definitions [#64130](https://github.com/saltstack/salt/issues/64130) +- Migrated all [`invoke`](https://www.pyinvoke.org/) tasks to [`python-tools-scripts`](https://github.com/s0undt3ch/python-tools-scripts). + + * `tasks/docs.py` -> `tools/precommit/docs.py` + * `tasks/docstrings.py` -> `tools/precommit/docstrings.py` + * `tasks/loader.py` -> `tools/precommit/loader.py` + * `tasks/filemap.py` -> `tools/precommit/filemap.py` [#64374](https://github.com/saltstack/salt/issues/64374) +- Fix salt user login shell path in Debian packages [#64377](https://github.com/saltstack/salt/issues/64377) +- Fill out lsb_distrib_xxxx (best estimate) grains if problems with retrieving lsb_release data [#64473](https://github.com/saltstack/salt/issues/64473) +- Fixed an issue in the ``file.directory`` state where the ``children_only`` keyword + argument was not being respected. 
[#64497](https://github.com/saltstack/salt/issues/64497) +- Move salt.ufw to correct location /etc/ufw/applications.d/ [#64572](https://github.com/saltstack/salt/issues/64572) +- Fixed salt-ssh stacktrace when retcode is not an integer [#64575](https://github.com/saltstack/salt/issues/64575) +- Fixed SSH shell seldomly fails to report any exit code [#64588](https://github.com/saltstack/salt/issues/64588) +- Fixed some issues in x509_v2 execution module private key functions [#64597](https://github.com/saltstack/salt/issues/64597) +- Fixed grp.getgrall() in utils/user.py causing performance issues [#64888](https://github.com/saltstack/salt/issues/64888) +- Fix user.list_groups omits remote groups via sssd, etc. [#64953](https://github.com/saltstack/salt/issues/64953) +- Ensure sync from _grains occurs before attempting pillar compilation in case custom grain used in pillar file [#65027](https://github.com/saltstack/salt/issues/65027) +- Moved gitfs locks to salt working dir to avoid lock wipes [#65086](https://github.com/saltstack/salt/issues/65086) +- Only attempt to create a keys directory when `--gen-keys` is passed to the `salt-key` CLI [#65093](https://github.com/saltstack/salt/issues/65093) +- Fix nonce verification, request server replies do not stomp on eachother. [#65114](https://github.com/saltstack/salt/issues/65114) +- speed up yumpkg list_pkgs by not requiring digest or signature verification on lookup. [#65152](https://github.com/saltstack/salt/issues/65152) +- Fix pkg.latest failing on windows for winrepo packages where the package is already up to date [#65165](https://github.com/saltstack/salt/issues/65165) +- Ensure __kwarg__ is preserved when checking for kwargs. This change affects proxy minions when used with Deltaproxy, which had kwargs popped when targeting multiple minions id. [#65179](https://github.com/saltstack/salt/issues/65179) +- Fixes traceback when state id is an int in a reactor SLS file. 
[#65210](https://github.com/saltstack/salt/issues/65210) +- Install logrotate config as /etc/logrotate.d/salt-common for Debian packages + Remove broken /etc/logrotate.d/salt directory from 3006.3 if it exists. [#65231](https://github.com/saltstack/salt/issues/65231) +- Use ``sha256`` as the default ``hash_type``. It has been the default since Salt v2016.9 [#65287](https://github.com/saltstack/salt/issues/65287) +- Preserve ownership on log rotation [#65288](https://github.com/saltstack/salt/issues/65288) +- Ensure that the correct value of jid_inclue is passed if the argument is included in the passed keyword arguments. [#65302](https://github.com/saltstack/salt/issues/65302) +- Uprade relenv to 0.14.2 + - Update openssl to address CVE-2023-5363. + - Fix bug in openssl setup when openssl binary can't be found. + - Add M1 mac support. [#65316](https://github.com/saltstack/salt/issues/65316) +- Fix regex for filespec adding/deleting fcontext policy in selinux [#65340](https://github.com/saltstack/salt/issues/65340) +- Ensure CLI options take priority over Saltfile options [#65358](https://github.com/saltstack/salt/issues/65358) +- Test mode for state function `saltmod.wheel` no longer set's `result` to `(None,)` [#65372](https://github.com/saltstack/salt/issues/65372) +- Client only process events which tag conforms to an event return. [#65400](https://github.com/saltstack/salt/issues/65400) +- Fixes an issue setting user or machine policy on Windows when the Group Policy + directory is missing [#65411](https://github.com/saltstack/salt/issues/65411) +- Fix regression in file module which was not re-using a file client. [#65450](https://github.com/saltstack/salt/issues/65450) +- pip.installed state will now properly fail when a specified user does not exists [#65458](https://github.com/saltstack/salt/issues/65458) +- Publish channel connect callback method properly closes it's request channel. 
[#65464](https://github.com/saltstack/salt/issues/65464) +- Ensured the pillar in SSH wrapper modules is the same as the one used in template rendering when overrides are passed [#65483](https://github.com/saltstack/salt/issues/65483) +- Fix file.comment ignore_missing not working with multiline char [#65501](https://github.com/saltstack/salt/issues/65501) +- Warn when an un-closed transport client is being garbage collected. [#65554](https://github.com/saltstack/salt/issues/65554) +- Only generate the HMAC's for ``libssl.so.1.1`` and ``libcrypto.so.1.1`` if those files exist. [#65581](https://github.com/saltstack/salt/issues/65581) +- Fixed an issue where Salt Cloud would fail if it could not delete lingering + PAexec binaries [#65584](https://github.com/saltstack/salt/issues/65584) + +# Added + +- Added Salt support for Debian 12 [#64223](https://github.com/saltstack/salt/issues/64223) +- Added Salt support for Amazon Linux 2023 [#64455](https://github.com/saltstack/salt/issues/64455) + +# Security + +- Bump to `cryptography==41.0.4` due to https://github.com/advisories/GHSA-v8gr-m533-ghj9 [#65268](https://github.com/saltstack/salt/issues/65268) +- Bump to `cryptography==41.0.7` due to https://github.com/advisories/GHSA-jfhm-5ghh-2f97 [#65643](https://github.com/saltstack/salt/issues/65643) + + * Mon Oct 16 2023 Salt Project Packaging - 3006.4 # Security diff --git a/pkg/tests/files/check_imports.sls b/pkg/tests/files/check_imports.sls deleted file mode 100644 index 0dde9d6ad332..000000000000 --- a/pkg/tests/files/check_imports.sls +++ /dev/null @@ -1,53 +0,0 @@ -#!py -import importlib - -def run(): - config = {} - for test_import in [ - 'templates', 'platform', 'cli', 'executors', 'config', 'wheel', 'netapi', - 'cache', 'proxy', 'transport', 'metaproxy', 'modules', 'tokens', 'matchers', - 'acl', 'auth', 'log', 'engines', 'client', 'returners', 'runners', 'tops', - 'output', 'daemons', 'thorium', 'renderers', 'states', 'cloud', 'roster', - 'beacons', 'pillar', 
'spm', 'utils', 'sdb', 'fileserver', 'defaults', - 'ext', 'queues', 'grains', 'serializers' - ]: - try: - import_name = "salt.{}".format(test_import) - importlib.import_module(import_name) - config['test_imports_succeeded'] = { - 'test.succeed_without_changes': [ - { - 'name': import_name - }, - ], - } - except ModuleNotFoundError as err: - config['test_imports_failed'] = { - 'test.fail_without_changes': [ - { - 'name': import_name, - 'comment': "The imports test failed. The error was: {}".format(err) - }, - ], - } - - for stdlib_import in ["telnetlib"]: - try: - importlib.import_module(stdlib_import) - config['stdlib_imports_succeeded'] = { - 'test.succeed_without_changes': [ - { - 'name': stdlib_import - }, - ], - } - except ModuleNotFoundError as err: - config['stdlib_imports_failed'] = { - 'test.fail_without_changes': [ - { - 'name': stdlib_import, - 'comment': "The stdlib imports test failed. The error was: {}".format(err) - }, - ], - } - return config diff --git a/pkg/tests/files/check_python.py b/pkg/tests/files/check_python.py deleted file mode 100644 index f1d46b76df7b..000000000000 --- a/pkg/tests/files/check_python.py +++ /dev/null @@ -1,13 +0,0 @@ -import sys - -import salt.utils.data - -user_arg = sys.argv - -if user_arg[1] == "raise": - raise Exception("test") - -if salt.utils.data.is_true(user_arg[1]): - sys.exit(0) -else: - sys.exit(1) diff --git a/pkg/tests/integration/test_check_imports.py b/pkg/tests/integration/test_check_imports.py deleted file mode 100644 index 9a0f1c5fbe48..000000000000 --- a/pkg/tests/integration/test_check_imports.py +++ /dev/null @@ -1,22 +0,0 @@ -import logging - -import pytest -from saltfactories.utils.functional import MultiStateResult - -pytestmark = [ - pytest.mark.skip_on_windows, -] - -log = logging.getLogger(__name__) - - -def test_check_imports(salt_cli, salt_minion): - """ - Test imports - """ - ret = salt_cli.run("state.sls", "check_imports", minion_tgt=salt_minion.id) - assert ret.returncode == 0 - assert 
ret.data - result = MultiStateResult(raw=ret.data) - for state_ret in result: - assert state_ret.result is True diff --git a/pkg/tests/integration/test_python.py b/pkg/tests/integration/test_python.py deleted file mode 100644 index a7271994a853..000000000000 --- a/pkg/tests/integration/test_python.py +++ /dev/null @@ -1,37 +0,0 @@ -import subprocess - -import pytest - -from tests.support.helpers import TESTS_DIR - - -@pytest.fixture -def python_script_bin(install_salt): - # Tiamat builds run scripts via `salt python` - if not install_salt.relenv and not install_salt.classic: - return install_salt.binary_paths["python"][:1] + ["python"] - return install_salt.binary_paths["python"] - - -@pytest.mark.parametrize("exp_ret,user_arg", [(1, "false"), (0, "true")]) -def test_python_script(install_salt, exp_ret, user_arg, python_script_bin): - ret = install_salt.proc.run( - *(python_script_bin + [str(TESTS_DIR / "files" / "check_python.py"), user_arg]), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - check=False, - universal_newlines=True, - ) - - assert ret.returncode == exp_ret, ret.stderr - - -def test_python_script_exception(install_salt, python_script_bin): - ret = install_salt.proc.run( - *(python_script_bin + [str(TESTS_DIR / "files" / "check_python.py"), "raise"]), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - check=False, - universal_newlines=True, - ) - assert "Exception: test" in ret.stderr diff --git a/pkg/tests/integration/test_salt_pillar.py b/pkg/tests/integration/test_salt_pillar.py deleted file mode 100644 index 007bae21f965..000000000000 --- a/pkg/tests/integration/test_salt_pillar.py +++ /dev/null @@ -1,13 +0,0 @@ -import pytest - -pytestmark = [ - pytest.mark.skip_on_windows, -] - - -def test_salt_pillar(salt_cli, salt_minion): - """ - Test pillar.items - """ - ret = salt_cli.run("pillar.items", minion_tgt=salt_minion.id) - assert "info" in ret.data diff --git a/pkg/tests/integration/test_salt_state_file.py 
b/pkg/tests/integration/test_salt_state_file.py deleted file mode 100644 index b951b48bde66..000000000000 --- a/pkg/tests/integration/test_salt_state_file.py +++ /dev/null @@ -1,24 +0,0 @@ -import sys - -import pytest - -pytestmark = [ - pytest.mark.skip_on_windows, -] - - -def test_salt_state_file(salt_cli, salt_minion): - """ - Test state file - """ - if sys.platform.startswith("win"): - ret = salt_cli.run("state.apply", "win_states", minion_tgt=salt_minion.id) - else: - ret = salt_cli.run("state.apply", "states", minion_tgt=salt_minion.id) - - assert ret.data, ret - if ret.stdout and "Minion did not return" in ret.stdout: - pytest.skip("Skipping test, state took too long to apply") - sls_ret = ret.data[next(iter(ret.data))] - assert "changes" in sls_ret - assert "name" in sls_ret diff --git a/pkg/tests/support/coverage/sitecustomize.py b/pkg/tests/support/coverage/sitecustomize.py deleted file mode 100644 index bee2ff80f2f5..000000000000 --- a/pkg/tests/support/coverage/sitecustomize.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -Python will always try to import sitecustomize. -We use that fact to try and support code coverage for sub-processes -""" - -try: - import coverage - - coverage.process_startup() -except ImportError: - pass diff --git a/pkg/tests/support/paths.py b/pkg/tests/support/paths.py deleted file mode 100644 index a8a82bce0e09..000000000000 --- a/pkg/tests/support/paths.py +++ /dev/null @@ -1,102 +0,0 @@ -""" - :codeauthor: Pedro Algarvio (pedro@algarvio.me) - :copyright: Copyright 2017 by the SaltStack Team, see AUTHORS for more details. - :license: Apache 2.0, see LICENSE for more details. 
- - - tests.support.paths - ~~~~~~~~~~~~~~~~~~~ - - Tests related paths -""" - -import logging -import os -import re -import sys -import tempfile - -log = logging.getLogger(__name__) - -SALT_CODE_DIR = os.path.join( - os.path.dirname( - os.path.dirname( - os.path.dirname( - os.path.dirname(os.path.normpath(os.path.abspath(__file__))) - ) - ) - ), - "salt", -) -TESTS_DIR = os.path.join(os.path.dirname(SALT_CODE_DIR), "tests") -if TESTS_DIR.startswith("//"): - # Have we been given an initial double forward slash? Ditch it! - TESTS_DIR = TESTS_DIR[1:] -if sys.platform.startswith("win"): - TESTS_DIR = os.path.normcase(TESTS_DIR) -CODE_DIR = os.path.dirname(TESTS_DIR) -if sys.platform.startswith("win"): - CODE_DIR = CODE_DIR.replace("\\", "\\\\") -UNIT_TEST_DIR = os.path.join(TESTS_DIR, "unit") -INTEGRATION_TEST_DIR = os.path.join(TESTS_DIR, "integration") - -# Let's inject CODE_DIR so salt is importable if not there already -if TESTS_DIR in sys.path: - sys.path.remove(TESTS_DIR) -if CODE_DIR in sys.path and sys.path[0] != CODE_DIR: - sys.path.remove(CODE_DIR) -if CODE_DIR not in sys.path: - sys.path.insert(0, CODE_DIR) -if TESTS_DIR not in sys.path: - sys.path.insert(1, TESTS_DIR) - -SYS_TMP_DIR = os.path.abspath( - os.path.realpath( - # Avoid ${TMPDIR} and gettempdir() on MacOS as they yield a base path too long - # for unix sockets: ``error: AF_UNIX path too long`` - # Gentoo Portage prefers ebuild tests are rooted in ${TMPDIR} - os.environ.get("TMPDIR", tempfile.gettempdir()) - if not sys.platform.startswith("darwin") - else "/tmp" - ) -) -TMP = os.path.join(SYS_TMP_DIR, "salt-tests-tmpdir") -TMP_ROOT_DIR = os.path.join(TMP, "rootdir") -FILES = os.path.join(INTEGRATION_TEST_DIR, "files") -BASE_FILES = os.path.join(INTEGRATION_TEST_DIR, "files", "file", "base") -PROD_FILES = os.path.join(INTEGRATION_TEST_DIR, "files", "file", "prod") -PYEXEC = "python{}.{}".format(*sys.version_info) -MOCKBIN = os.path.join(INTEGRATION_TEST_DIR, "mockbin") -SCRIPT_DIR = 
os.path.join(CODE_DIR, "scripts") -TMP_STATE_TREE = os.path.join(SYS_TMP_DIR, "salt-temp-state-tree") -TMP_PILLAR_TREE = os.path.join(SYS_TMP_DIR, "salt-temp-pillar-tree") -TMP_PRODENV_STATE_TREE = os.path.join(SYS_TMP_DIR, "salt-temp-prodenv-state-tree") -TMP_PRODENV_PILLAR_TREE = os.path.join(SYS_TMP_DIR, "salt-temp-prodenv-pillar-tree") -TMP_CONF_DIR = TMP_MINION_CONF_DIR = os.path.join(TMP, "config") -TMP_SUB_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, "sub-minion") -TMP_SYNDIC_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, "syndic-minion") -TMP_SYNDIC_MASTER_CONF_DIR = os.path.join(TMP_CONF_DIR, "syndic-master") -TMP_SSH_CONF_DIR = TMP_MINION_CONF_DIR -CONF_DIR = os.path.join(INTEGRATION_TEST_DIR, "files", "conf") -PILLAR_DIR = os.path.join(FILES, "pillar") -TMP_SCRIPT_DIR = os.path.join(TMP, "scripts") -ENGINES_DIR = os.path.join(FILES, "engines") -LOG_HANDLERS_DIR = os.path.join(FILES, "log_handlers") - - -def list_test_mods(): - """ - A generator which returns all of the test files - """ - test_re = re.compile(r"^test_.+\.py$") - for dirname in (UNIT_TEST_DIR, INTEGRATION_TEST_DIR): - test_type = os.path.basename(dirname) - for root, _, files in os.walk(dirname): - parent_mod = root[len(dirname) :].lstrip(os.sep).replace(os.sep, ".") - for filename in files: - if test_re.match(filename): - mod_name = test_type - if parent_mod: - mod_name += "." + parent_mod - mod_name += "." + filename[:-3] - yield mod_name diff --git a/pkg/tests/support/runtests.py b/pkg/tests/support/runtests.py deleted file mode 100644 index ce5c9644cd3a..000000000000 --- a/pkg/tests/support/runtests.py +++ /dev/null @@ -1,209 +0,0 @@ -""" - :codeauthor: Pedro Algarvio (pedro@algarvio.me) - - .. _runtime_vars: - - Runtime Variables - ----------------- - - :command:`salt-runtests` provides a variable, :py:attr:`RUNTIME_VARS` which has some common paths defined at - startup: - - .. 
autoattribute:: tests.support.runtests.RUNTIME_VARS - :annotation: - - :TMP: Tests suite temporary directory - :TMP_CONF_DIR: Configuration directory from where the daemons that :command:`salt-runtests` starts get their - configuration files. - :TMP_CONF_MASTER_INCLUDES: Salt Master configuration files includes directory. See - :salt_conf_master:`default_include`. - :TMP_CONF_MINION_INCLUDES: Salt Minion configuration files includes directory. Seei - :salt_conf_minion:`include`. - :TMP_CONF_CLOUD_INCLUDES: Salt cloud configuration files includes directory. The same as the salt master and - minion includes configuration, though under a different directory name. - :TMP_CONF_CLOUD_PROFILE_INCLUDES: Salt cloud profiles configuration files includes directory. Same as above. - :TMP_CONF_CLOUD_PROVIDER_INCLUDES: Salt cloud providers configuration files includes directory. Same as above. - :TMP_SCRIPT_DIR: Temporary scripts directory from where the Salt CLI tools will be called when running tests. - :TMP_SALT_INTEGRATION_FILES: Temporary directory from where Salt's test suite integration files are copied to. - :TMP_BASEENV_STATE_TREE: Salt master's **base** environment state tree directory - :TMP_PRODENV_STATE_TREE: Salt master's **production** environment state tree directory - :TMP_BASEENV_PILLAR_TREE: Salt master's **base** environment pillar tree directory - :TMP_PRODENV_PILLAR_TREE: Salt master's **production** environment pillar tree directory - - - Use it on your test case in case of need. As simple as: - - .. code-block:: python - - import os - from tests.support.runtests import RUNTIME_VARS - - # Path to the testing minion configuration file - minion_config_path = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion') - - .. 
_`pytest`: http://pytest.org - """ - -import logging -import os -import shutil - -import salt.utils.path -import salt.utils.platform -import tests.support.paths as paths - -try: - import pwd -except ImportError: - import salt.utils.win_functions - -log = logging.getLogger(__name__) - - -def this_user(): - """ - Get the user associated with the current process. - """ - if salt.utils.platform.is_windows(): - return salt.utils.win_functions.get_current_user(with_domain=False) - return pwd.getpwuid(os.getuid())[0] - - -class RootsDict(dict): - def merge(self, data): - for key, values in data.items(): - if key not in self: - self[key] = values - continue - for value in values: - if value not in self[key]: - self[key].append(value) - return self - - def to_dict(self): - return dict(self) - - -def recursive_copytree(source, destination, overwrite=False): - for root, dirs, files in os.walk(source): - for item in dirs: - src_path = os.path.join(root, item) - dst_path = os.path.join( - destination, src_path.replace(source, "").lstrip(os.sep) - ) - if not os.path.exists(dst_path): - log.debug("Creating directory: %s", dst_path) - os.makedirs(dst_path) - for item in files: - src_path = os.path.join(root, item) - dst_path = os.path.join( - destination, src_path.replace(source, "").lstrip(os.sep) - ) - if os.path.exists(dst_path) and not overwrite: - if os.stat(src_path).st_mtime > os.stat(dst_path).st_mtime: - log.debug("Copying %s to %s", src_path, dst_path) - shutil.copy2(src_path, dst_path) - else: - if not os.path.isdir(os.path.dirname(dst_path)): - log.debug("Creating directory: %s", os.path.dirname(dst_path)) - os.makedirs(os.path.dirname(dst_path)) - log.debug("Copying %s to %s", src_path, dst_path) - shutil.copy2(src_path, dst_path) - - -class RuntimeVars: - - __self_attributes__ = ("_vars", "_locked", "lock") - - def __init__(self, **kwargs): - self._vars = kwargs - self._locked = False - - def lock(self): - # Late import - from salt.utils.immutabletypes import freeze 
- - frozen_vars = freeze(self._vars.copy()) - self._vars = frozen_vars - self._locked = True - - def __iter__(self): - yield from self._vars.items() - - def __getattribute__(self, name): - if name in object.__getattribute__(self, "_vars"): - return object.__getattribute__(self, "_vars")[name] - return object.__getattribute__(self, name) - - def __setattr__(self, name, value): - if getattr(self, "_locked", False) is True: - raise RuntimeError( - "After {} is locked, no additional data can be added to it".format( - self.__class__.__name__ - ) - ) - if name in object.__getattribute__(self, "__self_attributes__"): - object.__setattr__(self, name, value) - return - self._vars[name] = value - - -# <---- Helper Methods ----------------------------------------------------------------------------------------------- - - -# ----- Global Variables --------------------------------------------------------------------------------------------> -XML_OUTPUT_DIR = os.environ.get( - "SALT_XML_TEST_REPORTS_DIR", os.path.join(paths.TMP, "xml-test-reports") -) -# <---- Global Variables --------------------------------------------------------------------------------------------- - - -# ----- Tests Runtime Variables -------------------------------------------------------------------------------------> - -RUNTIME_VARS = RuntimeVars( - TMP=paths.TMP, - SYS_TMP_DIR=paths.SYS_TMP_DIR, - FILES=paths.FILES, - CONF_DIR=paths.CONF_DIR, - PILLAR_DIR=paths.PILLAR_DIR, - ENGINES_DIR=paths.ENGINES_DIR, - LOG_HANDLERS_DIR=paths.LOG_HANDLERS_DIR, - TMP_ROOT_DIR=paths.TMP_ROOT_DIR, - TMP_CONF_DIR=paths.TMP_CONF_DIR, - TMP_MINION_CONF_DIR=paths.TMP_MINION_CONF_DIR, - TMP_CONF_MASTER_INCLUDES=os.path.join(paths.TMP_CONF_DIR, "master.d"), - TMP_CONF_MINION_INCLUDES=os.path.join(paths.TMP_CONF_DIR, "minion.d"), - TMP_CONF_PROXY_INCLUDES=os.path.join(paths.TMP_CONF_DIR, "proxy.d"), - TMP_CONF_CLOUD_INCLUDES=os.path.join(paths.TMP_CONF_DIR, "cloud.conf.d"), - TMP_CONF_CLOUD_PROFILE_INCLUDES=os.path.join( - 
paths.TMP_CONF_DIR, "cloud.profiles.d" - ), - TMP_CONF_CLOUD_PROVIDER_INCLUDES=os.path.join( - paths.TMP_CONF_DIR, "cloud.providers.d" - ), - TMP_SUB_MINION_CONF_DIR=paths.TMP_SUB_MINION_CONF_DIR, - TMP_SYNDIC_MASTER_CONF_DIR=paths.TMP_SYNDIC_MASTER_CONF_DIR, - TMP_SYNDIC_MINION_CONF_DIR=paths.TMP_SYNDIC_MINION_CONF_DIR, - TMP_SSH_CONF_DIR=paths.TMP_SSH_CONF_DIR, - TMP_SCRIPT_DIR=paths.TMP_SCRIPT_DIR, - TMP_STATE_TREE=paths.TMP_STATE_TREE, - TMP_BASEENV_STATE_TREE=paths.TMP_STATE_TREE, - TMP_PILLAR_TREE=paths.TMP_PILLAR_TREE, - TMP_BASEENV_PILLAR_TREE=paths.TMP_PILLAR_TREE, - TMP_PRODENV_STATE_TREE=paths.TMP_PRODENV_STATE_TREE, - TMP_PRODENV_PILLAR_TREE=paths.TMP_PRODENV_PILLAR_TREE, - SHELL_TRUE_PATH=salt.utils.path.which("true") - if not salt.utils.platform.is_windows() - else "cmd /c exit 0 > nul", - SHELL_FALSE_PATH=salt.utils.path.which("false") - if not salt.utils.platform.is_windows() - else "cmd /c exit 1 > nul", - RUNNING_TESTS_USER=this_user(), - RUNTIME_CONFIGS={}, - CODE_DIR=paths.CODE_DIR, - SALT_CODE_DIR=paths.SALT_CODE_DIR, - BASE_FILES=paths.BASE_FILES, - PROD_FILES=paths.PROD_FILES, - TESTS_DIR=paths.TESTS_DIR, -) -# <---- Tests Runtime Variables -------------------------------------------------------------------------------------- diff --git a/pkg/tests/support/sminion.py b/pkg/tests/support/sminion.py deleted file mode 100644 index abf45fd7bde9..000000000000 --- a/pkg/tests/support/sminion.py +++ /dev/null @@ -1,256 +0,0 @@ -""" -tests.support.sminion -~~~~~~~~~~~~~~~~~~~~~ - -SMinion's support functions -""" - -import fnmatch -import hashlib -import logging -import os -import shutil -import sys - -import salt.minion -import salt.utils.path -import salt.utils.stringutils -from tests.support.runtests import RUNTIME_VARS - -log = logging.getLogger(__name__) - -DEFAULT_SMINION_ID = "pytest-internal-sminion" - - -def build_minion_opts( - minion_id=None, - root_dir=None, - initial_conf_file=None, - minion_opts_overrides=None, - skip_cached_opts=False, 
- cache_opts=True, - minion_role=None, -): - if minion_id is None: - minion_id = DEFAULT_SMINION_ID - if skip_cached_opts is False: - try: - opts_cache = build_minion_opts.__cached_opts__ - except AttributeError: - opts_cache = build_minion_opts.__cached_opts__ = {} - cached_opts = opts_cache.get(minion_id) - if cached_opts: - return cached_opts - - log.info("Generating testing minion %r configuration...", minion_id) - if root_dir is None: - hashed_minion_id = hashlib.sha1() - hashed_minion_id.update(salt.utils.stringutils.to_bytes(minion_id)) - root_dir = os.path.join( - RUNTIME_VARS.TMP_ROOT_DIR, hashed_minion_id.hexdigest()[:6] - ) - - if initial_conf_file is not None: - minion_opts = salt.config._read_conf_file( - initial_conf_file - ) # pylint: disable=protected-access - else: - minion_opts = {} - - conf_dir = os.path.join(root_dir, "conf") - conf_file = os.path.join(conf_dir, "minion") - - minion_opts["id"] = minion_id - minion_opts["conf_file"] = conf_file - minion_opts["root_dir"] = root_dir - minion_opts["cachedir"] = "cache" - minion_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER - minion_opts["pki_dir"] = "pki" - minion_opts["hosts.file"] = os.path.join(RUNTIME_VARS.TMP_ROOT_DIR, "hosts") - minion_opts["aliases.file"] = os.path.join(RUNTIME_VARS.TMP_ROOT_DIR, "aliases") - minion_opts["file_client"] = "local" - minion_opts["server_id_use_crc"] = "adler32" - minion_opts["pillar_roots"] = {"base": [RUNTIME_VARS.TMP_PILLAR_TREE]} - minion_opts["file_roots"] = { - "base": [ - # Let's support runtime created files that can be used like: - # salt://my-temp-file.txt - RUNTIME_VARS.TMP_STATE_TREE - ], - # Alternate root to test __env__ choices - "prod": [ - os.path.join(RUNTIME_VARS.FILES, "file", "prod"), - RUNTIME_VARS.TMP_PRODENV_STATE_TREE, - ], - } - if initial_conf_file and initial_conf_file.startswith(RUNTIME_VARS.FILES): - # We assume we were passed a minion configuration file defined fo testing and, as such - # we define the file and pillar roots to 
include the testing states/pillar trees - minion_opts["pillar_roots"]["base"].append( - os.path.join(RUNTIME_VARS.FILES, "pillar", "base"), - ) - minion_opts["file_roots"]["base"].append( - os.path.join(RUNTIME_VARS.FILES, "file", "base"), - ) - minion_opts["file_roots"]["prod"].append( - os.path.join(RUNTIME_VARS.FILES, "file", "prod"), - ) - - # We need to copy the extension modules into the new master root_dir or - # it will be prefixed by it - extension_modules_path = os.path.join(root_dir, "extension_modules") - if not os.path.exists(extension_modules_path): - shutil.copytree( - os.path.join(RUNTIME_VARS.FILES, "extension_modules"), - extension_modules_path, - ) - minion_opts["extension_modules"] = extension_modules_path - - # Custom grains - if "grains" not in minion_opts: - minion_opts["grains"] = {} - if minion_role is not None: - minion_opts["grains"]["role"] = minion_role - - # Under windows we can't seem to properly create a virtualenv off of another - # virtualenv, we can on linux but we will still point to the virtualenv binary - # outside the virtualenv running the test suite, if that's the case. 
- try: - real_prefix = sys.real_prefix - # The above attribute exists, this is a virtualenv - if salt.utils.platform.is_windows(): - virtualenv_binary = os.path.join(real_prefix, "Scripts", "virtualenv.exe") - else: - # We need to remove the virtualenv from PATH or we'll get the virtualenv binary - # from within the virtualenv, we don't want that - path = os.environ.get("PATH") - if path is not None: - path_items = path.split(os.pathsep) - for item in path_items[:]: - if item.startswith(sys.base_prefix): - path_items.remove(item) - os.environ["PATH"] = os.pathsep.join(path_items) - virtualenv_binary = salt.utils.path.which("virtualenv") - if path is not None: - # Restore previous environ PATH - os.environ["PATH"] = path - if not virtualenv_binary.startswith(real_prefix): - virtualenv_binary = None - if virtualenv_binary and not os.path.exists(virtualenv_binary): - # It doesn't exist?! - virtualenv_binary = None - except AttributeError: - # We're not running inside a virtualenv - virtualenv_binary = None - if virtualenv_binary: - minion_opts["venv_bin"] = virtualenv_binary - - # Override minion_opts with minion_opts_overrides - if minion_opts_overrides: - minion_opts.update(minion_opts_overrides) - - if not os.path.exists(conf_dir): - os.makedirs(conf_dir) - - with salt.utils.files.fopen(conf_file, "w") as fp_: - salt.utils.yaml.safe_dump(minion_opts, fp_, default_flow_style=False) - - log.info("Generating testing minion %r configuration completed.", minion_id) - minion_opts = salt.config.minion_config( - conf_file, minion_id=minion_id, cache_minion_id=True - ) - salt.utils.verify.verify_env( - [ - os.path.join(minion_opts["pki_dir"], "accepted"), - os.path.join(minion_opts["pki_dir"], "rejected"), - os.path.join(minion_opts["pki_dir"], "pending"), - os.path.dirname(minion_opts["log_file"]), - minion_opts["extension_modules"], - minion_opts["cachedir"], - minion_opts["sock_dir"], - RUNTIME_VARS.TMP_STATE_TREE, - RUNTIME_VARS.TMP_PILLAR_TREE, - 
RUNTIME_VARS.TMP_PRODENV_STATE_TREE, - RUNTIME_VARS.TMP, - ], - RUNTIME_VARS.RUNNING_TESTS_USER, - root_dir=root_dir, - ) - if cache_opts: - try: - opts_cache = build_minion_opts.__cached_opts__ - except AttributeError: - opts_cache = build_minion_opts.__cached_opts__ = {} - opts_cache[minion_id] = minion_opts - return minion_opts - - -def create_sminion( - minion_id=None, - root_dir=None, - initial_conf_file=None, - sminion_cls=salt.minion.SMinion, - minion_opts_overrides=None, - skip_cached_minion=False, - cache_sminion=True, -): - if minion_id is None: - minion_id = DEFAULT_SMINION_ID - if skip_cached_minion is False: - try: - minions_cache = create_sminion.__cached_minions__ - except AttributeError: - create_sminion.__cached_minions__ = {} - cached_minion = create_sminion.__cached_minions__.get(minion_id) - if cached_minion: - return cached_minion - minion_opts = build_minion_opts( - minion_id=minion_id, - root_dir=root_dir, - initial_conf_file=initial_conf_file, - minion_opts_overrides=minion_opts_overrides, - skip_cached_opts=skip_cached_minion, - cache_opts=cache_sminion, - ) - log.info("Instantiating a testing %s(%s)", sminion_cls.__name__, minion_id) - sminion = sminion_cls(minion_opts) - if cache_sminion: - try: - minions_cache = create_sminion.__cached_minions__ - except AttributeError: - minions_cache = create_sminion.__cached_minions__ = {} - minions_cache[minion_id] = sminion - return sminion - - -def check_required_sminion_attributes(sminion_attr, required_items): - """ - :param sminion_attr: The name of the sminion attribute to check, such as 'functions' or 'states' - :param required_items: The items that must be part of the designated sminion attribute for the decorated test - :return The packages that are not available - """ - required_salt_items = set(required_items) - sminion = create_sminion(minion_id=DEFAULT_SMINION_ID) - available_items = list(getattr(sminion, sminion_attr)) - not_available_items = set() - - name = 
"__not_available_{items}s__".format(items=sminion_attr) - if not hasattr(sminion, name): - setattr(sminion, name, set()) - - cached_not_available_items = getattr(sminion, name) - - for not_available_item in cached_not_available_items: - if not_available_item in required_salt_items: - not_available_items.add(not_available_item) - required_salt_items.remove(not_available_item) - - for required_item_name in required_salt_items: - search_name = required_item_name - if "." not in search_name: - search_name += ".*" - if not fnmatch.filter(available_items, search_name): - not_available_items.add(required_item_name) - cached_not_available_items.add(required_item_name) - - return not_available_items diff --git a/pkg/tests/upgrade/__init__.py b/pkg/tests/upgrade/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/requirements/static/ci/py3.10/tools.txt b/requirements/static/ci/py3.10/tools.txt index 6b5c89f60ec4..58499d39fd28 100644 --- a/requirements/static/ci/py3.10/tools.txt +++ b/requirements/static/ci/py3.10/tools.txt @@ -50,7 +50,7 @@ s3transfer==0.6.1 # via boto3 six==1.16.0 # via python-dateutil -typing-extensions==4.8.0 +typing-extensions==4.2.0 # via python-tools-scripts urllib3==1.26.18 # via diff --git a/requirements/static/ci/py3.9/tools.txt b/requirements/static/ci/py3.9/tools.txt index 35550acdbe65..ccc4fa893182 100644 --- a/requirements/static/ci/py3.9/tools.txt +++ b/requirements/static/ci/py3.9/tools.txt @@ -50,7 +50,7 @@ s3transfer==0.6.1 # via boto3 six==1.16.0 # via python-dateutil -typing-extensions==4.8.0 +typing-extensions==4.2.0 # via python-tools-scripts urllib3==1.26.18 # via diff --git a/salt/client/ssh/client.py b/salt/client/ssh/client.py index be9247cb157e..8727ce23c3c2 100644 --- a/salt/client/ssh/client.py +++ b/salt/client/ssh/client.py @@ -39,6 +39,10 @@ def __init__( # Salt API should never offer a custom roster! 
self.opts["__disable_custom_roster"] = disable_custom_roster + # Pillar compilation and nested SSH calls require the correct config_dir + # in __opts__, otherwise we will use the SSH minion's one later. + if "config_dir" not in self.opts: + self.opts["config_dir"] = os.path.dirname(c_path) def sanitize_kwargs(self, kwargs): roster_vals = [ diff --git a/salt/grains/core.py b/salt/grains/core.py index 52b55b47c2ab..4667f626e16d 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -478,7 +478,7 @@ def _bsd_cpudata(osdata): return grains -def _sunos_cpudata(): +def _sunos_cpudata(): # pragma: no cover """ Return the CPU information for Solaris-like systems """ @@ -510,7 +510,7 @@ def _sunos_cpudata(): return grains -def _aix_cpudata(): +def _aix_cpudata(): # pragma: no cover """ Return CPU information for AIX systems """ @@ -613,7 +613,7 @@ def _bsd_memdata(osdata): return grains -def _sunos_memdata(): +def _sunos_memdata(): # pragma: no cover """ Return the memory information for SunOS-like systems """ @@ -637,7 +637,7 @@ def _sunos_memdata(): return grains -def _aix_memdata(): +def _aix_memdata(): # pragma: no cover """ Return the memory information for AIX systems """ @@ -691,16 +691,16 @@ def _memdata(osdata): grains.update(_bsd_memdata(osdata)) elif osdata["kernel"] == "Darwin": grains.update(_osx_memdata()) - elif osdata["kernel"] == "SunOS": - grains.update(_sunos_memdata()) - elif osdata["kernel"] == "AIX": - grains.update(_aix_memdata()) + elif osdata["kernel"] == "SunOS": # pragma: no cover + grains.update(_sunos_memdata()) # pragma: no cover + elif osdata["kernel"] == "AIX": # pragma: no cover + grains.update(_aix_memdata()) # pragma: no cover elif osdata["kernel"] == "Windows" and HAS_WMI: grains.update(_windows_memdata()) return grains -def _aix_get_machine_id(): +def _aix_get_machine_id(): # pragma: no cover """ Parse the output of lsattr -El sys0 for os_uuid """ @@ -2473,7 +2473,7 @@ def _smartos_os_data(): return grains -def _sunos_release(): 
+def _sunos_release(): # pragma: no cover grains = {} with salt.utils.files.fopen("/etc/release", "r") as fp_: rel_data = fp_.read() diff --git a/salt/modules/mac_keychain.py b/salt/modules/mac_keychain.py index 7fdc162b9aa2..978d214ebf26 100644 --- a/salt/modules/mac_keychain.py +++ b/salt/modules/mac_keychain.py @@ -122,14 +122,14 @@ def list_certs(keychain="/Library/Keychains/System.keychain"): salt '*' keychain.list_certs """ cmd = ( - 'security find-certificate -a {} | grep -o "alis".*\\" | ' + 'security find-certificate -a {} | grep -o "alis.*" | ' "grep -o '\\\"[-A-Za-z0-9.:() ]*\\\"'".format(shlex.quote(keychain)) ) out = __salt__["cmd.run"](cmd, python_shell=True) return out.replace('"', "").split("\n") -def get_friendly_name(cert, password): +def get_friendly_name(cert, password, legacy=False): """ Get the friendly name of the given certificate @@ -143,15 +143,26 @@ def get_friendly_name(cert, password): Note: The password given here will show up as plaintext in the returned job info. + legacy + Assume legacy format for certificate. + CLI Example: .. 
code-block:: bash salt '*' keychain.get_friendly_name /tmp/test.p12 test123 + + salt '*' keychain.get_friendly_name /tmp/test.p12 test123 legacy=True """ + openssl_cmd = "openssl pkcs12" + if legacy: + openssl_cmd = f"{openssl_cmd} -legacy" + cmd = ( - "openssl pkcs12 -in {} -passin pass:{} -info -nodes -nokeys 2> /dev/null | " - "grep friendlyName:".format(shlex.quote(cert), shlex.quote(password)) + "{} -in {} -passin pass:{} -info -nodes -nokeys 2> /dev/null | " + "grep friendlyName:".format( + openssl_cmd, shlex.quote(cert), shlex.quote(password) + ) ) out = __salt__["cmd.run"](cmd, python_shell=True) return out.replace("friendlyName: ", "").strip() diff --git a/salt/modules/mac_power.py b/salt/modules/mac_power.py index 01fc561e834b..efdca6528465 100644 --- a/salt/modules/mac_power.py +++ b/salt/modules/mac_power.py @@ -68,7 +68,7 @@ def _validate_sleep(minutes): ) raise SaltInvocationError(msg) else: - msg = "Unknown Variable Type Passed for Minutes.\nPassed: {}".format(minutes) + msg = f"Unknown Variable Type Passed for Minutes.\nPassed: {minutes}" raise SaltInvocationError(msg) @@ -115,7 +115,7 @@ def set_sleep(minutes): salt '*' power.set_sleep never """ value = _validate_sleep(minutes) - cmd = "systemsetup -setsleep {}".format(value) + cmd = f"systemsetup -setsleep {value}" salt.utils.mac_utils.execute_return_success(cmd) state = [] @@ -165,7 +165,7 @@ def set_computer_sleep(minutes): salt '*' power.set_computer_sleep off """ value = _validate_sleep(minutes) - cmd = "systemsetup -setcomputersleep {}".format(value) + cmd = f"systemsetup -setcomputersleep {value}" salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( @@ -210,7 +210,7 @@ def set_display_sleep(minutes): salt '*' power.set_display_sleep off """ value = _validate_sleep(minutes) - cmd = "systemsetup -setdisplaysleep {}".format(value) + cmd = f"systemsetup -setdisplaysleep {value}" salt.utils.mac_utils.execute_return_success(cmd) return 
salt.utils.mac_utils.confirm_updated( @@ -255,7 +255,7 @@ def set_harddisk_sleep(minutes): salt '*' power.set_harddisk_sleep off """ value = _validate_sleep(minutes) - cmd = "systemsetup -setharddisksleep {}".format(value) + cmd = f"systemsetup -setharddisksleep {value}" salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( @@ -303,12 +303,13 @@ def set_wake_on_modem(enabled): salt '*' power.set_wake_on_modem True """ state = salt.utils.mac_utils.validate_enabled(enabled) - cmd = "systemsetup -setwakeonmodem {}".format(state) + cmd = f"systemsetup -setwakeonmodem {state}" salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, + True, ) @@ -353,12 +354,13 @@ def set_wake_on_network(enabled): salt '*' power.set_wake_on_network True """ state = salt.utils.mac_utils.validate_enabled(enabled) - cmd = "systemsetup -setwakeonnetworkaccess {}".format(state) + cmd = f"systemsetup -setwakeonnetworkaccess {state}" salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, + True, ) @@ -403,12 +405,13 @@ def set_restart_power_failure(enabled): salt '*' power.set_restart_power_failure True """ state = salt.utils.mac_utils.validate_enabled(enabled) - cmd = "systemsetup -setrestartpowerfailure {}".format(state) + cmd = f"systemsetup -setrestartpowerfailure {state}" salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, + True, ) @@ -453,7 +456,7 @@ def set_restart_freeze(enabled): salt '*' power.set_restart_freeze True """ state = salt.utils.mac_utils.validate_enabled(enabled) - cmd = "systemsetup -setrestartfreeze {}".format(state) + cmd = f"systemsetup -setrestartfreeze {state}" salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated(state, get_restart_freeze, True) @@ -502,10 +505,11 @@ def 
set_sleep_on_power_button(enabled): salt '*' power.set_sleep_on_power_button True """ state = salt.utils.mac_utils.validate_enabled(enabled) - cmd = "systemsetup -setallowpowerbuttontosleepcomputer {}".format(state) + cmd = f"systemsetup -setallowpowerbuttontosleepcomputer {state}" salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, + True, ) diff --git a/salt/modules/openscap.py b/salt/modules/openscap.py index 28df15219067..fb03a3e950af 100644 --- a/salt/modules/openscap.py +++ b/salt/modules/openscap.py @@ -3,26 +3,15 @@ """ - +import argparse import os.path import shlex import shutil +import subprocess import tempfile -from subprocess import PIPE, Popen import salt.utils.versions -ArgumentParser = object - -try: - import argparse # pylint: disable=minimum-python-version - - ArgumentParser = argparse.ArgumentParser - HAS_ARGPARSE = True -except ImportError: # python 2.6 - HAS_ARGPARSE = False - - _XCCDF_MAP = { "eval": { "parser_arguments": [(("--profile",), {"required": True})], @@ -35,11 +24,7 @@ } -def __virtual__(): - return HAS_ARGPARSE, "argparse module is required." 
- - -class _ArgumentParser(ArgumentParser): +class _ArgumentParser(argparse.ArgumentParser): def __init__(self, action=None, *args, **kwargs): super().__init__(*args, prog="oscap", **kwargs) self.add_argument("action", choices=["eval"]) @@ -47,7 +32,7 @@ def __init__(self, action=None, *args, **kwargs): for params, kwparams in _XCCDF_MAP["eval"]["parser_arguments"]: self.add_argument(*params, **kwparams) - def error(self, message, *args, **kwargs): + def error(self, message, *args, **kwargs): # pylint: disable=arguments-differ raise Exception(message) @@ -168,7 +153,9 @@ def xccdf_eval( if success: tempdir = tempfile.mkdtemp() - proc = Popen(cmd_opts, stdout=PIPE, stderr=PIPE, cwd=tempdir) + proc = subprocess.Popen( + cmd_opts, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=tempdir + ) (stdoutdata, error) = proc.communicate() success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False) if proc.returncode < 0: @@ -225,12 +212,18 @@ def xccdf(params): if success: cmd = _XCCDF_MAP[action]["cmd_pattern"].format(args.profile, policy) tempdir = tempfile.mkdtemp() - proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, cwd=tempdir) + proc = subprocess.Popen( + shlex.split(cmd), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=tempdir, + ) (stdoutdata, error) = proc.communicate() success = _OSCAP_EXIT_CODES_MAP.get(proc.returncode, False) if proc.returncode < 0: error += f"\nKilled by signal {proc.returncode}\n".encode("ascii") returncode = proc.returncode + success = _OSCAP_EXIT_CODES_MAP.get(returncode, False) if success: if not __salt__["cp.push_dir"](tempdir): success = False diff --git a/salt/modules/ps.py b/salt/modules/ps.py index 0873a534d941..d79d8c932c12 100644 --- a/salt/modules/ps.py +++ b/salt/modules/ps.py @@ -590,7 +590,7 @@ def boot_time(time_format=None): try: return b_time.strftime(time_format) except TypeError as exc: - raise SaltInvocationError("Invalid format string: {}".format(exc)) + raise SaltInvocationError(f"Invalid format string: 
{exc}") return b_time @@ -648,34 +648,8 @@ def get_users(): salt '*' ps.get_users """ - try: - recs = psutil.users() - return [dict(x._asdict()) for x in recs] - except AttributeError: - # get_users is only present in psutil > v0.5.0 - # try utmp - try: - import utmp # pylint: disable=import-error - - result = [] - while True: - rec = utmp.utmpaccess.getutent() - if rec is None: - return result - elif rec[0] == 7: - started = rec[8] - if isinstance(started, tuple): - started = started[0] - result.append( - { - "name": rec[4], - "terminal": rec[2], - "started": started, - "host": rec[5], - } - ) - except ImportError: - return False + recs = psutil.users() + return [dict(x._asdict()) for x in recs] def lsof(name): diff --git a/salt/modules/system.py b/salt/modules/system.py index 18c062878b43..c9e3db3f7b55 100644 --- a/salt/modules/system.py +++ b/salt/modules/system.py @@ -67,7 +67,7 @@ def init(runlevel): salt '*' system.init 3 """ - cmd = ["init", "{}".format(runlevel)] + cmd = ["init", f"{runlevel}"] ret = __salt__["cmd.run"](cmd, python_shell=False) return ret @@ -100,7 +100,7 @@ def reboot(at_time=None): salt '*' system.reboot """ - cmd = ["shutdown", "-r", ("{}".format(at_time) if at_time else "now")] + cmd = ["shutdown", "-r", (f"{at_time}" if at_time else "now")] ret = __salt__["cmd.run"](cmd, python_shell=False) return ret @@ -128,7 +128,7 @@ def shutdown(at_time=None): else: flag = "-h" - cmd = ["shutdown", flag, ("{}".format(at_time) if at_time else "now")] + cmd = ["shutdown", flag, (f"{at_time}" if at_time else "now")] ret = __salt__["cmd.run"](cmd, python_shell=False) return ret @@ -595,7 +595,7 @@ def set_computer_desc(desc): pass pattern = re.compile(r"^\s*PRETTY_HOSTNAME=(.*)$") - new_line = salt.utils.stringutils.to_str('PRETTY_HOSTNAME="{}"'.format(desc)) + new_line = salt.utils.stringutils.to_str(f'PRETTY_HOSTNAME="{desc}"') try: with salt.utils.files.fopen("/etc/machine-info", "r+") as mach_info: lines = mach_info.readlines() @@ -678,10 +678,10 
@@ def set_reboot_required_witnessed(): os.makedirs(dir_path) except OSError as ex: raise SaltInvocationError( - "Error creating {} (-{}): {}".format(dir_path, ex.errno, ex.strerror) + f"Error creating {dir_path} (-{ex.errno}): {ex.strerror}" ) - rdict = __salt__["cmd.run_all"]("touch {}".format(NILRT_REBOOT_WITNESS_PATH)) + rdict = __salt__["cmd.run_all"](f"touch {NILRT_REBOOT_WITNESS_PATH}") errcode = rdict["retcode"] return errcode == 0 diff --git a/salt/states/linux_acl.py b/salt/states/linux_acl.py index b854ffb818b7..016af32a3e69 100644 --- a/salt/states/linux_acl.py +++ b/salt/states/linux_acl.py @@ -135,7 +135,7 @@ def present(name, acl_type, acl_name="", perms="", recurse=False, force=False): _octal_lookup = {0: "-", 1: "r", 2: "w", 4: "x"} if not os.path.exists(name): - ret["comment"] = "{} does not exist".format(name) + ret["comment"] = f"{name} does not exist" ret["result"] = False return ret @@ -250,7 +250,7 @@ def present(name, acl_type, acl_name="", perms="", recurse=False, force=False): ) ret.update( { - "comment": "Updated permissions for {}".format(acl_name), + "comment": f"Updated permissions for {acl_name}", "result": True, "changes": changes, } @@ -291,7 +291,7 @@ def present(name, acl_type, acl_name="", perms="", recurse=False, force=False): ) ret.update( { - "comment": "Applied new permissions for {}".format(acl_name), + "comment": f"Applied new permissions for {acl_name}", "result": True, "changes": changes, } @@ -335,7 +335,7 @@ def absent(name, acl_type, acl_name="", perms="", recurse=False): ret = {"name": name, "result": True, "changes": {}, "comment": ""} if not os.path.exists(name): - ret["comment"] = "{} does not exist".format(name) + ret["comment"] = f"{name} does not exist" ret["result"] = False return ret @@ -433,7 +433,7 @@ def list_present(name, acl_type, acl_names=None, perms="", recurse=False, force= _octal = {"r": 4, "w": 2, "x": 1, "-": 0} _octal_perms = sum(_octal.get(i, i) for i in perms) if not os.path.exists(name): - 
ret["comment"] = "{} does not exist".format(name) + ret["comment"] = f"{name} does not exist" ret["result"] = False return ret @@ -722,7 +722,7 @@ def list_absent(name, acl_type, acl_names=None, recurse=False): ret = {"name": name, "result": True, "changes": {}, "comment": ""} if not os.path.exists(name): - ret["comment"] = "{} does not exist".format(name) + ret["comment"] = f"{name} does not exist" ret["result"] = False return ret diff --git a/tests/integration/modules/test_mac_assistive.py b/tests/integration/modules/test_mac_assistive.py deleted file mode 100644 index 5c435def9807..000000000000 --- a/tests/integration/modules/test_mac_assistive.py +++ /dev/null @@ -1,89 +0,0 @@ -""" - :codeauthor: Nicole Thomas -""" - -import pytest - -from tests.support.case import ModuleCase - -OSA_SCRIPT = "/usr/bin/osascript" - - -@pytest.mark.destructive_test -@pytest.mark.skip_if_not_root -@pytest.mark.skip_initial_gh_actions_failure -@pytest.mark.skip_unless_on_darwin -class MacAssistiveTest(ModuleCase): - """ - Integration tests for the mac_assistive module. - """ - - def setUp(self): - """ - Sets up test requirements - """ - # Let's install a bundle to use in tests - self.run_function("assistive.install", [OSA_SCRIPT, True]) - - def tearDown(self): - """ - Clean up after tests - """ - # Delete any bundles that were installed - osa_script = self.run_function("assistive.installed", [OSA_SCRIPT]) - if osa_script: - self.run_function("assistive.remove", [OSA_SCRIPT]) - - smile_bundle = "com.smileonmymac.textexpander" - smile_bundle_present = self.run_function("assistive.installed", [smile_bundle]) - if smile_bundle_present: - self.run_function("assistive.remove", [smile_bundle]) - - @pytest.mark.slow_test - def test_install_and_remove(self): - """ - Tests installing and removing a bundled ID or command to use assistive access. 
- """ - new_bundle = "com.smileonmymac.textexpander" - self.assertTrue(self.run_function("assistive.install", [new_bundle])) - self.assertTrue(self.run_function("assistive.remove", [new_bundle])) - - @pytest.mark.slow_test - def test_installed(self): - """ - Tests the True and False return of assistive.installed. - """ - # OSA script should have been installed in setUp function - self.assertTrue(self.run_function("assistive.installed", [OSA_SCRIPT])) - # Clean up install - self.run_function("assistive.remove", [OSA_SCRIPT]) - # Installed should now return False - self.assertFalse(self.run_function("assistive.installed", [OSA_SCRIPT])) - - @pytest.mark.slow_test - def test_enable(self): - """ - Tests setting the enabled status of a bundled ID or command. - """ - # OSA script should have been installed and enabled in setUp function - # Now let's disable it, which should return True. - self.assertTrue(self.run_function("assistive.enable", [OSA_SCRIPT, False])) - # Double check the script was disabled, as intended. - self.assertFalse(self.run_function("assistive.enabled", [OSA_SCRIPT])) - # Now re-enable - self.assertTrue(self.run_function("assistive.enable", [OSA_SCRIPT])) - # Double check the script was enabled, as intended. - self.assertTrue(self.run_function("assistive.enabled", [OSA_SCRIPT])) - - @pytest.mark.slow_test - def test_enabled(self): - """ - Tests if a bundled ID or command is listed in assistive access returns True. - """ - # OSA script should have been installed in setUp function, which sets - # enabled to True by default. 
- self.assertTrue(self.run_function("assistive.enabled", [OSA_SCRIPT])) - # Disable OSA Script - self.run_function("assistive.enable", [OSA_SCRIPT, False]) - # Assert against new disabled status - self.assertFalse(self.run_function("assistive.enabled", [OSA_SCRIPT])) diff --git a/tests/integration/modules/test_mac_brew_pkg.py b/tests/integration/modules/test_mac_brew_pkg.py deleted file mode 100644 index 59d2dcde1dee..000000000000 --- a/tests/integration/modules/test_mac_brew_pkg.py +++ /dev/null @@ -1,188 +0,0 @@ -""" - :codeauthor: Nicole Thomas -""" - -import pytest - -from salt.exceptions import CommandExecutionError -from tests.support.case import ModuleCase - -# Brew doesn't support local package installation - So, let's -# Grab some small packages available online for brew -ADD_PKG = "algol68g" -DEL_PKG = "acme" - - -@pytest.mark.skip_if_not_root -@pytest.mark.destructive_test -@pytest.mark.skip_if_binaries_missing("brew") -@pytest.mark.skip_unless_on_darwin -class BrewModuleTest(ModuleCase): - """ - Integration tests for the brew module - """ - - @pytest.mark.slow_test - def test_brew_install(self): - """ - Tests the installation of packages - """ - try: - self.run_function("pkg.install", [ADD_PKG]) - pkg_list = self.run_function("pkg.list_pkgs") - try: - self.assertIn(ADD_PKG, pkg_list) - except AssertionError: - self.run_function("pkg.remove", [ADD_PKG]) - raise - except CommandExecutionError: - self.run_function("pkg.remove", [ADD_PKG]) - raise - - @pytest.mark.slow_test - def test_remove(self): - """ - Tests the removal of packages - """ - try: - # Install a package to delete - If unsuccessful, skip the test - self.run_function("pkg.install", [DEL_PKG]) - pkg_list = self.run_function("pkg.list_pkgs") - if DEL_PKG not in pkg_list: - self.run_function("pkg.install", [DEL_PKG]) - self.skipTest("Failed to install a package to delete") - - # Now remove the installed package - self.run_function("pkg.remove", [DEL_PKG]) - del_list = 
self.run_function("pkg.list_pkgs") - self.assertNotIn(DEL_PKG, del_list) - except CommandExecutionError: - self.run_function("pkg.remove", [DEL_PKG]) - raise - - @pytest.mark.slow_test - def test_version(self): - """ - Test pkg.version for mac. Installs a package and then checks we can get - a version for the installed package. - """ - try: - self.run_function("pkg.install", [ADD_PKG]) - pkg_list = self.run_function("pkg.list_pkgs") - version = self.run_function("pkg.version", [ADD_PKG]) - try: - self.assertTrue( - version, - msg="version: {} is empty, or other issue is present".format( - version - ), - ) - self.assertIn( - ADD_PKG, - pkg_list, - msg="package: {} is not in the list of installed packages: {}".format( - ADD_PKG, pkg_list - ), - ) - # make sure the version is accurate and is listed in the pkg_list - self.assertIn( - version, - str(pkg_list[ADD_PKG]), - msg="The {} version: {} is not listed in the pkg_list: {}".format( - ADD_PKG, version, pkg_list[ADD_PKG] - ), - ) - except AssertionError: - self.run_function("pkg.remove", [ADD_PKG]) - raise - except CommandExecutionError: - self.run_function("pkg.remove", [ADD_PKG]) - raise - - @pytest.mark.slow_test - def test_latest_version(self): - """ - Test pkg.latest_version: - - get the latest version available - - install the package - - get the latest version available - - check that the latest version is empty after installing it - """ - try: - self.run_function("pkg.remove", [ADD_PKG]) - uninstalled_latest = self.run_function("pkg.latest_version", [ADD_PKG]) - - self.run_function("pkg.install", [ADD_PKG]) - installed_latest = self.run_function("pkg.latest_version", [ADD_PKG]) - version = self.run_function("pkg.version", [ADD_PKG]) - try: - self.assertTrue(isinstance(uninstalled_latest, str)) - self.assertEqual(installed_latest, version) - except AssertionError: - self.run_function("pkg.remove", [ADD_PKG]) - raise - except CommandExecutionError: - self.run_function("pkg.remove", [ADD_PKG]) - raise - - 
@pytest.mark.slow_test - def test_refresh_db(self): - """ - Integration test to ensure pkg.refresh_db works with brew - """ - refresh_brew = self.run_function("pkg.refresh_db") - self.assertTrue(refresh_brew) - - @pytest.mark.slow_test - def test_list_upgrades(self): - """ - Test pkg.list_upgrades: data is in the form {'name1': 'version1', - 'name2': 'version2', ... } - """ - try: - upgrades = self.run_function("pkg.list_upgrades") - try: - self.assertTrue(isinstance(upgrades, dict)) - if upgrades: - for name in upgrades: - self.assertTrue(isinstance(name, str)) - self.assertTrue(isinstance(upgrades[name], str)) - except AssertionError: - self.run_function("pkg.remove", [ADD_PKG]) - raise - except CommandExecutionError: - self.run_function("pkg.remove", [ADD_PKG]) - raise - - @pytest.mark.slow_test - def test_info_installed(self): - """ - Test pkg.info_installed: info returned has certain fields used by - mac_brew.latest_version - """ - try: - self.run_function("pkg.install", [ADD_PKG]) - info = self.run_function("pkg.info_installed", [ADD_PKG]) - try: - self.assertTrue(ADD_PKG in info) - self.assertTrue("versions" in info[ADD_PKG]) - self.assertTrue("revision" in info[ADD_PKG]) - self.assertTrue("stable" in info[ADD_PKG]["versions"]) - except AssertionError: - self.run_function("pkg.remove", [ADD_PKG]) - raise - except CommandExecutionError: - self.run_function("pkg.remove", [ADD_PKG]) - raise - - def tearDown(self): - """ - Clean up after tests - """ - pkg_list = self.run_function("pkg.list_pkgs") - - # Remove any installed packages - if ADD_PKG in pkg_list: - self.run_function("pkg.remove", [ADD_PKG]) - if DEL_PKG in pkg_list: - self.run_function("pkg.remove", [DEL_PKG]) diff --git a/tests/integration/modules/test_mac_desktop.py b/tests/integration/modules/test_mac_desktop.py deleted file mode 100644 index 73f12c18e539..000000000000 --- a/tests/integration/modules/test_mac_desktop.py +++ /dev/null @@ -1,58 +0,0 @@ -""" -Integration tests for the mac_desktop 
execution module. -""" - -import pytest - -from tests.support.case import ModuleCase - - -@pytest.mark.destructive_test -@pytest.mark.skip_if_not_root -@pytest.mark.skip_unless_on_darwin -class MacDesktopTestCase(ModuleCase): - """ - Integration tests for the mac_desktop module. - """ - - def test_get_output_volume(self): - """ - Tests the return of get_output_volume. - """ - ret = self.run_function("desktop.get_output_volume") - self.assertIsNotNone(ret) - - @pytest.mark.slow_test - def test_set_output_volume(self): - """ - Tests the return of set_output_volume. - """ - current_vol = self.run_function("desktop.get_output_volume") - to_set = 10 - if current_vol == str(to_set): - to_set += 2 - new_vol = self.run_function("desktop.set_output_volume", [str(to_set)]) - check_vol = self.run_function("desktop.get_output_volume") - self.assertEqual(new_vol, check_vol) - - # Set volume back to what it was before - self.run_function("desktop.set_output_volume", [current_vol]) - - def test_screensaver(self): - """ - Tests the return of the screensaver function. - """ - self.assertTrue(self.run_function("desktop.screensaver")) - - def test_lock(self): - """ - Tests the return of the lock function. - """ - self.assertTrue(self.run_function("desktop.lock")) - - @pytest.mark.slow_test - def test_say(self): - """ - Tests the return of the say function. 
- """ - self.assertTrue(self.run_function("desktop.say", ["hello", "world"])) diff --git a/tests/integration/modules/test_mac_group.py b/tests/integration/modules/test_mac_group.py deleted file mode 100644 index 46be79667f85..000000000000 --- a/tests/integration/modules/test_mac_group.py +++ /dev/null @@ -1,177 +0,0 @@ -""" - :codeauthor: Nicole Thomas -""" - -import pytest -from saltfactories.utils import random_string - -from salt.exceptions import CommandExecutionError -from tests.support.case import ModuleCase - -# Create group name strings for tests -ADD_GROUP = random_string("RS-", lowercase=False) -DEL_GROUP = random_string("RS-", lowercase=False) -CHANGE_GROUP = random_string("RS-", lowercase=False) -ADD_USER = random_string("RS-", lowercase=False) -REP_USER_GROUP = random_string("RS-", lowercase=False) - - -@pytest.mark.skip_if_not_root -@pytest.mark.destructive_test -@pytest.mark.skip_unless_on_darwin -class MacGroupModuleTest(ModuleCase): - """ - Integration tests for the mac_group module - """ - - def setUp(self): - """ - Sets up test requirements - """ - os_grain = self.run_function("grains.item", ["kernel"]) - if os_grain["kernel"] not in "Darwin": - self.skipTest("Test not applicable to '{kernel}' kernel".format(**os_grain)) - - @pytest.mark.slow_test - def test_mac_group_add(self): - """ - Tests the add group function - """ - try: - self.run_function("group.add", [ADD_GROUP, 3456]) - group_info = self.run_function("group.info", [ADD_GROUP]) - self.assertEqual(group_info["name"], ADD_GROUP) - except CommandExecutionError: - self.run_function("group.delete", [ADD_GROUP]) - raise - - @pytest.mark.slow_test - def test_mac_group_delete(self): - """ - Tests the delete group function - """ - # Create a group to delete - If unsuccessful, skip the test - if self.run_function("group.add", [DEL_GROUP, 4567]) is not True: - self.run_function("group.delete", [DEL_GROUP]) - self.skipTest("Failed to create a group to delete") - - # Now try to delete the added 
group - ret = self.run_function("group.delete", [DEL_GROUP]) - self.assertTrue(ret) - - @pytest.mark.slow_test - def test_mac_group_chgid(self): - """ - Tests changing the group id - """ - # Create a group to delete - If unsuccessful, skip the test - if self.run_function("group.add", [CHANGE_GROUP, 5678]) is not True: - self.run_function("group.delete", [CHANGE_GROUP]) - self.skipTest("Failed to create a group to manipulate") - - try: - self.run_function("group.chgid", [CHANGE_GROUP, 6789]) - group_info = self.run_function("group.info", [CHANGE_GROUP]) - self.assertEqual(group_info["gid"], 6789) - except AssertionError: - self.run_function("group.delete", [CHANGE_GROUP]) - raise - - @pytest.mark.slow_test - def test_mac_adduser(self): - """ - Tests adding user to the group - """ - # Create a group to use for test - If unsuccessful, skip the test - if self.run_function("group.add", [ADD_GROUP, 5678]) is not True: - self.run_function("group.delete", [ADD_GROUP]) - self.skipTest("Failed to create a group to manipulate") - - try: - self.run_function("group.adduser", [ADD_GROUP, ADD_USER]) - group_info = self.run_function("group.info", [ADD_GROUP]) - self.assertEqual(ADD_USER, "".join(group_info["members"])) - except AssertionError: - self.run_function("group.delete", [ADD_GROUP]) - raise - - @pytest.mark.slow_test - def test_mac_deluser(self): - """ - Test deleting user from a group - """ - # Create a group to use for test - If unsuccessful, skip the test - if ( - self.run_function("group.add", [ADD_GROUP, 5678]) - and self.run_function("group.adduser", [ADD_GROUP, ADD_USER]) is not True - ): - self.run_function("group.delete", [ADD_GROUP]) - self.skipTest("Failed to create a group to manipulate") - - delusr = self.run_function("group.deluser", [ADD_GROUP, ADD_USER]) - self.assertTrue(delusr) - - group_info = self.run_function("group.info", [ADD_GROUP]) - self.assertNotIn(ADD_USER, "".join(group_info["members"])) - - @pytest.mark.slow_test - def test_mac_members(self): 
- """ - Test replacing members of a group - """ - if ( - self.run_function("group.add", [ADD_GROUP, 5678]) - and self.run_function("group.adduser", [ADD_GROUP, ADD_USER]) is not True - ): - self.run_function("group.delete", [ADD_GROUP]) - self.skipTest( - "Failed to create the {} group or add user {} to group " - "to manipulate".format(ADD_GROUP, ADD_USER) - ) - - rep_group_mem = self.run_function("group.members", [ADD_GROUP, REP_USER_GROUP]) - self.assertTrue(rep_group_mem) - - # ensure new user is added to group and previous user is removed - group_info = self.run_function("group.info", [ADD_GROUP]) - self.assertIn(REP_USER_GROUP, str(group_info["members"])) - self.assertNotIn(ADD_USER, str(group_info["members"])) - - @pytest.mark.slow_test - def test_mac_getent(self): - """ - Test returning info on all groups - """ - if ( - self.run_function("group.add", [ADD_GROUP, 5678]) - and self.run_function("group.adduser", [ADD_GROUP, ADD_USER]) is not True - ): - self.run_function("group.delete", [ADD_GROUP]) - self.skipTest( - "Failed to create the {} group or add user {} to group " - "to manipulate".format(ADD_GROUP, ADD_USER) - ) - - getinfo = self.run_function("group.getent") - self.assertTrue(getinfo) - self.assertIn(ADD_GROUP, str(getinfo)) - self.assertIn(ADD_USER, str(getinfo)) - - def tearDown(self): - """ - Clean up after tests - """ - # Delete ADD_GROUP - add_info = self.run_function("group.info", [ADD_GROUP]) - if add_info: - self.run_function("group.delete", [ADD_GROUP]) - - # Delete DEL_GROUP if something failed - del_info = self.run_function("group.info", [DEL_GROUP]) - if del_info: - self.run_function("group.delete", [DEL_GROUP]) - - # Delete CHANGE_GROUP - change_info = self.run_function("group.info", [CHANGE_GROUP]) - if change_info: - self.run_function("group.delete", [CHANGE_GROUP]) diff --git a/tests/integration/modules/test_mac_keychain.py b/tests/integration/modules/test_mac_keychain.py deleted file mode 100644 index afd195524f36..000000000000 --- 
a/tests/integration/modules/test_mac_keychain.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Validate the mac-keychain module -""" - -import os - -import pytest - -from salt.exceptions import CommandExecutionError -from tests.support.case import ModuleCase -from tests.support.runtests import RUNTIME_VARS - - -@pytest.mark.destructive_test -@pytest.mark.skip_if_not_root -@pytest.mark.skip_unless_on_darwin -class MacKeychainModuleTest(ModuleCase): - """ - Integration tests for the mac_keychain module - """ - - @classmethod - def setUpClass(cls): - cls.cert = os.path.join( - RUNTIME_VARS.FILES, "file", "base", "certs", "salttest.p12" - ) - cls.cert_alias = "Salt Test" - cls.passwd = "salttest" - - def tearDown(self): - """ - Clean up after tests - """ - # Remove the salttest cert, if left over. - certs_list = self.run_function("keychain.list_certs") - if self.cert_alias in certs_list: - self.run_function("keychain.uninstall", [self.cert_alias]) - - @pytest.mark.slow_test - def test_mac_keychain_install(self): - """ - Tests that attempts to install a certificate - """ - install_cert = self.run_function("keychain.install", [self.cert, self.passwd]) - self.assertTrue(install_cert) - - # check to ensure the cert was installed - certs_list = self.run_function("keychain.list_certs") - self.assertIn(self.cert_alias, certs_list) - - @pytest.mark.slow_test - def test_mac_keychain_uninstall(self): - """ - Tests that attempts to uninstall a certificate - """ - self.run_function("keychain.install", [self.cert, self.passwd]) - certs_list = self.run_function("keychain.list_certs") - - if self.cert_alias not in certs_list: - self.run_function("keychain.uninstall", [self.cert_alias]) - self.skipTest("Failed to install keychain") - - # uninstall cert - self.run_function("keychain.uninstall", [self.cert_alias]) - certs_list = self.run_function("keychain.list_certs") - - # check to ensure the cert was uninstalled - try: - self.assertNotIn(self.cert_alias, str(certs_list)) - except 
CommandExecutionError: - self.run_function("keychain.uninstall", [self.cert_alias]) - - @pytest.mark.slow_test - def test_mac_keychain_get_friendly_name(self): - """ - Test that attempts to get friendly name of a cert - """ - self.run_function("keychain.install", [self.cert, self.passwd]) - certs_list = self.run_function("keychain.list_certs") - if self.cert_alias not in certs_list: - self.run_function("keychain.uninstall", [self.cert_alias]) - self.skipTest("Failed to install keychain") - - get_name = self.run_function( - "keychain.get_friendly_name", [self.cert, self.passwd] - ) - self.assertEqual(get_name, self.cert_alias) - - @pytest.mark.slow_test - def test_mac_keychain_get_default_keychain(self): - """ - Test that attempts to get the default keychain - """ - salt_get_keychain = self.run_function("keychain.get_default_keychain") - sys_get_keychain = self.run_function( - "cmd.run", ["security default-keychain -d user"] - ) - self.assertEqual(salt_get_keychain, sys_get_keychain) - - def test_mac_keychain_list_certs(self): - """ - Test that attempts to list certs - """ - cert_default = "com.apple.systemdefault" - certs = self.run_function("keychain.list_certs") - self.assertIn(cert_default, certs) diff --git a/tests/integration/modules/test_mac_portspkg.py b/tests/integration/modules/test_mac_portspkg.py deleted file mode 100644 index 35ebe3735b6e..000000000000 --- a/tests/integration/modules/test_mac_portspkg.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -integration tests for mac_ports -""" - -import pytest - -from tests.support.case import ModuleCase - - -@pytest.mark.skip_if_not_root -@pytest.mark.skip_if_binaries_missing("port") -@pytest.mark.skip_unless_on_darwin -class MacPortsModuleTest(ModuleCase): - """ - Validate the mac_ports module - """ - - AGREE_INSTALLED = False - - def setUp(self): - """ - Get current settings - """ - self.AGREE_INSTALLED = "agree" in self.run_function("pkg.list_pkgs") - self.run_function("pkg.refresh_db") - - def tearDown(self): - 
""" - Reset to original settings - """ - if not self.AGREE_INSTALLED: - self.run_function("pkg.remove", ["agree"]) - - @pytest.mark.destructive_test - def test_list_pkgs(self): - """ - Test pkg.list_pkgs - """ - self.run_function("pkg.install", ["agree"]) - self.assertIsInstance(self.run_function("pkg.list_pkgs"), dict) - self.assertIn("agree", self.run_function("pkg.list_pkgs")) - - @pytest.mark.destructive_test - def test_latest_version(self): - """ - Test pkg.latest_version - """ - self.run_function("pkg.install", ["agree"]) - result = self.run_function("pkg.latest_version", ["agree"], refresh=False) - self.assertIsInstance(result, dict) - self.assertIn("agree", result) - - @pytest.mark.destructive_test - def test_remove(self): - """ - Test pkg.remove - """ - self.run_function("pkg.install", ["agree"]) - removed = self.run_function("pkg.remove", ["agree"]) - self.assertIsInstance(removed, dict) - self.assertIn("agree", removed) - - @pytest.mark.destructive_test - def test_install(self): - """ - Test pkg.install - """ - self.run_function("pkg.remove", ["agree"]) - installed = self.run_function("pkg.install", ["agree"]) - self.assertIsInstance(installed, dict) - self.assertIn("agree", installed) - - def test_list_upgrades(self): - """ - Test pkg.list_upgrades - """ - self.assertIsInstance( - self.run_function("pkg.list_upgrades", refresh=False), dict - ) - - @pytest.mark.destructive_test - def test_upgrade_available(self): - """ - Test pkg.upgrade_available - """ - self.run_function("pkg.install", ["agree"]) - self.assertFalse( - self.run_function("pkg.upgrade_available", ["agree"], refresh=False) - ) - - def test_refresh_db(self): - """ - Test pkg.refresh_db - """ - self.assertTrue(self.run_function("pkg.refresh_db")) - - @pytest.mark.destructive_test - def test_upgrade(self): - """ - Test pkg.upgrade - """ - results = self.run_function("pkg.upgrade", refresh=False) - self.assertIsInstance(results, dict) - self.assertTrue(results["result"]) diff --git 
a/tests/integration/modules/test_mac_power.py b/tests/integration/modules/test_mac_power.py deleted file mode 100644 index aa0de2667077..000000000000 --- a/tests/integration/modules/test_mac_power.py +++ /dev/null @@ -1,345 +0,0 @@ -""" -integration tests for mac_power -""" - -import pytest - -from tests.support.case import ModuleCase - - -@pytest.mark.flaky(max_runs=10) -@pytest.mark.skip_unless_on_darwin -@pytest.mark.skip_if_binaries_missing("systemsetup") -@pytest.mark.skip_if_not_root -@pytest.mark.slow_test -class MacPowerModuleTest(ModuleCase): - """ - Validate the mac_power module - """ - - def setUp(self): - """ - Get current settings - """ - # Get current settings - self.COMPUTER_SLEEP = self.run_function("power.get_computer_sleep") - self.DISPLAY_SLEEP = self.run_function("power.get_display_sleep") - self.HARD_DISK_SLEEP = self.run_function("power.get_harddisk_sleep") - - def tearDown(self): - """ - Reset to original settings - """ - self.run_function("power.set_computer_sleep", [self.COMPUTER_SLEEP]) - self.run_function("power.set_display_sleep", [self.DISPLAY_SLEEP]) - self.run_function("power.set_harddisk_sleep", [self.HARD_DISK_SLEEP]) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_computer_sleep(self): - """ - Test power.get_computer_sleep - Test power.set_computer_sleep - """ - - # Normal Functionality - self.assertTrue(self.run_function("power.set_computer_sleep", [90])) - self.assertEqual( - self.run_function("power.get_computer_sleep"), "after 90 minutes" - ) - self.assertTrue(self.run_function("power.set_computer_sleep", ["Off"])) - self.assertEqual(self.run_function("power.get_computer_sleep"), "Never") - - # Test invalid input - self.assertIn( - "Invalid String Value for Minutes", - self.run_function("power.set_computer_sleep", ["spongebob"]), - ) - self.assertIn( - "Invalid Integer Value for Minutes", - self.run_function("power.set_computer_sleep", [0]), - ) - self.assertIn( - "Invalid Integer Value for Minutes", - 
self.run_function("power.set_computer_sleep", [181]), - ) - self.assertIn( - "Invalid Boolean Value for Minutes", - self.run_function("power.set_computer_sleep", [True]), - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_display_sleep(self): - """ - Test power.get_display_sleep - Test power.set_display_sleep - """ - - # Normal Functionality - self.assertTrue(self.run_function("power.set_display_sleep", [90])) - self.assertEqual( - self.run_function("power.get_display_sleep"), "after 90 minutes" - ) - self.assertTrue(self.run_function("power.set_display_sleep", ["Off"])) - self.assertEqual(self.run_function("power.get_display_sleep"), "Never") - - # Test invalid input - self.assertIn( - "Invalid String Value for Minutes", - self.run_function("power.set_display_sleep", ["spongebob"]), - ) - self.assertIn( - "Invalid Integer Value for Minutes", - self.run_function("power.set_display_sleep", [0]), - ) - self.assertIn( - "Invalid Integer Value for Minutes", - self.run_function("power.set_display_sleep", [181]), - ) - self.assertIn( - "Invalid Boolean Value for Minutes", - self.run_function("power.set_display_sleep", [True]), - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_harddisk_sleep(self): - """ - Test power.get_harddisk_sleep - Test power.set_harddisk_sleep - """ - - # Normal Functionality - self.assertTrue(self.run_function("power.set_harddisk_sleep", [90])) - self.assertEqual( - self.run_function("power.get_harddisk_sleep"), "after 90 minutes" - ) - self.assertTrue(self.run_function("power.set_harddisk_sleep", ["Off"])) - self.assertEqual(self.run_function("power.get_harddisk_sleep"), "Never") - - # Test invalid input - self.assertIn( - "Invalid String Value for Minutes", - self.run_function("power.set_harddisk_sleep", ["spongebob"]), - ) - self.assertIn( - "Invalid Integer Value for Minutes", - self.run_function("power.set_harddisk_sleep", [0]), - ) - self.assertIn( - "Invalid Integer Value for Minutes", - 
self.run_function("power.set_harddisk_sleep", [181]), - ) - self.assertIn( - "Invalid Boolean Value for Minutes", - self.run_function("power.set_harddisk_sleep", [True]), - ) - - @pytest.mark.slow_test - def test_restart_freeze(self): - """ - Test power.get_restart_freeze - Test power.set_restart_freeze - """ - # Normal Functionality - self.assertTrue(self.run_function("power.set_restart_freeze", ["on"])) - self.assertTrue(self.run_function("power.get_restart_freeze")) - # This will return False because mac fails to actually make the change - self.assertFalse(self.run_function("power.set_restart_freeze", ["off"])) - # Even setting to off returns true, it actually is never set - # This is an apple bug - self.assertTrue(self.run_function("power.get_restart_freeze")) - - -@pytest.mark.flaky(max_runs=10) -@pytest.mark.skip_unless_on_darwin -@pytest.mark.skip_if_binaries_missing("systemsetup") -@pytest.mark.skip_if_not_root -class MacPowerModuleTestSleepOnPowerButton(ModuleCase): - """ - Test power.get_sleep_on_power_button - Test power.set_sleep_on_power_button - """ - - SLEEP_ON_BUTTON = None - - def setUp(self): - """ - Check if function is available - Get existing value - """ - # Is the function available - ret = self.run_function("power.get_sleep_on_power_button") - if isinstance(ret, bool): - self.SLEEP_ON_BUTTON = self.run_function("power.get_sleep_on_power_button") - - def tearDown(self): - """ - Reset to original value - """ - if self.SLEEP_ON_BUTTON is not None: - self.run_function("power.set_sleep_on_power_button", [self.SLEEP_ON_BUTTON]) - - @pytest.mark.slow_test - def test_sleep_on_power_button(self): - """ - Test power.get_sleep_on_power_button - Test power.set_sleep_on_power_button - """ - # If available on this system, test it - if self.SLEEP_ON_BUTTON is None: - # Check for not available - ret = self.run_function("power.get_sleep_on_power_button") - self.assertIn("Error", ret) - else: - self.assertTrue( - 
self.run_function("power.set_sleep_on_power_button", ["on"]) - ) - self.assertTrue(self.run_function("power.get_sleep_on_power_button")) - self.assertTrue( - self.run_function("power.set_sleep_on_power_button", ["off"]) - ) - self.assertFalse(self.run_function("power.get_sleep_on_power_button")) - - -@pytest.mark.flaky(max_runs=10) -@pytest.mark.skip_unless_on_darwin -@pytest.mark.skip_if_binaries_missing("systemsetup") -@pytest.mark.skip_if_not_root -class MacPowerModuleTestRestartPowerFailure(ModuleCase): - """ - Test power.get_restart_power_failure - Test power.set_restart_power_failure - """ - - RESTART_POWER = None - - def setUp(self): - """ - Check if function is available - Get existing value - """ - # Is the function available - ret = self.run_function("power.get_restart_power_failure") - if isinstance(ret, bool): - self.RESTART_POWER = ret - - def tearDown(self): - """ - Reset to original value - """ - if self.RESTART_POWER is not None: - self.run_function("power.set_sleep_on_power_button", [self.RESTART_POWER]) - - def test_restart_power_failure(self): - """ - Test power.get_restart_power_failure - Test power.set_restart_power_failure - """ - # If available on this system, test it - if self.RESTART_POWER is None: - # Check for not available - ret = self.run_function("power.get_restart_power_failure") - self.assertIn("Error", ret) - else: - self.assertTrue( - self.run_function("power.set_restart_power_failure", ["on"]) - ) - self.assertTrue(self.run_function("power.get_restart_power_failure")) - self.assertTrue( - self.run_function("power.set_restart_power_failure", ["off"]) - ) - self.assertFalse(self.run_function("power.get_restart_power_failure")) - - -@pytest.mark.flaky(max_runs=10) -@pytest.mark.skip_unless_on_darwin -@pytest.mark.skip_if_binaries_missing("systemsetup") -@pytest.mark.skip_if_not_root -class MacPowerModuleTestWakeOnNet(ModuleCase): - """ - Test power.get_wake_on_network - Test power.set_wake_on_network - """ - - WAKE_ON_NET = None - - 
def setUp(self): - """ - Check if function is available - Get existing value - """ - # Is the function available - ret = self.run_function("power.get_wake_on_network") - if isinstance(ret, bool): - self.WAKE_ON_NET = ret - - def tearDown(self): - """ - Reset to original value - """ - if self.WAKE_ON_NET is not None: - self.run_function("power.set_wake_on_network", [self.WAKE_ON_NET]) - - def test_wake_on_network(self): - """ - Test power.get_wake_on_network - Test power.set_wake_on_network - """ - # If available on this system, test it - if self.WAKE_ON_NET is None: - # Check for not available - ret = self.run_function("power.get_wake_on_network") - self.assertIn("Error", ret) - else: - self.assertTrue(self.run_function("power.set_wake_on_network", ["on"])) - self.assertTrue(self.run_function("power.get_wake_on_network")) - self.assertTrue(self.run_function("power.set_wake_on_network", ["off"])) - self.assertFalse(self.run_function("power.get_wake_on_network")) - - -@pytest.mark.flaky(max_runs=10) -@pytest.mark.skip_unless_on_darwin -@pytest.mark.skip_if_binaries_missing("systemsetup") -@pytest.mark.skip_if_not_root -class MacPowerModuleTestWakeOnModem(ModuleCase): - """ - Test power.get_wake_on_modem - Test power.set_wake_on_modem - """ - - WAKE_ON_MODEM = None - - def setUp(self): - """ - Check if function is available - Get existing value - """ - # Is the function available - ret = self.run_function("power.get_wake_on_modem") - if isinstance(ret, bool): - self.WAKE_ON_MODEM = ret - - def tearDown(self): - """ - Reset to original value - """ - if self.WAKE_ON_MODEM is not None: - self.run_function("power.set_wake_on_modem", [self.WAKE_ON_MODEM]) - - def test_wake_on_modem(self): - """ - Test power.get_wake_on_modem - Test power.set_wake_on_modem - """ - # If available on this system, test it - if self.WAKE_ON_MODEM is None: - # Check for not available - ret = self.run_function("power.get_wake_on_modem") - self.assertIn("Error", ret) - else: - 
self.assertTrue(self.run_function("power.set_wake_on_modem", ["on"])) - self.assertTrue(self.run_function("power.get_wake_on_modem")) - self.assertTrue(self.run_function("power.set_wake_on_modem", ["off"])) - self.assertFalse(self.run_function("power.get_wake_on_modem")) diff --git a/tests/integration/modules/test_mac_service.py b/tests/integration/modules/test_mac_service.py deleted file mode 100644 index d4022cab535e..000000000000 --- a/tests/integration/modules/test_mac_service.py +++ /dev/null @@ -1,233 +0,0 @@ -""" -integration tests for mac_service -""" - -import plistlib - -import pytest - -import salt.utils.files -from tests.support.case import ModuleCase - - -@pytest.mark.skip_if_not_root -@pytest.mark.skip_if_binaries_missing("launchctl", "plutil") -@pytest.mark.skip_unless_on_darwin -class MacServiceModuleTest(ModuleCase): - """ - Validate the mac_service module - """ - - SERVICE_NAME = "com.salt.integration.test" - SERVICE_PATH = "/Library/LaunchDaemons/com.salt.integration.test.plist" - - def setUp(self): - """ - setup our test launch service. - """ - service_data = { - "KeepAlive": True, - "Label": self.SERVICE_NAME, - "ProgramArguments": ["/bin/sleep", "1000"], - "RunAtLoad": True, - } - with salt.utils.files.fopen(self.SERVICE_PATH, "wb") as fp: - plistlib.dump(service_data, fp) - self.run_function("service.enable", [self.SERVICE_NAME]) - self.run_function("service.start", [self.SERVICE_NAME]) - - def tearDown(self): - """ - stop and remove our test service. 
- """ - self.run_function("service.stop", [self.SERVICE_NAME]) - salt.utils.files.safe_rm(self.SERVICE_PATH) - - @pytest.mark.slow_test - def test_show(self): - """ - Test service.show - """ - # Existing Service - service_info = self.run_function("service.show", [self.SERVICE_NAME]) - self.assertIsInstance(service_info, dict) - self.assertEqual(service_info["plist"]["Label"], self.SERVICE_NAME) - - # Missing Service - self.assertIn( - "Service not found", self.run_function("service.show", ["spongebob"]) - ) - - @pytest.mark.slow_test - def test_launchctl(self): - """ - Test service.launchctl - """ - # Expected Functionality - self.assertTrue( - self.run_function("service.launchctl", ["error", "bootstrap", 64]) - ) - self.assertEqual( - self.run_function( - "service.launchctl", ["error", "bootstrap", 64], return_stdout=True - ), - "64: unknown error code", - ) - - # Raise an error - self.assertIn( - "Failed to error service", - self.run_function("service.launchctl", ["error", "bootstrap"]), - ) - - @pytest.mark.slow_test - def test_list(self): - """ - Test service.list - """ - # Expected Functionality - self.assertIn("PID", self.run_function("service.list")) - self.assertIn("{", self.run_function("service.list", [self.SERVICE_NAME])) - - # Service not found - self.assertIn( - "Service not found", self.run_function("service.list", ["spongebob"]) - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_enable(self): - """ - Test service.enable - """ - self.assertTrue(self.run_function("service.enable", [self.SERVICE_NAME])) - - self.assertIn( - "Service not found", self.run_function("service.enable", ["spongebob"]) - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_disable(self): - """ - Test service.disable - """ - self.assertTrue(self.run_function("service.disable", [self.SERVICE_NAME])) - - self.assertIn( - "Service not found", self.run_function("service.disable", ["spongebob"]) - ) - - @pytest.mark.destructive_test - 
@pytest.mark.slow_test - def test_start(self): - """ - Test service.start - Test service.stop - Test service.status - """ - self.assertTrue(self.run_function("service.start", [self.SERVICE_NAME])) - - self.assertIn( - "Service not found", self.run_function("service.start", ["spongebob"]) - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_stop(self): - """ - Test service.stop - """ - self.assertTrue(self.run_function("service.stop", [self.SERVICE_NAME])) - - self.assertIn( - "Service not found", self.run_function("service.stop", ["spongebob"]) - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_status(self): - """ - Test service.status - """ - # A running service - self.assertTrue(self.run_function("service.start", [self.SERVICE_NAME])) - self.assertTrue(self.run_function("service.status", [self.SERVICE_NAME])) - - # A stopped service - self.assertTrue(self.run_function("service.stop", [self.SERVICE_NAME])) - self.assertFalse(self.run_function("service.status", [self.SERVICE_NAME])) - - # Service not found - self.assertFalse(self.run_function("service.status", ["spongebob"])) - - @pytest.mark.slow_test - def test_available(self): - """ - Test service.available - """ - self.assertTrue(self.run_function("service.available", [self.SERVICE_NAME])) - self.assertFalse(self.run_function("service.available", ["spongebob"])) - - @pytest.mark.slow_test - def test_missing(self): - """ - Test service.missing - """ - self.assertFalse(self.run_function("service.missing", [self.SERVICE_NAME])) - self.assertTrue(self.run_function("service.missing", ["spongebob"])) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_enabled(self): - """ - Test service.enabled - """ - self.assertTrue(self.run_function("service.enabled", [self.SERVICE_NAME])) - self.assertTrue(self.run_function("service.start", [self.SERVICE_NAME])) - - self.assertTrue(self.run_function("service.enabled", [self.SERVICE_NAME])) - 
self.assertTrue(self.run_function("service.stop", [self.SERVICE_NAME])) - - self.assertTrue(self.run_function("service.enabled", ["spongebob"])) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_disabled(self): - """ - Test service.disabled - """ - self.assertTrue(self.run_function("service.start", [self.SERVICE_NAME])) - self.assertFalse(self.run_function("service.disabled", [self.SERVICE_NAME])) - - self.assertTrue(self.run_function("service.disable", [self.SERVICE_NAME])) - self.assertTrue(self.run_function("service.disabled", [self.SERVICE_NAME])) - self.assertTrue(self.run_function("service.enable", [self.SERVICE_NAME])) - self.assertIn( - "Service not found", self.run_function("service.stop", ["spongebob"]) - ) - - @pytest.mark.slow_test - def test_get_all(self): - """ - Test service.get_all - """ - services = self.run_function("service.get_all") - self.assertIsInstance(services, list) - self.assertIn(self.SERVICE_NAME, services) - - @pytest.mark.slow_test - def test_get_enabled(self): - """ - Test service.get_enabled - """ - services = self.run_function("service.get_enabled") - self.assertIsInstance(services, list) - self.assertIn(self.SERVICE_NAME, services) - - @pytest.mark.slow_test - def test_service_laoded(self): - """ - Test service.get_enabled - """ - self.assertTrue(self.run_function("service.loaded", [self.SERVICE_NAME])) diff --git a/tests/integration/modules/test_mac_shadow.py b/tests/integration/modules/test_mac_shadow.py deleted file mode 100644 index bb859ffbf099..000000000000 --- a/tests/integration/modules/test_mac_shadow.py +++ /dev/null @@ -1,226 +0,0 @@ -""" -integration tests for mac_shadow -""" - -import datetime - -import pytest -from saltfactories.utils import random_string - -from tests.support.case import ModuleCase - -TEST_USER = random_string("RS-", lowercase=False) -NO_USER = random_string("RS-", lowercase=False) - - -@pytest.mark.skip_if_binaries_missing("dscl", "pwpolicy") -@pytest.mark.skip_if_not_root 
-@pytest.mark.skip_unless_on_darwin -class MacShadowModuleTest(ModuleCase): - """ - Validate the mac_shadow module - """ - - def setUp(self): - """ - Get current settings - """ - self.run_function("user.add", [TEST_USER]) - - def tearDown(self): - """ - Reset to original settings - """ - self.run_function("user.delete", [TEST_USER]) - - @pytest.mark.slow_test - @pytest.mark.skip_initial_gh_actions_failure - def test_info(self): - """ - Test shadow.info - """ - # Correct Functionality - ret = self.run_function("shadow.info", [TEST_USER]) - self.assertEqual(ret["name"], TEST_USER) - - # User does not exist - ret = self.run_function("shadow.info", [NO_USER]) - self.assertEqual(ret["name"], "") - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_get_account_created(self): - """ - Test shadow.get_account_created - """ - # Correct Functionality - text_date = self.run_function("shadow.get_account_created", [TEST_USER]) - self.assertNotEqual(text_date, "Invalid Timestamp") - obj_date = datetime.datetime.strptime(text_date, "%Y-%m-%d %H:%M:%S") - self.assertIsInstance(obj_date, datetime.date) - - # User does not exist - self.assertEqual( - self.run_function("shadow.get_account_created", [NO_USER]), - "ERROR: User not found: {}".format(NO_USER), - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - @pytest.mark.skip_initial_gh_actions_failure - def test_get_last_change(self): - """ - Test shadow.get_last_change - """ - # Correct Functionality - text_date = self.run_function("shadow.get_last_change", [TEST_USER]) - self.assertNotEqual(text_date, "Invalid Timestamp") - obj_date = datetime.datetime.strptime(text_date, "%Y-%m-%d %H:%M:%S") - self.assertIsInstance(obj_date, datetime.date) - - # User does not exist - self.assertEqual( - self.run_function("shadow.get_last_change", [NO_USER]), - "ERROR: User not found: {}".format(NO_USER), - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - @pytest.mark.skip_initial_gh_actions_failure - def 
test_get_login_failed_last(self): - """ - Test shadow.get_login_failed_last - """ - # Correct Functionality - text_date = self.run_function("shadow.get_login_failed_last", [TEST_USER]) - self.assertNotEqual(text_date, "Invalid Timestamp") - obj_date = datetime.datetime.strptime(text_date, "%Y-%m-%d %H:%M:%S") - self.assertIsInstance(obj_date, datetime.date) - - # User does not exist - self.assertEqual( - self.run_function("shadow.get_login_failed_last", [NO_USER]), - "ERROR: User not found: {}".format(NO_USER), - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - @pytest.mark.skip_initial_gh_actions_failure - def test_get_login_failed_count(self): - """ - Test shadow.get_login_failed_count - """ - # Correct Functionality - self.assertEqual( - self.run_function("shadow.get_login_failed_count", [TEST_USER]), "0" - ) - - # User does not exist - self.assertEqual( - self.run_function("shadow.get_login_failed_count", [NO_USER]), - "ERROR: User not found: {}".format(NO_USER), - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_get_set_maxdays(self): - """ - Test shadow.get_maxdays - Test shadow.set_maxdays - """ - # Correct Functionality - self.assertTrue(self.run_function("shadow.set_maxdays", [TEST_USER, 20])) - self.assertEqual(self.run_function("shadow.get_maxdays", [TEST_USER]), 20) - - # User does not exist - self.assertEqual( - self.run_function("shadow.set_maxdays", [NO_USER, 7]), - "ERROR: User not found: {}".format(NO_USER), - ) - self.assertEqual( - self.run_function("shadow.get_maxdays", [NO_USER]), - "ERROR: User not found: {}".format(NO_USER), - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_get_set_change(self): - """ - Test shadow.get_change - Test shadow.set_change - """ - # Correct Functionality - self.assertTrue( - self.run_function("shadow.set_change", [TEST_USER, "02/11/2011"]) - ) - self.assertEqual( - self.run_function("shadow.get_change", [TEST_USER]), "02/11/2011" - ) - - # User does not 
exist - self.assertEqual( - self.run_function("shadow.set_change", [NO_USER, "02/11/2012"]), - "ERROR: User not found: {}".format(NO_USER), - ) - self.assertEqual( - self.run_function("shadow.get_change", [NO_USER]), - "ERROR: User not found: {}".format(NO_USER), - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_get_set_expire(self): - """ - Test shadow.get_expire - Test shadow.set_expire - """ - # Correct Functionality - self.assertTrue( - self.run_function("shadow.set_expire", [TEST_USER, "02/11/2011"]) - ) - self.assertEqual( - self.run_function("shadow.get_expire", [TEST_USER]), "02/11/2011" - ) - - # User does not exist - self.assertEqual( - self.run_function("shadow.set_expire", [NO_USER, "02/11/2012"]), - "ERROR: User not found: {}".format(NO_USER), - ) - self.assertEqual( - self.run_function("shadow.get_expire", [NO_USER]), - "ERROR: User not found: {}".format(NO_USER), - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_del_password(self): - """ - Test shadow.del_password - """ - # Correct Functionality - self.assertTrue(self.run_function("shadow.del_password", [TEST_USER])) - self.assertEqual(self.run_function("shadow.info", [TEST_USER])["passwd"], "*") - - # User does not exist - self.assertEqual( - self.run_function("shadow.del_password", [NO_USER]), - "ERROR: User not found: {}".format(NO_USER), - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_set_password(self): - """ - Test shadow.set_password - """ - # Correct Functionality - self.assertTrue( - self.run_function("shadow.set_password", [TEST_USER, "Pa$$W0rd"]) - ) - - # User does not exist - self.assertEqual( - self.run_function("shadow.set_password", [NO_USER, "P@SSw0rd"]), - "ERROR: User not found: {}".format(NO_USER), - ) diff --git a/tests/integration/modules/test_mac_softwareupdate.py b/tests/integration/modules/test_mac_softwareupdate.py deleted file mode 100644 index a8094969c358..000000000000 --- 
a/tests/integration/modules/test_mac_softwareupdate.py +++ /dev/null @@ -1,170 +0,0 @@ -""" -integration tests for mac_softwareupdate -""" - -import pytest - -from tests.support.case import ModuleCase - - -@pytest.mark.skip_if_not_root -@pytest.mark.skip_if_binaries_missing("softwareupdate") -@pytest.mark.skip_unless_on_darwin -class MacSoftwareUpdateModuleTest(ModuleCase): - """ - Validate the mac_softwareupdate module - """ - - IGNORED_LIST = [] - SCHEDULE = False - CATALOG = "" - - def setUp(self): - """ - Get current settings - """ - self.IGNORED_LIST = self.run_function("softwareupdate.list_ignored") - self.SCHEDULE = self.run_function("softwareupdate.schedule") - self.CATALOG = self.run_function("softwareupdate.get_catalog") - - super().setUp() - - def tearDown(self): - """ - Reset to original settings - """ - if self.IGNORED_LIST: - for item in self.IGNORED_LIST: - self.run_function("softwareupdate.ignore", [item]) - else: - self.run_function("softwareupdate.reset_ignored") - - self.run_function("softwareupdate.schedule", [self.SCHEDULE]) - - if self.CATALOG == "Default": - self.run_function("softwareupdate.reset_catalog") - else: - self.run_function("softwareupdate.set_catalog", [self.CATALOG]) - - super().tearDown() - - @pytest.mark.slow_test - def test_list_available(self): - """ - Test softwareupdate.list_available - """ - # Can't predict what will be returned, so can only test that the return - # is the correct type, dict - self.assertIsInstance(self.run_function("softwareupdate.list_available"), dict) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - @pytest.mark.skip_initial_gh_actions_failure - def test_ignore(self): - """ - Test softwareupdate.ignore - Test softwareupdate.list_ignored - Test softwareupdate.reset_ignored - """ - # Test reset_ignored - self.assertTrue(self.run_function("softwareupdate.reset_ignored")) - self.assertEqual(self.run_function("softwareupdate.list_ignored"), []) - - # Test ignore - 
self.assertTrue(self.run_function("softwareupdate.ignore", ["spongebob"])) - self.assertTrue(self.run_function("softwareupdate.ignore", ["squidward"])) - - # Test list_ignored and verify ignore - self.assertIn("spongebob", self.run_function("softwareupdate.list_ignored")) - self.assertIn("squidward", self.run_function("softwareupdate.list_ignored")) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - @pytest.mark.skip_initial_gh_actions_failure - def test_schedule(self): - """ - Test softwareupdate.schedule_enable - Test softwareupdate.schedule_enabled - """ - # Test enable - self.assertTrue(self.run_function("softwareupdate.schedule_enable", [True])) - self.assertTrue(self.run_function("softwareupdate.schedule_enabled")) - - # Test disable in case it was already enabled - self.assertTrue(self.run_function("softwareupdate.schedule_enable", [False])) - self.assertFalse(self.run_function("softwareupdate.schedule_enabled")) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_update(self): - """ - Test softwareupdate.update_all - Test softwareupdate.update - Test softwareupdate.update_available - - Need to know the names of updates that are available to properly test - the update functions... 
- """ - # There's no way to know what the dictionary will contain, so all we can - # check is that the return is a dictionary - self.assertIsInstance(self.run_function("softwareupdate.update_all"), dict) - - # Test update_available - self.assertFalse( - self.run_function("softwareupdate.update_available", ["spongebob"]) - ) - - # Test update not available - self.assertIn( - "Update not available", - self.run_function("softwareupdate.update", ["spongebob"]), - ) - - @pytest.mark.slow_test - def test_list_downloads(self): - """ - Test softwareupdate.list_downloads - """ - self.assertIsInstance(self.run_function("softwareupdate.list_downloads"), list) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_download(self): - """ - Test softwareupdate.download - - Need to know the names of updates that are available to properly test - the download function - """ - # Test update not available - self.assertIn( - "Update not available", - self.run_function("softwareupdate.download", ["spongebob"]), - ) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - def test_download_all(self): - """ - Test softwareupdate.download_all - """ - self.assertIsInstance(self.run_function("softwareupdate.download_all"), list) - - @pytest.mark.destructive_test - @pytest.mark.slow_test - @pytest.mark.skip_initial_gh_actions_failure - def test_get_set_reset_catalog(self): - """ - Test softwareupdate.download_all - """ - # Reset the catalog - self.assertTrue(self.run_function("softwareupdate.reset_catalog")) - self.assertEqual(self.run_function("softwareupdate.get_catalog"), "Default") - - # Test setting and getting the catalog - self.assertTrue(self.run_function("softwareupdate.set_catalog", ["spongebob"])) - self.assertEqual(self.run_function("softwareupdate.get_catalog"), "spongebob") - - # Test reset the catalog - self.assertTrue(self.run_function("softwareupdate.reset_catalog")) - self.assertEqual(self.run_function("softwareupdate.get_catalog"), "Default") diff --git 
a/tests/integration/modules/test_mac_sysctl.py b/tests/integration/modules/test_mac_sysctl.py deleted file mode 100644 index cdf1b665a537..000000000000 --- a/tests/integration/modules/test_mac_sysctl.py +++ /dev/null @@ -1,174 +0,0 @@ -""" - :codeauthor: Nicole Thomas -""" - -import os -import random - -import pytest - -import salt.utils.files -from salt.exceptions import CommandExecutionError -from tests.support.case import ModuleCase - -# Module Variables -ASSIGN_CMD = "net.inet.icmp.timestamp" -CONFIG = "/etc/sysctl.conf" - - -@pytest.mark.destructive_test -@pytest.mark.skip_if_not_root -@pytest.mark.skip_unless_on_darwin -class DarwinSysctlModuleTest(ModuleCase): - """ - Integration tests for the darwin_sysctl module - """ - - def setUp(self): - """ - Sets up the test requirements - """ - super().setUp() - # Data needed for cleanup - self.has_conf = False - self.val = self.run_function("sysctl.get", [ASSIGN_CMD]) - - # If sysctl file is present, make a copy - # Remove original file so we can replace it with test files - if os.path.isfile(CONFIG): - self.has_conf = True - try: - self.conf = self.__copy_sysctl() - except CommandExecutionError: - msg = "Could not copy file: {0}" - raise CommandExecutionError(msg.format(CONFIG)) - os.remove(CONFIG) - - @pytest.mark.slow_test - def test_assign(self): - """ - Tests assigning a single sysctl parameter - """ - try: - rand = random.randint(0, 500) - while rand == self.val: - rand = random.randint(0, 500) - self.run_function("sysctl.assign", [ASSIGN_CMD, rand]) - info = int(self.run_function("sysctl.get", [ASSIGN_CMD])) - try: - self.assertEqual(rand, info) - except AssertionError: - self.run_function("sysctl.assign", [ASSIGN_CMD, self.val]) - raise - except CommandExecutionError: - self.run_function("sysctl.assign", [ASSIGN_CMD, self.val]) - raise - - @pytest.mark.slow_test - def test_persist_new_file(self): - """ - Tests assigning a sysctl value to a system without a sysctl.conf file - """ - # Always start with a 
clean/known sysctl.conf state - if os.path.isfile(CONFIG): - os.remove(CONFIG) - try: - self.run_function("sysctl.persist", [ASSIGN_CMD, 10]) - line = f"{ASSIGN_CMD}={10}" - found = self.__check_string(CONFIG, line) - self.assertTrue(found) - except CommandExecutionError: - os.remove(CONFIG) - raise - - @pytest.mark.slow_test - def test_persist_already_set(self): - """ - Tests assigning a sysctl value that is already set in sysctl.conf file - """ - # Always start with a clean/known sysctl.conf state - if os.path.isfile(CONFIG): - os.remove(CONFIG) - try: - self.run_function("sysctl.persist", [ASSIGN_CMD, 50]) - ret = self.run_function("sysctl.persist", [ASSIGN_CMD, 50]) - self.assertEqual(ret, "Already set") - except CommandExecutionError: - os.remove(CONFIG) - raise - - @pytest.mark.slow_test - def test_persist_apply_change(self): - """ - Tests assigning a sysctl value and applying the change to system - """ - # Always start with a clean/known sysctl.conf state - if os.path.isfile(CONFIG): - os.remove(CONFIG) - try: - rand = random.randint(0, 500) - while rand == self.val: - rand = random.randint(0, 500) - self.run_function("sysctl.persist", [ASSIGN_CMD, rand], apply_change=True) - info = int(self.run_function("sysctl.get", [ASSIGN_CMD])) - self.assertEqual(info, rand) - except CommandExecutionError: - os.remove(CONFIG) - raise - - def __copy_sysctl(self): - """ - Copies an existing sysconf file and returns temp file path. 
Copied - file will be restored in tearDown - """ - # Create new temporary file path and open needed files - temp_path = salt.utils.files.mkstemp() - with salt.utils.files.fopen(CONFIG, "r") as org_conf: - with salt.utils.files.fopen(temp_path, "w") as temp_sysconf: - # write sysctl lines to temp file - for line in org_conf: - temp_sysconf.write(line) - return temp_path - - def __restore_sysctl(self): - """ - Restores the original sysctl.conf file from temporary copy - """ - # If sysctl testing file exists, delete it - if os.path.isfile(CONFIG): - os.remove(CONFIG) - - # write temp lines to sysctl file to restore - with salt.utils.files.fopen(self.conf, "r") as temp_sysctl: - with salt.utils.files.fopen(CONFIG, "w") as sysctl: - for line in temp_sysctl: - sysctl.write(line) - - # delete temporary file - os.remove(self.conf) - - def __check_string(self, conf_file, to_find): - """ - Returns True if given line is present in file - """ - with salt.utils.files.fopen(conf_file, "r") as f_in: - for line in f_in: - if to_find in salt.utils.stringutils.to_unicode(line): - return True - return False - - def tearDown(self): - """ - Clean up after tests - """ - ret = self.run_function("sysctl.get", [ASSIGN_CMD]) - if ret != self.val: - self.run_function("sysctl.assign", [ASSIGN_CMD, self.val]) - - if self.has_conf is True: - # restore original sysctl file - self.__restore_sysctl() - - if self.has_conf is False and os.path.isfile(CONFIG): - # remove sysctl.conf created by tests - os.remove(CONFIG) diff --git a/tests/integration/modules/test_mac_timezone.py b/tests/integration/modules/test_mac_timezone.py deleted file mode 100644 index c424710a410e..000000000000 --- a/tests/integration/modules/test_mac_timezone.py +++ /dev/null @@ -1,206 +0,0 @@ -""" -Integration tests for mac_timezone - -If using parallels, make sure Time sync is turned off. Otherwise, parallels will -keep changing your date/time settings while the tests are running. 
To turn off -Time sync do the following: - - Go to actions -> configure - - Select options at the top and 'More Options' on the left - - Set time to 'Do not sync' -""" - -import datetime - -import pytest - -from tests.support.case import ModuleCase - - -@pytest.mark.flaky(max_runs=4) -@pytest.mark.skip_unless_on_darwin -@pytest.mark.skip_if_binaries_missing("systemsetup") -@pytest.mark.skip_if_not_root -@pytest.mark.slow_test -class MacTimezoneModuleTest(ModuleCase): - """ - Validate the mac_timezone module - """ - - USE_NETWORK_TIME = False - TIME_SERVER = "time.apple.com" - TIME_ZONE = "" - CURRENT_DATE = "" - CURRENT_TIME = "" - - def setUp(self): - """ - Get current settings - """ - self.USE_NETWORK_TIME = self.run_function("timezone.get_using_network_time") - self.TIME_SERVER = self.run_function("timezone.get_time_server") - self.TIME_ZONE = self.run_function("timezone.get_zone") - self.CURRENT_DATE = self.run_function("timezone.get_date") - self.CURRENT_TIME = self.run_function("timezone.get_time") - - self.run_function("timezone.set_using_network_time", [False]) - self.run_function("timezone.set_zone", ["America/Denver"]) - - def tearDown(self): - """ - Reset to original settings - """ - self.run_function("timezone.set_time_server", [self.TIME_SERVER]) - self.run_function("timezone.set_using_network_time", [self.USE_NETWORK_TIME]) - self.run_function("timezone.set_zone", [self.TIME_ZONE]) - if not self.USE_NETWORK_TIME: - self.run_function("timezone.set_date", [self.CURRENT_DATE]) - self.run_function("timezone.set_time", [self.CURRENT_TIME]) - - @pytest.mark.skip( - reason="Skip until we can figure out why modifying the system clock causes ZMQ errors", - ) - @pytest.mark.destructive_test - def test_get_set_date(self): - """ - Test timezone.get_date - Test timezone.set_date - """ - # Correct Functionality - self.assertTrue(self.run_function("timezone.set_date", ["2/20/2011"])) - self.assertEqual(self.run_function("timezone.get_date"), "2/20/2011") - - # Test 
bad date format - self.assertEqual( - self.run_function("timezone.set_date", ["13/12/2014"]), - "ERROR executing 'timezone.set_date': Invalid Date/Time Format: 13/12/2014", - ) - - @pytest.mark.slow_test - def test_get_time(self): - """ - Test timezone.get_time - """ - text_time = self.run_function("timezone.get_time") - self.assertNotEqual(text_time, "Invalid Timestamp") - obj_date = datetime.datetime.strptime(text_time, "%H:%M:%S") - self.assertIsInstance(obj_date, datetime.date) - - @pytest.mark.skip( - reason="Skip until we can figure out why modifying the system clock causes ZMQ errors", - ) - @pytest.mark.destructive_test - def test_set_time(self): - """ - Test timezone.set_time - """ - # Correct Functionality - self.assertTrue(self.run_function("timezone.set_time", ["3:14"])) - - # Test bad time format - self.assertEqual( - self.run_function("timezone.set_time", ["3:71"]), - "ERROR executing 'timezone.set_time': Invalid Date/Time Format: 3:71", - ) - - @pytest.mark.skip( - reason="Skip until we can figure out why modifying the system clock causes ZMQ errors", - ) - @pytest.mark.destructive_test - def test_get_set_zone(self): - """ - Test timezone.get_zone - Test timezone.set_zone - """ - # Correct Functionality - self.assertTrue(self.run_function("timezone.set_zone", ["Pacific/Wake"])) - self.assertEqual(self.run_function("timezone.get_zone"), "Pacific/Wake") - - # Test bad time zone - self.assertEqual( - self.run_function("timezone.set_zone", ["spongebob"]), - "ERROR executing 'timezone.set_zone': Invalid Timezone: spongebob", - ) - - @pytest.mark.skip( - reason="Skip until we can figure out why modifying the system clock causes ZMQ errors", - ) - @pytest.mark.destructive_test - def test_get_offset(self): - """ - Test timezone.get_offset - """ - self.assertTrue(self.run_function("timezone.set_zone", ["Pacific/Wake"])) - self.assertIsInstance(self.run_function("timezone.get_offset"), (str,)) - self.assertEqual(self.run_function("timezone.get_offset"), 
"+1200") - - self.assertTrue(self.run_function("timezone.set_zone", ["America/Los_Angeles"])) - self.assertIsInstance(self.run_function("timezone.get_offset"), (str,)) - self.assertEqual(self.run_function("timezone.get_offset"), "-0700") - - @pytest.mark.skip( - reason="Skip until we can figure out why modifying the system clock causes ZMQ errors", - ) - @pytest.mark.destructive_test - def test_get_set_zonecode(self): - """ - Test timezone.get_zonecode - Test timezone.set_zonecode - """ - self.assertTrue(self.run_function("timezone.set_zone", ["America/Los_Angeles"])) - self.assertIsInstance(self.run_function("timezone.get_zonecode"), (str,)) - self.assertEqual(self.run_function("timezone.get_zonecode"), "PDT") - - self.assertTrue(self.run_function("timezone.set_zone", ["Pacific/Wake"])) - self.assertIsInstance(self.run_function("timezone.get_zonecode"), (str,)) - self.assertEqual(self.run_function("timezone.get_zonecode"), "WAKT") - - @pytest.mark.slow_test - def test_list_zones(self): - """ - Test timezone.list_zones - """ - zones = self.run_function("timezone.list_zones") - self.assertIsInstance(self.run_function("timezone.list_zones"), list) - self.assertIn("America/Denver", self.run_function("timezone.list_zones")) - self.assertIn("America/Los_Angeles", self.run_function("timezone.list_zones")) - - @pytest.mark.skip( - reason="Skip until we can figure out why modifying the system clock causes ZMQ errors", - ) - @pytest.mark.destructive_test - def test_zone_compare(self): - """ - Test timezone.zone_compare - """ - self.assertTrue(self.run_function("timezone.set_zone", ["America/Denver"])) - self.assertTrue(self.run_function("timezone.zone_compare", ["America/Denver"])) - self.assertFalse(self.run_function("timezone.zone_compare", ["Pacific/Wake"])) - - @pytest.mark.skip( - reason="Skip until we can figure out why modifying the system clock causes ZMQ errors", - ) - @pytest.mark.destructive_test - def test_get_set_using_network_time(self): - """ - Test 
timezone.get_using_network_time - Test timezone.set_using_network_time - """ - self.assertTrue(self.run_function("timezone.set_using_network_time", [True])) - self.assertTrue(self.run_function("timezone.get_using_network_time")) - - self.assertTrue(self.run_function("timezone.set_using_network_time", [False])) - self.assertFalse(self.run_function("timezone.get_using_network_time")) - - @pytest.mark.skip( - reason="Skip until we can figure out why modifying the system clock causes ZMQ errors", - ) - @pytest.mark.destructive_test - def test_get_set_time_server(self): - """ - Test timezone.get_time_server - Test timezone.set_time_server - """ - self.assertTrue( - self.run_function("timezone.set_time_server", ["spongebob.com"]) - ) - self.assertEqual(self.run_function("timezone.get_time_server"), "spongebob.com") diff --git a/tests/integration/modules/test_mac_user.py b/tests/integration/modules/test_mac_user.py deleted file mode 100644 index 144267d72f6b..000000000000 --- a/tests/integration/modules/test_mac_user.py +++ /dev/null @@ -1,235 +0,0 @@ -""" - :codeauthor: Nicole Thomas -""" - -import os - -import pytest -from saltfactories.utils import random_string - -import salt.utils.files -from salt.exceptions import CommandExecutionError -from tests.support.case import ModuleCase - -# Create user strings for tests -ADD_USER = random_string("RS-", lowercase=False) -DEL_USER = random_string("RS-", lowercase=False) -PRIMARY_GROUP_USER = random_string("RS-", lowercase=False) -CHANGE_USER = random_string("RS-", lowercase=False) - - -@pytest.mark.skip_if_not_root -@pytest.mark.destructive_test -@pytest.mark.skip_unless_on_darwin -class MacUserModuleTest(ModuleCase): - """ - Integration tests for the mac_user module - """ - - def setUp(self): - """ - Sets up test requirements - """ - super().setUp() - os_grain = self.run_function("grains.item", ["kernel"]) - if os_grain["kernel"] not in "Darwin": - self.skipTest("Test not applicable to '{kernel}' kernel".format(**os_grain)) 
- - @pytest.mark.slow_test - def test_mac_user_add(self): - """ - Tests the add function - """ - try: - self.run_function("user.add", [ADD_USER]) - user_info = self.run_function("user.info", [ADD_USER]) - self.assertEqual(ADD_USER, user_info["name"]) - except CommandExecutionError: - self.run_function("user.delete", [ADD_USER]) - raise - - @pytest.mark.slow_test - def test_mac_user_delete(self): - """ - Tests the delete function - """ - - # Create a user to delete - If unsuccessful, skip the test - if self.run_function("user.add", [DEL_USER]) is not True: - self.run_function("user.delete", [DEL_USER]) - self.skipTest("Failed to create a user to delete") - - # Now try to delete the added user - ret = self.run_function("user.delete", [DEL_USER]) - self.assertTrue(ret) - - @pytest.mark.slow_test - def test_mac_user_primary_group(self): - """ - Tests the primary_group function - """ - - # Create a user to test primary group function - if self.run_function("user.add", [PRIMARY_GROUP_USER]) is not True: - self.run_function("user.delete", [PRIMARY_GROUP_USER]) - self.skipTest("Failed to create a user") - - try: - # Test mac_user.primary_group - primary_group = self.run_function( - "user.primary_group", [PRIMARY_GROUP_USER] - ) - uid_info = self.run_function("user.info", [PRIMARY_GROUP_USER]) - self.assertIn(primary_group, uid_info["groups"]) - - except AssertionError: - self.run_function("user.delete", [PRIMARY_GROUP_USER]) - raise - - @pytest.mark.slow_test - def test_mac_user_changes(self): - """ - Tests mac_user functions that change user properties - """ - # Create a user to manipulate - if unsuccessful, skip the test - if self.run_function("user.add", [CHANGE_USER]) is not True: - self.run_function("user.delete", [CHANGE_USER]) - self.skipTest("Failed to create a user") - - try: - # Test mac_user.chuid - self.run_function("user.chuid", [CHANGE_USER, 4376]) - uid_info = self.run_function("user.info", [CHANGE_USER]) - self.assertEqual(uid_info["uid"], 4376) - - # Test 
mac_user.chgid - self.run_function("user.chgid", [CHANGE_USER, 4376]) - gid_info = self.run_function("user.info", [CHANGE_USER]) - self.assertEqual(gid_info["gid"], 4376) - - # Test mac.user.chshell - self.run_function("user.chshell", [CHANGE_USER, "/bin/zsh"]) - shell_info = self.run_function("user.info", [CHANGE_USER]) - self.assertEqual(shell_info["shell"], "/bin/zsh") - - # Test mac_user.chhome - self.run_function("user.chhome", [CHANGE_USER, "/Users/foo"]) - home_info = self.run_function("user.info", [CHANGE_USER]) - self.assertEqual(home_info["home"], "/Users/foo") - - # Test mac_user.chfullname - self.run_function("user.chfullname", [CHANGE_USER, "Foo Bar"]) - fullname_info = self.run_function("user.info", [CHANGE_USER]) - self.assertEqual(fullname_info["fullname"], "Foo Bar") - - # Test mac_user.chgroups - pre_info = self.run_function("user.info", [CHANGE_USER])["groups"] - expected = pre_info + ["wheel"] - self.run_function("user.chgroups", [CHANGE_USER, "wheel"]) - groups_info = self.run_function("user.info", [CHANGE_USER]) - self.assertEqual(groups_info["groups"], expected) - - except AssertionError: - self.run_function("user.delete", [CHANGE_USER]) - raise - - @pytest.mark.slow_test - def test_mac_user_enable_auto_login(self): - """ - Tests mac_user functions that enable auto login - """ - # Make sure auto login is disabled before we start - if self.run_function("user.get_auto_login"): - self.skipTest("Auto login already enabled") - - try: - # Does enable return True - self.assertTrue( - self.run_function( - "user.enable_auto_login", ["Spongebob", "Squarepants"] - ) - ) - - # Did it set the user entry in the plist file - self.assertEqual(self.run_function("user.get_auto_login"), "Spongebob") - - # Did it generate the `/etc/kcpassword` file - self.assertTrue(os.path.exists("/etc/kcpassword")) - - # Are the contents of the file correct - test_data = bytes.fromhex("2e f8 27 42 a0 d9 ad 8b cd cd 6c 7d") - with salt.utils.files.fopen("/etc/kcpassword", "rb") 
as f: - file_data = f.read() - self.assertEqual(test_data, file_data) - - # Does disable return True - self.assertTrue(self.run_function("user.disable_auto_login")) - - # Does it remove the user entry in the plist file - self.assertFalse(self.run_function("user.get_auto_login")) - - # Is the `/etc/kcpassword` file removed - self.assertFalse(os.path.exists("/etc/kcpassword")) - - finally: - # Make sure auto_login is disabled - self.assertTrue(self.run_function("user.disable_auto_login")) - - # Make sure autologin is disabled - if self.run_function("user.get_auto_login"): - raise Exception("Failed to disable auto login") - - @pytest.mark.slow_test - def test_mac_user_disable_auto_login(self): - """ - Tests mac_user functions that disable auto login - """ - # Make sure auto login is enabled before we start - # Is there an existing setting - if self.run_function("user.get_auto_login"): - self.skipTest("Auto login already enabled") - - try: - # Enable auto login for the test - self.run_function("user.enable_auto_login", ["Spongebob", "Squarepants"]) - - # Make sure auto login got set up - if not self.run_function("user.get_auto_login") == "Spongebob": - raise Exception("Failed to enable auto login") - - # Does disable return True - self.assertTrue(self.run_function("user.disable_auto_login")) - - # Does it remove the user entry in the plist file - self.assertFalse(self.run_function("user.get_auto_login")) - - # Is the `/etc/kcpassword` file removed - self.assertFalse(os.path.exists("/etc/kcpassword")) - - finally: - # Make sure auto login is disabled - self.assertTrue(self.run_function("user.disable_auto_login")) - - # Make sure auto login is disabled - if self.run_function("user.get_auto_login"): - raise Exception("Failed to disable auto login") - - def tearDown(self): - """ - Clean up after tests - """ - - # Delete ADD_USER - add_info = self.run_function("user.info", [ADD_USER]) - if add_info: - self.run_function("user.delete", [ADD_USER]) - - # Delete DEL_USER if 
something failed - del_info = self.run_function("user.info", [DEL_USER]) - if del_info: - self.run_function("user.delete", [DEL_USER]) - - # Delete CHANGE_USER - change_info = self.run_function("user.info", [CHANGE_USER]) - if change_info: - self.run_function("user.delete", [CHANGE_USER]) diff --git a/tests/integration/modules/test_mac_xattr.py b/tests/integration/modules/test_mac_xattr.py deleted file mode 100644 index bcc39339b069..000000000000 --- a/tests/integration/modules/test_mac_xattr.py +++ /dev/null @@ -1,193 +0,0 @@ -""" -integration tests for mac_xattr -""" - -import os - -import pytest - -from tests.support.case import ModuleCase -from tests.support.runtests import RUNTIME_VARS - - -@pytest.mark.skip_if_binaries_missing("xattr") -@pytest.mark.skip_unless_on_darwin -class MacXattrModuleTest(ModuleCase): - """ - Validate the mac_xattr module - """ - - @classmethod - def setUpClass(cls): - cls.test_file = os.path.join(RUNTIME_VARS.TMP, "xattr_test_file.txt") - cls.no_file = os.path.join(RUNTIME_VARS.TMP, "xattr_no_file.txt") - - def setUp(self): - """ - Create test file for testing extended attributes - """ - self.run_function("file.touch", [self.test_file]) - - def tearDown(self): - """ - Clean up test file - """ - if os.path.exists(self.test_file): - os.remove(self.test_file) - - @pytest.mark.slow_test - def test_list_no_xattr(self): - """ - Make sure there are no attributes - """ - # Clear existing attributes - self.assertTrue(self.run_function("xattr.clear", [self.test_file])) - - # Test no attributes - self.assertEqual(self.run_function("xattr.list", [self.test_file]), {}) - - # Test file not found - self.assertEqual( - self.run_function("xattr.list", [self.no_file]), - "ERROR: File not found: {}".format(self.no_file), - ) - - @pytest.mark.slow_test - def test_write(self): - """ - Write an attribute - """ - # Clear existing attributes - self.assertTrue(self.run_function("xattr.clear", [self.test_file])) - - # Write some attributes - self.assertTrue( 
- self.run_function( - "xattr.write", [self.test_file, "spongebob", "squarepants"] - ) - ) - self.assertTrue( - self.run_function("xattr.write", [self.test_file, "squidward", "plankton"]) - ) - self.assertTrue( - self.run_function("xattr.write", [self.test_file, "crabby", "patty"]) - ) - - # Test that they were actually added - self.assertEqual( - self.run_function("xattr.list", [self.test_file]), - {"spongebob": "squarepants", "squidward": "plankton", "crabby": "patty"}, - ) - - # Test file not found - self.assertEqual( - self.run_function("xattr.write", [self.no_file, "patrick", "jellyfish"]), - "ERROR: File not found: {}".format(self.no_file), - ) - - @pytest.mark.slow_test - def test_read(self): - """ - Test xattr.read - """ - # Clear existing attributes - self.assertTrue(self.run_function("xattr.clear", [self.test_file])) - - # Write an attribute - self.assertTrue( - self.run_function( - "xattr.write", [self.test_file, "spongebob", "squarepants"] - ) - ) - - # Read the attribute - self.assertEqual( - self.run_function("xattr.read", [self.test_file, "spongebob"]), - "squarepants", - ) - - # Test file not found - self.assertEqual( - self.run_function("xattr.read", [self.no_file, "spongebob"]), - "ERROR: File not found: {}".format(self.no_file), - ) - - # Test attribute not found - self.assertEqual( - self.run_function("xattr.read", [self.test_file, "patrick"]), - "ERROR: Attribute not found: patrick", - ) - - @pytest.mark.slow_test - def test_delete(self): - """ - Test xattr.delete - """ - # Clear existing attributes - self.assertTrue(self.run_function("xattr.clear", [self.test_file])) - - # Write some attributes - self.assertTrue( - self.run_function( - "xattr.write", [self.test_file, "spongebob", "squarepants"] - ) - ) - self.assertTrue( - self.run_function("xattr.write", [self.test_file, "squidward", "plankton"]) - ) - self.assertTrue( - self.run_function("xattr.write", [self.test_file, "crabby", "patty"]) - ) - - # Delete an attribute - self.assertTrue( - 
self.run_function("xattr.delete", [self.test_file, "squidward"]) - ) - - # Make sure it was actually deleted - self.assertEqual( - self.run_function("xattr.list", [self.test_file]), - {"spongebob": "squarepants", "crabby": "patty"}, - ) - - # Test file not found - self.assertEqual( - self.run_function("xattr.delete", [self.no_file, "spongebob"]), - "ERROR: File not found: {}".format(self.no_file), - ) - - # Test attribute not found - self.assertEqual( - self.run_function("xattr.delete", [self.test_file, "patrick"]), - "ERROR: Attribute not found: patrick", - ) - - @pytest.mark.slow_test - def test_clear(self): - """ - Test xattr.clear - """ - # Clear existing attributes - self.assertTrue(self.run_function("xattr.clear", [self.test_file])) - - # Write some attributes - self.assertTrue( - self.run_function( - "xattr.write", [self.test_file, "spongebob", "squarepants"] - ) - ) - self.assertTrue( - self.run_function("xattr.write", [self.test_file, "squidward", "plankton"]) - ) - self.assertTrue( - self.run_function("xattr.write", [self.test_file, "crabby", "patty"]) - ) - - # Test Clear - self.assertTrue(self.run_function("xattr.clear", [self.test_file])) - - # Test file not found - self.assertEqual( - self.run_function("xattr.clear", [self.no_file]), - "ERROR: File not found: {}".format(self.no_file), - ) diff --git a/tests/pkg/rpm/salt.spec b/tests/pkg/rpm/salt.spec deleted file mode 100644 index 7d8183a9d87b..000000000000 --- a/tests/pkg/rpm/salt.spec +++ /dev/null @@ -1,470 +0,0 @@ -# Maintainer: Erik Johnson (https://github.com/terminalmage) -# -# This is a modified version of the spec file, which supports git builds. It -# should be kept more or less up-to-date with upstream changes. -# -# Please contact the maintainer before submitting any pull requests for this -# spec file. - -%if ! 
(0%{?rhel} >= 6 || 0%{?fedora} > 12) -%global with_python26 1 -%define pybasever 2.6 -%define __python_ver 26 -%define __python %{_bindir}/python%{?pybasever} -%endif - -%global include_tests 0 - -%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")} -%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")} -%{!?pythonpath: %global pythonpath %(%{__python} -c "import os, sys; print(os.pathsep.join(x for x in sys.path if x))")} - -%global srcver REPLACE_ME - -Name: salt -Version: REPLACE_ME -Release: 1%{?dist} -Summary: A parallel remote execution system - -Group: System Environment/Daemons -License: ASL 2.0 -URL: http://saltstack.org/ -Source0: %{name}-%{srcver}.tar.gz -Source1: %{name}-master -Source2: %{name}-syndic -Source3: %{name}-minion -Source4: %{name}-api -Source5: %{name}-master.service -Source6: %{name}-syndic.service -Source7: %{name}-minion.service -Source8: %{name}-api.service -Source9: README.fedora -Source10: logrotate.salt -Source11: salt.bash - -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) -BuildArch: noarch - -%ifarch %{ix86} x86_64 -Requires: dmidecode -%endif - -Requires: pciutils -Requires: which -Requires: yum-utils - -%if 0%{?with_python26} - -BuildRequires: python26-devel -Requires: python26-crypto >= 2.6.1 -Requires: python26-jinja2 -Requires: python26-msgpack > 0.3 -Requires: python26-PyYAML -Requires: python26-tornado >= 4.2.1 -Requires: python26-zmq -Requires: python26-six - -%else - -%if ((0%{?rhel} >= 6 || 0%{?fedora} > 12) && 0%{?include_tests}) -BuildRequires: python-tornado >= 4.2.1 -BuildRequires: python-futures >= 2.0 -BuildRequires: python-crypto >= 2.6.1 -BuildRequires: python-jinja2 -BuildRequires: python-msgpack > 0.3 -BuildRequires: python-pip -BuildRequires: python-zmq -BuildRequires: PyYAML -# this BR causes windows tests to happen 
-# clearly, that's not desired -# https://github.com/saltstack/salt/issues/3749 -BuildRequires: git -BuildRequires: python-libcloud -BuildRequires: python-six - - -%endif -BuildRequires: python-devel -Requires: m2crypto -Requires: python-crypto -Requires: python-zmq -Requires: python-jinja2 -Requires: PyYAML -Requires: python-msgpack -Requires: python-requests - -%endif - -%if ! (0%{?rhel} >= 7 || 0%{?fedora} >= 15) - -Requires(post): chkconfig -Requires(preun): chkconfig -Requires(preun): initscripts -Requires(postun): initscripts - -%else - -%if 0%{?systemd_preun:1} - -Requires(post): systemd-units -Requires(preun): systemd-units -Requires(postun): systemd-units - -%endif - -BuildRequires: systemd-units -Requires: systemd-python - -%endif - -%description -Salt is a distributed remote execution system used to execute commands and -query data. It was developed in order to bring the best solutions found in -the world of remote execution together and make them better, faster and more -malleable. Salt accomplishes this via its ability to handle larger loads of -information, and not just dozens, but hundreds or even thousands of individual -servers, handle them quickly and through a simple and manageable interface. - -%package master -Summary: Management component for salt, a parallel remote execution system -Group: System Environment/Daemons -Requires: %{name} = %{version}-%{release} -%if (0%{?rhel} >= 7 || 0%{?fedora} >= 15) -Requires: systemd-python -%endif - -%description master -The Salt master is the central server to which all minions connect. - -%package minion -Summary: Client component for Salt, a parallel remote execution system -Group: System Environment/Daemons -Requires: %{name} = %{version}-%{release} - -%description minion -The Salt minion is the agent component of Salt. It listens for instructions -from the master, runs jobs, and returns results back to the master. 
- -%package syndic -Summary: Master-of-master component for Salt, a parallel remote execution system -Group: System Environment/Daemons -Requires: %{name}-master = %{version}-%{release} - -%description syndic -The Salt syndic is a master daemon which can receive instruction from a -higher-level master, allowing for tiered organization of your Salt -infrastructure. - -%package api -Summary: REST API for Salt, a parallel remote execution system -Group: System administration tools -Requires: %{name}-master = %{version}-%{release} -%if 0%{?with_python26} -Requires: python26-cherrypy -%else -Requires: python-cherrypy -%endif - - -%description api -salt-api provides a REST interface to the Salt master. - -%package cloud -Summary: Cloud provisioner for Salt, a parallel remote execution system -Group: System administration tools -Requires: %{name}-master = %{version}-%{release} -%if 0%{?with_python26} -Requires: python26-libcloud -%else -Requires: python-libcloud -%endif - -%description cloud -The salt-cloud tool provisions new cloud VMs, installs salt-minion on them, and -adds them to the master's collection of controllable minions. - -%package ssh -Summary: Agentless SSH-based version of Salt, a parallel remote execution system -Group: System administration tools -Requires: %{name} = %{version}-%{release} - -%description ssh -The salt-ssh tool can run remote execution functions and states without the use -of an agent (salt-minion) service. 
- -%prep -%setup -n %{name}-%{srcver} - -%build - - -%install -rm -rf %{buildroot} -%{__python} setup.py install -O1 --root %{buildroot} - -# Add some directories -install -d -m 0755 %{buildroot}%{_var}/log/salt -touch %{buildroot}%{_var}/log/salt/minion -touch %{buildroot}%{_var}/log/salt/master -install -d -m 0755 %{buildroot}%{_var}/cache/salt -install -d -m 0755 %{buildroot}%{_sysconfdir}/salt -install -d -m 0755 %{buildroot}%{_sysconfdir}/salt/master.d -install -d -m 0755 %{buildroot}%{_sysconfdir}/salt/minion.d -install -d -m 0755 %{buildroot}%{_sysconfdir}/salt/pki -install -d -m 0755 %{buildroot}%{_sysconfdir}/salt/pki/master -install -d -m 0755 %{buildroot}%{_sysconfdir}/salt/pki/minion -install -d -m 0755 %{buildroot}%{_sysconfdir}/salt/cloud.conf.d -install -d -m 0755 %{buildroot}%{_sysconfdir}/salt/cloud.deploy.d -install -d -m 0755 %{buildroot}%{_sysconfdir}/salt/cloud.maps.d -install -d -m 0755 %{buildroot}%{_sysconfdir}/salt/cloud.profiles.d -install -d -m 0755 %{buildroot}%{_sysconfdir}/salt/cloud.providers.d - -# Add the config files -install -p -m 0640 conf/minion %{buildroot}%{_sysconfdir}/salt/minion -install -p -m 0640 conf/master %{buildroot}%{_sysconfdir}/salt/master -install -p -m 0640 conf/cloud %{buildroot}%{_sysconfdir}/salt/cloud -install -p -m 0640 conf/roster %{buildroot}%{_sysconfdir}/salt/roster -install -p -m 0640 conf/proxy %{buildroot}%{_sysconfdir}/salt/proxy - -%if ! 
(0%{?rhel} >= 7 || 0%{?fedora} >= 15) -mkdir -p %{buildroot}%{_initrddir} -install -p %{SOURCE1} %{buildroot}%{_initrddir}/ -install -p %{SOURCE2} %{buildroot}%{_initrddir}/ -install -p %{SOURCE3} %{buildroot}%{_initrddir}/ -install -p %{SOURCE4} %{buildroot}%{_initrddir}/ -%else -mkdir -p %{buildroot}%{_unitdir} -install -p -m 0644 %{SOURCE5} %{buildroot}%{_unitdir}/ -install -p -m 0644 %{SOURCE6} %{buildroot}%{_unitdir}/ -install -p -m 0644 %{SOURCE7} %{buildroot}%{_unitdir}/ -install -p -m 0644 %{SOURCE8} %{buildroot}%{_unitdir}/ -%endif - -# Force python2.6 on EPEL6 -# https://github.com/saltstack/salt/issues/22003 -%if 0%{?rhel} == 6 -sed -i 's#/usr/bin/python#/usr/bin/python2.6#g' %{buildroot}%{_bindir}/salt* -sed -i 's#/usr/bin/python#/usr/bin/python2.6#g' %{buildroot}%{_initrddir}/salt* -%endif - -install -p %{SOURCE9} . - -# Logrotate -mkdir -p %{buildroot}%{_sysconfdir}/logrotate.d/ -install -p %{SOURCE10} %{buildroot}%{_sysconfdir}/logrotate.d/salt - -# Bash completion -mkdir -p %{buildroot}%{_sysconfdir}/bash_completion.d/ -install -p -m 0644 %{SOURCE11} %{buildroot}%{_sysconfdir}/bash_completion.d/salt.bash - -%clean -rm -rf %{buildroot} - -%files -%defattr(-,root,root,-) -%doc LICENSE -%doc %{_mandir}/man1/spm.1.* -%{python_sitelib}/%{name}/* -%{python_sitelib}/%{name}-*-py?.?.egg-info -%{_sysconfdir}/logrotate.d/salt -%{_sysconfdir}/bash_completion.d/salt.bash -%{_var}/cache/salt -%{_var}/log/salt -%doc README.fedora -%{_bindir}/spm -%config(noreplace) %{_sysconfdir}/salt/ -%config(noreplace) %{_sysconfdir}/salt/pki - -%files master -%defattr(-,root,root) -%doc %{_mandir}/man7/salt.7.* -%doc %{_mandir}/man1/salt-cp.1.* -%doc %{_mandir}/man1/salt-key.1.* -%doc %{_mandir}/man1/salt-master.1.* -%doc %{_mandir}/man1/salt-run.1.* -%{_bindir}/salt -%{_bindir}/salt-cp -%{_bindir}/salt-key -%{_bindir}/salt-master -%{_bindir}/salt-run -%if ! 
(0%{?rhel} >= 7 || 0%{?fedora} >= 15) -%attr(0755, root, root) %{_initrddir}/salt-master -%else -%{_unitdir}/salt-master.service -%endif -%config(noreplace) %{_sysconfdir}/salt/master -%config(noreplace) %{_sysconfdir}/salt/master.d -%config(noreplace) %{_sysconfdir}/salt/pki/master -%config(noreplace) %{_var}/log/salt/master - -%files minion -%defattr(-,root,root) -%doc %{_mandir}/man1/salt-call.1.* -%doc %{_mandir}/man1/salt-minion.1.* -%doc %{_mandir}/man1/salt-proxy.1.* -%{_bindir}/salt-minion -%{_bindir}/salt-call -%{_bindir}/salt-proxy -%if ! (0%{?rhel} >= 7 || 0%{?fedora} >= 15) -%attr(0755, root, root) %{_initrddir}/salt-minion -%else -%{_unitdir}/salt-minion.service -%endif -%config(noreplace) %{_sysconfdir}/salt/minion -%config(noreplace) %{_sysconfdir}/salt/proxy -%config(noreplace) %{_sysconfdir}/salt/minion.d -%config(noreplace) %{_sysconfdir}/salt/pki/minion -%config(noreplace) %{_var}/log/salt/minion - -%files syndic -%doc %{_mandir}/man1/salt-syndic.1.* -%{_bindir}/salt-syndic -%if ! (0%{?rhel} >= 7 || 0%{?fedora} >= 15) -%attr(0755, root, root) %{_initrddir}/salt-syndic -%else -%{_unitdir}/salt-syndic.service -%endif - -%files api -%defattr(-,root,root) -%doc %{_mandir}/man1/salt-api.1.* -%{_bindir}/salt-api -%if ! (0%{?rhel} >= 7 || 0%{?fedora} >= 15) -%attr(0755, root, root) %{_initrddir}/salt-api -%else -%{_unitdir}/salt-api.service -%endif - -%files cloud -%doc %{_mandir}/man1/salt-cloud.1.* -%{_bindir}/salt-cloud -%{_sysconfdir}/salt/cloud.conf.d -%{_sysconfdir}/salt/cloud.deploy.d -%{_sysconfdir}/salt/cloud.maps.d -%{_sysconfdir}/salt/cloud.profiles.d -%{_sysconfdir}/salt/cloud.providers.d -%config(noreplace) %{_sysconfdir}/salt/cloud - -%files ssh -%doc %{_mandir}/man1/salt-ssh.1.* -%{_bindir}/salt-ssh -%config(noreplace) %{_sysconfdir}/salt/roster - - -# less than RHEL 8 / Fedora 16 -# not sure if RHEL 7 will use systemd yet -%if ! 
(0%{?rhel} >= 7 || 0%{?fedora} >= 15) - -%preun master - if [ $1 -eq 0 ] ; then - /sbin/service salt-master stop >/dev/null 2>&1 - /sbin/chkconfig --del salt-master - fi - -%preun syndic - if [ $1 -eq 0 ] ; then - /sbin/service salt-syndic stop >/dev/null 2>&1 - /sbin/chkconfig --del salt-syndic - fi - -%preun minion - if [ $1 -eq 0 ] ; then - /sbin/service salt-minion stop >/dev/null 2>&1 - /sbin/chkconfig --del salt-minion - fi - -%post master - /sbin/chkconfig --add salt-master - -%post minion - /sbin/chkconfig --add salt-minion - -%postun master - if [ "$1" -ge "1" ] ; then - /sbin/service salt-master condrestart >/dev/null 2>&1 || : - fi - -%postun syndic - if [ "$1" -ge "1" ] ; then - /sbin/service salt-syndic condrestart >/dev/null 2>&1 || : - fi - -%postun minion - if [ "$1" -ge "1" ] ; then - /sbin/service salt-minion condrestart >/dev/null 2>&1 || : - fi - -%else - -%preun master -%if 0%{?systemd_preun:1} - %systemd_preun salt-master.service -%else - if [ $1 -eq 0 ] ; then - # Package removal, not upgrade - /bin/systemctl --no-reload disable salt-master.service > /dev/null 2>&1 || : - /bin/systemctl stop salt-master.service > /dev/null 2>&1 || : - fi -%endif - -%preun syndic -%if 0%{?systemd_preun:1} - %systemd_preun salt-syndic.service -%else - if [ $1 -eq 0 ] ; then - # Package removal, not upgrade - /bin/systemctl --no-reload disable salt-syndic.service > /dev/null 2>&1 || : - /bin/systemctl stop salt-syndic.service > /dev/null 2>&1 || : - fi -%endif - -%preun minion -%if 0%{?systemd_preun:1} - %systemd_preun salt-minion.service -%else - if [ $1 -eq 0 ] ; then - # Package removal, not upgrade - /bin/systemctl --no-reload disable salt-minion.service > /dev/null 2>&1 || : - /bin/systemctl stop salt-minion.service > /dev/null 2>&1 || : - fi -%endif - -%post master -%if 0%{?systemd_post:1} - %systemd_post salt-master.service -%else - /bin/systemctl daemon-reload &>/dev/null || : -%endif - -%post minion -%if 0%{?systemd_post:1} - %systemd_post 
salt-minion.service -%else - /bin/systemctl daemon-reload &>/dev/null || : -%endif - -%postun master -%if 0%{?systemd_post:1} - %systemd_postun salt-master.service -%else - /bin/systemctl daemon-reload &>/dev/null - [ $1 -gt 0 ] && /bin/systemctl try-restart salt-master.service &>/dev/null || : -%endif - -%postun syndic -%if 0%{?systemd_post:1} - %systemd_postun salt-syndic.service -%else - /bin/systemctl daemon-reload &>/dev/null - [ $1 -gt 0 ] && /bin/systemctl try-restart salt-syndic.service &>/dev/null || : -%endif - -%postun minion -%if 0%{?systemd_post:1} - %systemd_postun salt-minion.service -%else - /bin/systemctl daemon-reload &>/dev/null - [ $1 -gt 0 ] && /bin/systemctl try-restart salt-minion.service &>/dev/null || : -%endif - -%endif diff --git a/tests/pytests/functional/modules/test_mac_assistive.py b/tests/pytests/functional/modules/test_mac_assistive.py new file mode 100644 index 000000000000..d2b6808b56d0 --- /dev/null +++ b/tests/pytests/functional/modules/test_mac_assistive.py @@ -0,0 +1,105 @@ +""" + :codeauthor: Nicole Thomas +""" + +import pytest + +from salt.exceptions import CommandExecutionError + +pytestmark = [ + pytest.mark.destructive_test, + pytest.mark.skip_if_not_root, + pytest.mark.skip_unless_on_darwin, +] + + +@pytest.fixture(scope="module") +def assistive(modules): + return modules.assistive + + +@pytest.fixture +def osa_script(assistive): + osa_script_path = "/usr/bin/osascript" + try: + ret = assistive.install(osa_script_path, True) + yield osa_script_path + except CommandExecutionError as exc: + pytest.skip(f"Unable to install {osa_script}: {exc}") + finally: + osa_script_ret = assistive.installed(osa_script_path) + if osa_script_ret: + assistive.remove(osa_script_path) + + +@pytest.fixture +def install_remove_pkg_name(assistive): + smile_bundle = "com.smileonmymac.textexpander" + try: + yield smile_bundle + finally: + smile_bundle_present = assistive.installed(smile_bundle) + if smile_bundle_present: + 
assistive.remove(smile_bundle) + + +@pytest.mark.slow_test +def test_install_and_remove(assistive, install_remove_pkg_name): + """ + Tests installing and removing a bundled ID or command to use assistive access. + """ + ret = assistive.install(install_remove_pkg_name) + assert ret + ret = assistive.remove(install_remove_pkg_name) + assert ret + + +@pytest.mark.slow_test +def test_installed(assistive, osa_script): + """ + Tests the True and False return of assistive.installed. + """ + # OSA script should have been installed in _setup_teardown_vars function + ret = assistive.installed(osa_script) + assert ret + # Clean up install + assistive.remove(osa_script) + # Installed should now return False + ret = assistive.installed(osa_script) + assert not ret + + +@pytest.mark.slow_test +def test_enable(assistive, osa_script): + """ + Tests setting the enabled status of a bundled ID or command. + """ + # OSA script should have been installed and enabled in _setup_teardown_vars function + # Now let's disable it, which should return True. + ret = assistive.enable(osa_script, False) + assert ret + # Double check the script was disabled, as intended. + ret = assistive.enabled(osa_script) + assert not ret + # Now re-enable + ret = assistive.enable(osa_script) + assert ret + # Double check the script was enabled, as intended. + ret = assistive.enabled(osa_script) + assert ret + + +@pytest.mark.slow_test +def test_enabled(assistive, osa_script): + """ + Tests if a bundled ID or command is listed in assistive access returns True. + """ + # OSA script should have been installed in _setup_teardown_vars function, which sets + # enabled to True by default. 
+ ret = assistive.enabled(osa_script) + assert ret + # Disable OSA Script + assistive.enable(osa_script, False) + # Assert against new disabled status + ret = assistive.enabled(osa_script) + assert not ret diff --git a/tests/pytests/functional/modules/test_mac_brew_pkg.py b/tests/pytests/functional/modules/test_mac_brew_pkg.py new file mode 100644 index 000000000000..ae6fe9971bf6 --- /dev/null +++ b/tests/pytests/functional/modules/test_mac_brew_pkg.py @@ -0,0 +1,132 @@ +""" + :codeauthor: Nicole Thomas + :codeauthor: Gareth J. Greenaway +""" + +import pytest + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.destructive_test, + pytest.mark.skip_if_not_root, + pytest.mark.skip_unless_on_darwin, + pytest.mark.skip_if_binaries_missing("brew"), +] + + +@pytest.fixture(scope="module") +def pkg(modules): + return modules.pkg + + +@pytest.fixture +def pkg_1_name(pkg): + pkg_name = "algol68g" + try: + yield pkg_name + finally: + pkg_list = pkg.list_pkgs() + + # Remove package if installed + if pkg_name in pkg_list: + pkg.remove(pkg_name) + + +@pytest.fixture +def pkg_2_name(pkg): + pkg_name = "acme" + try: + pkg.install(pkg_name) + pkg_list = pkg.list_pkgs() + if pkg_name not in pkg_list: + pytest.skip(f"Failed to install the '{pkg_name}' package to delete") + yield pkg_name + finally: + pkg_list = pkg.list_pkgs() + + # Remove package if still installed + if pkg_name in pkg_list: + pkg.remove(pkg_name) + + +def test_brew_install(pkg, pkg_1_name): + """ + Tests the installation of packages + """ + pkg.install(pkg_1_name) + pkg_list = pkg.list_pkgs() + assert pkg_1_name in pkg_list + + +def test_remove(pkg, pkg_2_name): + """ + Tests the removal of packages + """ + pkg.remove(pkg_2_name) + pkg_list = pkg.list_pkgs() + assert pkg_2_name not in pkg_list + + +def test_version(pkg, pkg_1_name): + """ + Test pkg.version for mac. Installs a package and then checks we can get + a version for the installed package. 
+ """ + pkg.install(pkg_1_name) + pkg_list = pkg.list_pkgs() + version = pkg.version(pkg_1_name) + assert version + assert pkg_1_name in pkg_list + # make sure the version is accurate and is listed in the pkg_list + assert version in str(pkg_list[pkg_1_name]) + + +def test_latest_version(pkg, pkg_1_name): + """ + Test pkg.latest_version: + - get the latest version available + - install the package + - get the latest version available + - check that the latest version is empty after installing it + """ + pkg.remove(pkg_1_name) + uninstalled_latest = pkg.latest_version(pkg_1_name) + + pkg.install(pkg_1_name) + installed_latest = pkg.latest_version(pkg_1_name) + version = pkg.version(pkg_1_name) + assert isinstance(uninstalled_latest, str) + assert installed_latest == version + + +def test_refresh_db(pkg): + """ + Integration test to ensure pkg.refresh_db works with brew + """ + refresh_brew = pkg.refresh_db() + assert refresh_brew + + +def test_list_upgrades(pkg, pkg_1_name): + """ + Test pkg.list_upgrades: data is in the form {'name1': 'version1', 'name2': 'version2', ... } + """ + upgrades = pkg.list_upgrades() + assert isinstance(upgrades, dict) + if upgrades: + for name in upgrades: + assert isinstance(name, str) + assert isinstance(upgrades[name], str) + + +def test_info_installed(pkg, pkg_1_name): + """ + Test pkg.info_installed: info returned has certain fields used by + mac_brew.latest_version + """ + pkg.install(pkg_1_name) + info = pkg.info_installed(pkg_1_name) + assert pkg_1_name in info + assert "versions" in info[pkg_1_name] + assert "revision" in info[pkg_1_name] + assert "stable" in info[pkg_1_name]["versions"] diff --git a/tests/pytests/functional/modules/test_mac_desktop.py b/tests/pytests/functional/modules/test_mac_desktop.py new file mode 100644 index 000000000000..7de6744adc06 --- /dev/null +++ b/tests/pytests/functional/modules/test_mac_desktop.py @@ -0,0 +1,74 @@ +""" +Integration tests for the mac_desktop execution module. 
+""" + +import pytest + +from salt.exceptions import CommandExecutionError + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.destructive_test, + pytest.mark.skip_if_not_root, + pytest.mark.skip_unless_on_darwin, +] + + +@pytest.fixture(scope="module") +def desktop(modules): + return modules.desktop + + +def test_get_output_volume(desktop): + """ + Tests the return of get_output_volume. + """ + ret = desktop.get_output_volume() + assert ret is not None + + +def test_set_output_volume(desktop): + """ + Tests the return of set_output_volume. + """ + current_vol = desktop.get_output_volume() + try: + to_set = 10 + if current_vol == str(to_set): + to_set += 2 + new_vol = desktop.set_output_volume(str(to_set)) + check_vol = desktop.get_output_volume() + assert new_vol == check_vol + finally: + # Set volume back to what it was before + desktop.set_output_volume(current_vol) + + +def test_screensaver(desktop): + """ + Tests the return of the screensaver function. + """ + try: + ret = desktop.screensaver() + except CommandExecutionError as exc: + pytest.skip("Skipping. Screensaver unavailable.") + assert ret + + +def test_lock(desktop): + """ + Tests the return of the lock function. + """ + try: + ret = desktop.lock() + except CommandExecutionError as exc: + pytest.skip("Skipping. Unable to lock screen.") + assert ret + + +def test_say(desktop): + """ + Tests the return of the say function. 
+ """ + ret = desktop.say("hello", "world") + assert ret diff --git a/tests/pytests/functional/modules/test_mac_group.py b/tests/pytests/functional/modules/test_mac_group.py new file mode 100644 index 000000000000..2f88943d979f --- /dev/null +++ b/tests/pytests/functional/modules/test_mac_group.py @@ -0,0 +1,176 @@ +""" + :codeauthor: Nicole Thomas +""" + +import pytest +from saltfactories.utils import random_string + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.destructive_test, + pytest.mark.skip_if_not_root, + pytest.mark.skip_unless_on_darwin, +] + + +@pytest.fixture(scope="module") +def group(modules): + return modules.group + + +# Create group name strings for tests +@pytest.fixture(scope="module") +def non_existing_group_name(group): + group_name = random_string("group-", lowercase=False) + try: + yield group_name + finally: + # Delete the added group + group_info = group.info(group_name) + if group_info: + group.delete(group_name) + + +@pytest.fixture(scope="module") +def existing_group_name(group): + group_name = random_string("group-", lowercase=False) + try: + ret = group.add(group_name, 4567) + if ret is not True: + pytest.skip("Failed to create a group to delete") + yield group_name + finally: + # Delete the added group + group_info = group.info(group_name) + if group_info: + group.delete(group_name) + + +@pytest.fixture(scope="module") +def non_existing_user(group): + group_name = random_string("user-", lowercase=False) + try: + yield group_name + finally: + # Delete the added group + group_info = group.info(group_name) + if group_info: + group.delete(group_name) + + +@pytest.fixture(scope="module") +def existing_user(group, existing_group_name): + group_name = random_string("user-", lowercase=False) + try: + ret = group.adduser(existing_group_name, group_name) + if ret is not True: + pytest.skip("Failed to create an existing group member") + yield group_name + finally: + # Delete the added group + group_info = group.info(group_name) + if 
group_info: + group.delete(group_name) + + +@pytest.fixture(scope="module") +def rep_user_group(): + yield random_string("RS-", lowercase=False) + + +@pytest.fixture(scope="module") +def non_existing_group_member(group): + group_name = random_string("user-", lowercase=False) + try: + yield group_name + finally: + # Delete the added group + group_info = group.info(group_name) + if group_info: + group.delete(group_name) + + +def test_mac_group_add(group, non_existing_group_name): + """ + Tests the add group function + """ + group.add(non_existing_group_name, 3456) + group_info = group.info(non_existing_group_name) + assert group_info["name"] == non_existing_group_name + + +def test_mac_group_delete(group, existing_group_name): + """ + Tests the delete group function + """ + ret = group.delete(existing_group_name) + assert ret + + +def test_mac_group_chgid(group, existing_group_name): + """ + Tests changing the group id + """ + gid = 6789 + group_info = group.info(existing_group_name) + assert group_info["gid"] != gid + group.chgid(existing_group_name, gid) + group_info = group.info(existing_group_name) + assert group_info["gid"] == gid + + +def test_mac_adduser(group, non_existing_group_name, non_existing_user): + """ + Tests adding user to the group + """ + # Create a group to use for test - If unsuccessful, skip the test + ret = group.add(non_existing_group_name, 5678) + if ret is not True: + group.delete(non_existing_group_name) + pytest.skip("Failed to create a group to manipulate") + + group.adduser(non_existing_group_name, non_existing_user) + group_info = group.info(non_existing_group_name) + assert non_existing_user in group_info["members"] + assert group_info["members"] == [non_existing_user] + + +def test_mac_deluser(group, existing_group_name, existing_user): + """ + Test deleting user from a group + """ + delusr = group.deluser(existing_group_name, existing_user) + assert delusr + + group_info = group.info(existing_group_name) + assert existing_user not 
in group_info["members"]
+
+
+def test_mac_members(
+    group, existing_group_name, existing_user, non_existing_group_member
+):
+    """
+    Test replacing members of a group
+    """
+    group_info = group.info(existing_group_name)
+    assert non_existing_group_member not in group_info["members"]
+    # Bug fix: the original asserted on `non_existing_user`, which is neither a
+    # parameter nor defined here (NameError); the requested fixture is
+    # `existing_user`, the member that gets replaced below.
+    assert existing_user in group_info["members"]
+
+    # Replace group members
+    rep_group_mem = group.members(existing_group_name, non_existing_group_member)
+    assert rep_group_mem
+
+    # ensure new user is added to group and previous user is removed
+    group_info = group.info(existing_group_name)
+    assert non_existing_group_member in group_info["members"]
+    assert existing_user not in group_info["members"]
+
+
+def test_mac_getent(group, existing_user, existing_group_name):
+    """
+    Test returning info on all groups
+    """
+    getinfo = group.getent()
+    assert getinfo
+    assert existing_group_name in str(getinfo)
+    assert existing_user in str(getinfo)
diff --git a/tests/pytests/functional/modules/test_mac_keychain.py b/tests/pytests/functional/modules/test_mac_keychain.py
new file mode 100644
index 000000000000..eb67304487d2
--- /dev/null
+++ b/tests/pytests/functional/modules/test_mac_keychain.py
@@ -0,0 +1,129 @@
+"""
+Validate the mac-keychain module
+"""
+
+import os
+
+import pytest
+
+import salt.utils.versions
+from tests.support.runtests import RUNTIME_VARS
+
+pytestmark = [
+    pytest.mark.slow_test,
+    pytest.mark.destructive_test,
+    pytest.mark.skip_if_not_root,
+    pytest.mark.skip_unless_on_darwin,
+]
+
+
+@pytest.fixture(scope="module")
+def cmd(modules):
+    return modules.cmd
+
+
+@pytest.fixture(scope="module")
+def keychain(modules):
+    return modules.keychain
+
+
+@pytest.fixture(scope="function", autouse=True)
+def setup_teardown_vars(keychain, base_env_state_tree_root_dir):
+    cert = os.path.join(RUNTIME_VARS.FILES, "file", "base", "certs", "salttest.p12")
+    cert_alias = "Salt Test"
+    passwd = "salttest"
+
+    try:
+        yield cert, cert_alias, passwd
+    finally:
+        certs_list =
keychain.list_certs() + if cert_alias in certs_list: + keychain.uninstall(cert_alias) + + +def test_mac_keychain_install(keychain, setup_teardown_vars): + """ + Tests that attempts to install a certificate + """ + + cert = setup_teardown_vars[0] + cert_alias = setup_teardown_vars[1] + passwd = setup_teardown_vars[2] + + install_cert = keychain.install(cert, passwd) + assert install_cert + assert install_cert == "1 identity imported." + + # check to ensure the cert was installed + certs_list = keychain.list_certs() + assert cert_alias in certs_list + + +def test_mac_keychain_uninstall(keychain, setup_teardown_vars): + """ + Tests that attempts to uninstall a certificate + """ + + cert = setup_teardown_vars[0] + cert_alias = setup_teardown_vars[1] + passwd = setup_teardown_vars[2] + + keychain.install(cert, passwd) + certs_list = keychain.list_certs() + + if cert_alias not in certs_list: + keychain.uninstall(cert_alias) + pytest.skip("Failed to install keychain") + + # uninstall cert + keychain.uninstall(cert_alias) + certs_list = keychain.list_certs() + + # check to ensure the cert was uninstalled + assert cert_alias not in str(certs_list) + + +@pytest.mark.skip_if_binaries_missing("openssl") +def test_mac_keychain_get_friendly_name(keychain, shell, setup_teardown_vars): + """ + Test that attempts to get friendly name of a cert + """ + cert = setup_teardown_vars[0] + cert_alias = setup_teardown_vars[1] + passwd = setup_teardown_vars[2] + + keychain.install(cert, passwd) + certs_list = keychain.list_certs() + if cert_alias not in certs_list: + keychain.uninstall(cert_alias) + pytest.skip("Failed to install keychain") + + ret = shell.run("openssl", "version") + assert ret.stdout + openssl_version = ret.stdout.split()[1] + + # openssl versions under 3.0.0 do not include legacy flag + if salt.utils.versions.compare(ver1=openssl_version, oper="<", ver2="3.0.0"): + get_name = keychain.get_friendly_name(cert, passwd, legacy=False) + else: + get_name = 
keychain.get_friendly_name(cert, passwd, legacy=True)
+
+    assert get_name == cert_alias
+
+
+def test_mac_keychain_get_default_keychain(keychain, cmd, setup_teardown_vars):
+    """
+    Test that attempts to get the default keychain
+    """
+    sys_get_keychain = keychain.get_default_keychain()
+    salt_get_keychain = cmd.run("security default-keychain -d user")
+    assert salt_get_keychain == sys_get_keychain
+
+
+def test_mac_keychain_list_certs(keychain, setup_teardown_vars):
+    """
+    Test that attempts to list certs
+    """
+    cert_default = "com.apple.systemdefault"
+    certs = keychain.list_certs()
+    assert cert_default in certs
diff --git a/tests/pytests/functional/modules/test_mac_portspkg.py b/tests/pytests/functional/modules/test_mac_portspkg.py
new file mode 100644
index 000000000000..419af432c529
--- /dev/null
+++ b/tests/pytests/functional/modules/test_mac_portspkg.py
@@ -0,0 +1,118 @@
+"""
+integration tests for mac_ports
+"""
+
+import pytest
+
+pytestmark = [
+    pytest.mark.slow_test,
+    pytest.mark.destructive_test,
+    pytest.mark.skip_if_not_root,
+    pytest.mark.skip_unless_on_darwin,
+    pytest.mark.skip_if_binaries_missing("port"),
+]
+
+
+@pytest.fixture(scope="module")
+def pkg(modules):
+    return modules.pkg
+
+
+@pytest.fixture
+def uninstalled_pkg_name(pkg):
+    pkgname = "wget"
+    try:
+        pkg.refresh_db()
+        yield pkgname
+    finally:
+        if pkgname in pkg.list_pkgs():
+            pkg.remove(pkgname)
+
+
+@pytest.fixture
+def installed_pkg_name(pkg, uninstalled_pkg_name):
+    pkg.install(uninstalled_pkg_name)
+    return uninstalled_pkg_name
+
+
+@pytest.fixture(scope="function", autouse=True)
+def _setup_teardown_vars(pkg):
+    pre_installed = False
+    try:
+        ret = pkg.list_pkgs()
+        pre_installed = "wget" in ret
+        pkg.refresh_db()
+        yield
+    finally:
+        if pre_installed:
+            pkg.remove("wget")
+
+
+def test_list_pkgs(pkg, installed_pkg_name):
+    """
+    Test pkg.list_pkgs
+    """
+    pkg_list_ret = pkg.list_pkgs()
+    assert isinstance(pkg_list_ret, dict)
+    assert
installed_pkg_name in pkg_list_ret + + +def test_latest_version(pkg, installed_pkg_name): + """ + Test pkg.latest_version + """ + result = pkg.latest_version(installed_pkg_name, refresh=False) + assert isinstance(result, dict) + assert installed_pkg_name in result.data + + +def test_remove(pkg, installed_pkg_name): + """ + Test pkg.remove + """ + ret = pkg.remove(installed_pkg_name) + assert isinstance(ret, dict) + assert installed_pkg_name in ret + + +@pytest.mark.destructive_test +def test_install(pkg, uninstalled_pkg_name): + """ + Test pkg.install + """ + ret = pkg.install(uninstalled_pkg_name) + assert isinstance(ret, dict) + assert uninstalled_pkg_name in ret + + +def test_list_upgrades_type(pkg): + """ + Test pkg.list_upgrades return type + """ + ret = pkg.list_upgrades(refresh=False) + assert isinstance(ret, dict) + + +def test_upgrade_available(pkg, installed_pkg_name): + """ + Test pkg.upgrade_available + """ + ret = pkg.upgrade_available(installed_pkg_name, refresh=False) + assert not ret.data + + +def test_refresh_db(pkg): + """ + Test pkg.refresh_db + """ + ret = pkg.refresh_db() + assert ret + + +def test_upgrade(pkg): + """ + Test pkg.upgrade + """ + ret = pkg.upgrade(refresh=False) + assert isinstance(ret, dict) + assert ret.data["result"] diff --git a/tests/pytests/functional/modules/test_mac_power.py b/tests/pytests/functional/modules/test_mac_power.py new file mode 100644 index 000000000000..fc444c222326 --- /dev/null +++ b/tests/pytests/functional/modules/test_mac_power.py @@ -0,0 +1,339 @@ +""" +integration tests for mac_power +""" + +import pytest + +from salt.exceptions import CommandExecutionError, SaltInvocationError + +pytestmark = [ + pytest.mark.skip_if_binaries_missing("systemsetup"), + pytest.mark.slow_test, + pytest.mark.destructive_test, + pytest.mark.skip_if_not_root, + pytest.mark.skip_unless_on_darwin, +] + + +@pytest.fixture(scope="module") +def power(modules): + return modules.power + + +@pytest.fixture +def 
_reset_computer_sleep(power): + ret = power.get_computer_sleep() + try: + yield + finally: + power.set_computer_sleep(ret) + + +@pytest.fixture +def _reset_display_sleep(power): + ret = power.get_display_sleep() + try: + yield + finally: + power.set_display_sleep(ret) + + +@pytest.fixture +def _reset_harddisk_sleep(power): + ret = power.get_harddisk_sleep() + try: + yield + finally: + power.set_harddisk_sleep(ret) + + +@pytest.fixture +def _reset_restart_power_failure(power): + try: + ret = power.get_restart_power_failure() + if not isinstance(ret, bool): + assert "Error" in ret + pytest.skip(f"Error while calling `get_restart_power_failure()`: {ret}") + except CommandExecutionError as exc: + if "Not supported on this machine" in str(exc): + pytest.skip("Restart After Power Failure: Not supported on this machine.") + try: + yield + finally: + if isinstance(ret, bool): + if ret: + ret = power.set_restart_power_failure("On") + assert ret + else: + ret = power.set_restart_power_failure("Off") + assert ret + + +@pytest.fixture +def _reset_sleep_on_power_button(power): + try: + ret = power.get_sleep_on_power_button() + if not isinstance(ret, bool): + functionality_available = False + else: + functionality_available = True + except CommandExecutionError as exc: + functionality_available = False + + if functionality_available is False: + pytest.skip("Skipping. sleep_on_power_button unavailable.") + + try: + yield + finally: + power.set_sleep_on_power_button(ret) + + +@pytest.fixture +def _reset_wake_on_modem(power): + try: + ret = power.get_wake_on_modem() + if not isinstance(ret, bool): + functionality_available = False + else: + functionality_available = True + except CommandExecutionError as exc: + functionality_available = False + + if functionality_available is False: + pytest.skip("Skipping. 
wake_on_modem unavailable.") + + try: + yield + finally: + power.set_wake_on_modem(ret) + + +@pytest.fixture +def _reset_wake_on_network(power): + try: + ret = power.get_wake_on_network() + if not isinstance(ret, bool): + assert "Error" in ret + pytest.skip(f"Error while calling `get_wake_on_network()`: {ret}") + except CommandExecutionError as exc: + if "Not supported on this machine" in str(exc): + pytest.skip("Wake On Network Access: Not supported on this machine") + try: + yield + finally: + if isinstance(ret, bool): + ret = power.set_wake_on_network(ret) + assert ret + + +@pytest.mark.usefixtures("_reset_computer_sleep") +def test_computer_sleep(power): + """ + Test power.get_computer_sleep + Test power.set_computer_sleep + """ + + # Normal Functionality + ret = power.set_computer_sleep(90) + assert ret + + ret = power.get_computer_sleep() + assert ret == "after 90 minutes" + + ret = power.set_computer_sleep("Off") + assert ret + + ret = power.get_computer_sleep() + assert ret == "Never" + + # Test invalid input + with pytest.raises(SaltInvocationError) as exc: + power.set_computer_sleep("spongebob") + assert "Invalid String Value for Minutes" in str(exc.value) + + with pytest.raises(SaltInvocationError) as exc: + power.set_computer_sleep(0) + assert "Invalid Integer Value for Minutes" in str(exc.value) + + with pytest.raises(SaltInvocationError) as exc: + power.set_computer_sleep(181) + assert "Invalid Integer Value for Minutes" in str(exc.value) + + with pytest.raises(SaltInvocationError) as exc: + power.set_computer_sleep(True) + assert "Invalid Boolean Value for Minutes" in str(exc.value) + + +@pytest.mark.usefixtures("_reset_display_sleep") +def test_display_sleep(power): + """ + Test power.get_display_sleep + Test power.set_display_sleep + """ + + # Normal Functionality + ret = power.set_display_sleep(90) + assert ret + + ret = power.get_display_sleep() + assert ret == "after 90 minutes" + + ret = power.set_display_sleep("Off") + assert ret + + ret = 
power.get_display_sleep() + assert ret == "Never" + + # Test invalid input + with pytest.raises(SaltInvocationError) as exc: + power.set_display_sleep("spongebob") + assert "Invalid String Value for Minutes" in str(exc.value) + + with pytest.raises(SaltInvocationError) as exc: + power.set_display_sleep(0) + assert "Invalid Integer Value for Minutes" in str(exc.value) + + with pytest.raises(SaltInvocationError) as exc: + power.set_display_sleep(181) + assert "Invalid Integer Value for Minutes" in str(exc.value) + + with pytest.raises(SaltInvocationError) as exc: + power.set_display_sleep(True) + assert "Invalid Boolean Value for Minutes" in str(exc.value) + + +@pytest.mark.usefixtures("_reset_harddisk_sleep") +def test_harddisk_sleep(power): + """ + Test power.get_harddisk_sleep + Test power.set_harddisk_sleep + """ + + # Normal Functionality + ret = power.set_harddisk_sleep(90) + assert ret + + ret = power.get_harddisk_sleep() + assert ret == "after 90 minutes" + + ret = power.set_harddisk_sleep("Off") + assert ret + + ret = power.get_harddisk_sleep() + assert ret == "Never" + + # Test invalid input + with pytest.raises(SaltInvocationError) as exc: + power.set_harddisk_sleep("spongebob") + assert "Invalid String Value for Minutes" in str(exc.value) + + with pytest.raises(SaltInvocationError) as exc: + power.set_harddisk_sleep(0) + assert "Invalid Integer Value for Minutes" in str(exc.value) + + with pytest.raises(SaltInvocationError) as exc: + power.set_harddisk_sleep(181) + assert "Invalid Integer Value for Minutes" in str(exc.value) + + with pytest.raises(SaltInvocationError) as exc: + power.set_harddisk_sleep(True) + assert "Invalid Boolean Value for Minutes" in str(exc.value) + + +def test_restart_freeze(power): + """ + Test power.get_restart_freeze + Test power.set_restart_freeze + """ + # Normal Functionality + ret = power.set_restart_freeze("on") + assert ret + + ret = power.get_restart_freeze() + assert ret + + # This will return False because mac fails to 
actually make the change + ret = power.set_restart_freeze("off") + assert not ret + + # Even setting to off returns true, it actually is never set + # This is an apple bug + ret = power.get_restart_freeze() + assert ret + + +@pytest.mark.usefixtures("_reset_restart_power_failure") +def test_restart_power_failure(power): + """ + Test power.get_restart_power_failure + Test power.set_restart_power_failure + """ + ret = power.set_restart_power_failure("On") + assert ret + + ret = power.get_restart_power_failure() + assert ret + + ret = power.set_restart_power_failure("Off") + assert ret + + ret = power.get_restart_power_failure() + assert not ret + + +@pytest.mark.usefixtures("_reset_sleep_on_power_button") +def test_sleep_on_power_button(power): + """ + Test power.get_sleep_on_power_button + Test power.set_sleep_on_power_button + """ + ret = power.set_sleep_on_power_button("on") + assert ret + + ret = power.get_sleep_on_power_button() + assert ret + + ret = power.set_sleep_on_power_button("off") + assert ret + + ret = power.get_sleep_on_power_button() + assert not ret + + +@pytest.mark.usefixtures("_reset_wake_on_modem") +def test_wake_on_modem(power): + """ + Test power.get_wake_on_modem + Test power.set_wake_on_modem + """ + ret = power.set_wake_on_modem("on") + assert ret + + ret = power.get_wake_on_modem() + assert ret + + ret = power.set_wake_on_modem("off") + assert ret + + ret = power.get_wake_on_modem() + assert not ret + + +@pytest.mark.usefixtures("_reset_wake_on_network") +def test_wake_on_network(power): + """ + Test power.get_wake_on_network + Test power.set_wake_on_network + """ + ret = power.set_wake_on_network("on") + assert ret + + ret = power.get_wake_on_network() + assert ret + + ret = power.set_wake_on_network("off") + assert ret + + ret = power.get_wake_on_network() + assert not ret diff --git a/tests/pytests/functional/modules/test_mac_service.py b/tests/pytests/functional/modules/test_mac_service.py new file mode 100644 index 
000000000000..0aa38a8ac8ea --- /dev/null +++ b/tests/pytests/functional/modules/test_mac_service.py @@ -0,0 +1,252 @@ +""" +integration tests for mac_service +""" + +import plistlib + +import pytest + +import salt.utils.files +from salt.exceptions import CommandExecutionError + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.skip_if_binaries_missing("launchctl", "plutil"), + pytest.mark.skip_unless_on_darwin, +] + + +@pytest.fixture(scope="module") +def service(modules): + return modules.service + + +@pytest.fixture(scope="function", autouse=True) +def service_name(service): + + service_name = "com.salt.integration.test" + service_path = "/Library/LaunchDaemons/com.salt.integration.test.plist" + + service_data = { + "KeepAlive": True, + "Label": service_name, + "ProgramArguments": ["/bin/sleep", "1000"], + "RunAtLoad": True, + } + with salt.utils.files.fopen(service_path, "wb") as fp: + plistlib.dump(service_data, fp) + service.enable(service_name) + service.start(service_name) + + try: + yield service_name + finally: + # Try to stop the service if it's running + try: + service.stop(service_name) + except CommandExecutionError: + pass + salt.utils.files.safe_rm(service_path) + + +def test_show(service, service_name): + """ + Test service.show + """ + # Existing Service + service_info = service.show(service_name) + assert isinstance(service_info, dict) + assert service_info["plist"]["Label"] == service_name + + # Missing Service + with pytest.raises(CommandExecutionError) as exc: + ret = service.show("spongebob") + assert "Service not found" in str(exc.value) + + +def test_launchctl(service, service_name): + """ + Test service.launchctl + """ + # Expected Functionality + ret = service.launchctl("error", "bootstrap", 64) + assert ret + + ret = service.launchctl("error", "bootstrap", 64, return_stdout=True) + assert ret == "64: unknown error code" + + # Raise an error + with pytest.raises(CommandExecutionError) as exc: + ret = service.launchctl("error", 
"bootstrap") + assert "Failed to error service" in str(exc.value) + + +def test_list(service, service_name): + """ + Test service.list + """ + # Expected Functionality + ret = service.list() + assert "PID" in ret + ret = service.list(service_name) + assert "{" in ret + + # Service not found + with pytest.raises(CommandExecutionError) as exc: + ret = service.list("spongebob") + assert "Service not found" in str(exc.value) + + +def test_enable(service, service_name): + """ + Test service.enable + """ + ret = service.enable(service_name) + assert ret + + with pytest.raises(CommandExecutionError) as exc: + ret = service.enable("spongebob") + assert "Service not found" in str(exc.value) + + +def test_disable(service, service_name): + """ + Test service.disable + """ + ret = service.disable(service_name) + assert ret + + with pytest.raises(CommandExecutionError) as exc: + ret = service.disable("spongebob") + assert "Service not found" in str(exc.value) + + +def test_start(service, service_name): + """ + Test service.start + Test service.stop + Test service.status + """ + service.stop(service_name) + ret = service.start(service_name) + assert ret + + with pytest.raises(CommandExecutionError) as exc: + ret = service.start("spongebob") + assert "Service not found" in str(exc.value) + + +def test_stop(service, service_name): + """ + Test service.stop + """ + ret = service.stop(service_name) + assert ret + + with pytest.raises(CommandExecutionError) as exc: + ret = service.stop("spongebob") + assert "Service not found" in str(exc.value) + + service.start(service_name) + + +def test_status(service, service_name): + """ + Test service.status + """ + # A running service + ret = service.status(service_name) + assert ret + + # A stopped service + service.stop(service_name) + ret = service.status(service_name) + assert not ret + + # Service not found + ret = service.status("spongebob") + assert not ret + + service.start(service_name) + + +def test_available(service, service_name): 
+ """ + Test service.available + """ + ret = service.available(service_name) + assert ret + + ret = service.available("spongebob") + assert not ret + + +def test_missing(service, service_name): + """ + Test service.missing + """ + ret = service.missing(service_name) + assert not ret + + ret = service.missing("spongebob") + assert ret + + +def test_enabled(service, service_name): + """ + Test service.enabled + """ + service.disabled(service_name) + ret = service.enabled(service_name) + assert ret + + with pytest.raises(CommandExecutionError) as exc: + ret = service.enabled("spongebob") + assert "Service not found: spongebob" in str(exc.value) + + +def test_disabled(service, service_name): + """ + Test service.disabled + """ + ret = service.disabled(service_name) + assert not ret + + ret = service.disable(service_name) + assert ret + + ret = service.disabled(service_name) + assert ret + + ret = service.enable(service_name) + assert ret + + with pytest.raises(CommandExecutionError) as exc: + ret = service.disable("spongebob") + assert "Service not found: spongebob" in str(exc.value) + + +def test_get_all(service, service_name): + """ + Test service.get_all + """ + services = service.get_all() + assert isinstance(services, list) + assert service_name in services + + +def test_get_enabled(service, service_name): + """ + Test service.get_enabled + """ + services = service.get_enabled() + assert isinstance(services, list) + assert service_name in services + + +def test_service_laoded(service, service_name): + """ + Test service.get_enabled + """ + ret = service.loaded(service_name) + assert ret diff --git a/tests/pytests/functional/modules/test_mac_shadow.py b/tests/pytests/functional/modules/test_mac_shadow.py new file mode 100644 index 000000000000..69ba93fa839f --- /dev/null +++ b/tests/pytests/functional/modules/test_mac_shadow.py @@ -0,0 +1,174 @@ +""" +integration tests for mac_shadow +""" + +import datetime +import types + +import pytest +from saltfactories.utils 
import random_string
+
+from salt.exceptions import CommandExecutionError
+
+pytestmark = [
+    pytest.mark.skip_if_binaries_missing("dscl", "pwpolicy"),
+    pytest.mark.slow_test,
+    pytest.mark.skip_if_not_root,
+    pytest.mark.skip_unless_on_darwin,
+]
+
+
+@pytest.fixture(scope="module")
+def shadow(modules):
+    return modules.shadow
+
+
+@pytest.fixture
+def accounts():
+    with pytest.helpers.create_account(create_group=True) as _account:
+        yield types.SimpleNamespace(
+            existing=_account.username,
+            non_existing=random_string("account-", lowercase=False),
+        )
+
+
+def test_info(shadow, accounts):
+    """
+    Test shadow.info
+    """
+    # Correct Functionality
+    ret = shadow.info(accounts.existing)
+    assert ret["name"] == accounts.existing
+
+    # User does not exist
+    ret = shadow.info(accounts.non_existing)
+    assert ret["name"] == ""
+
+
+def test_get_last_change(shadow, accounts):
+    """
+    Test shadow.get_last_change
+    """
+    # Correct Functionality
+    text_date = shadow.get_last_change(accounts.existing)
+    assert text_date != "Invalid Timestamp"
+    obj_date = datetime.datetime.strptime(text_date, "%Y-%m-%d %H:%M:%S")
+    assert isinstance(obj_date, datetime.date)
+
+    # User does not exist
+    with pytest.raises(CommandExecutionError) as exc:
+        shadow.get_last_change(accounts.non_existing)
+    assert f"ERROR: User not found: {accounts.non_existing}" in str(exc.value)
+
+
+def test_get_login_failed_last(shadow, accounts):
+    """
+    Test shadow.get_login_failed_last
+    """
+    # Correct Functionality
+    text_date = shadow.get_login_failed_last(accounts.existing)
+    assert text_date != "Invalid Timestamp"
+    obj_date = datetime.datetime.strptime(text_date, "%Y-%m-%d %H:%M:%S")
+    assert isinstance(obj_date, datetime.date)
+
+    # User does not exist
+    with pytest.raises(CommandExecutionError) as exc:
+        shadow.get_login_failed_last(accounts.non_existing)
+    assert f"ERROR: User not found: {accounts.non_existing}" in str(exc.value)
+
+
+def test_get_login_failed_count(shadow, accounts):
+    """
+    Test
shadow.get_login_failed_count + """ + # Correct Functionality + assert shadow.get_login_failed_count(accounts.existing) == "0" + + # User does not exist + with pytest.raises(CommandExecutionError) as exc: + shadow.get_login_failed_count(accounts.non_existing) + assert f"ERROR: User not found: {accounts.non_existing}" in str(exc.value) + + +def test_get_set_maxdays(shadow, accounts): + """ + Test shadow.get_maxdays + Test shadow.set_maxdays + """ + # Correct Functionality + assert shadow.set_maxdays(accounts.existing, 20) + assert shadow.get_maxdays(accounts.existing) == 20 + + # User does not exist + with pytest.raises(CommandExecutionError) as exc: + shadow.set_maxdays(accounts.non_existing, 7) + assert f"ERROR: User not found: {accounts.non_existing}" in str(exc.value) + + with pytest.raises(CommandExecutionError) as exc: + shadow.get_maxdays(accounts.non_existing) + assert f"ERROR: User not found: {accounts.non_existing}" in str(exc.value) + + +def test_get_set_change(shadow, accounts): + """ + Test shadow.get_change + Test shadow.set_change + """ + # Correct Functionality + assert shadow.set_change(accounts.existing, "02/11/2011") + assert shadow.get_change(accounts.existing) == "02/11/2011" + + # User does not exist + with pytest.raises(CommandExecutionError) as exc: + shadow.set_change(accounts.non_existing, "02/11/2012") + assert f"ERROR: User not found: {accounts.non_existing}" in str(exc.value) + + with pytest.raises(CommandExecutionError) as exc: + shadow.get_change(accounts.non_existing) + assert f"ERROR: User not found: {accounts.non_existing}" in str(exc.value) + + +def test_get_set_expire(shadow, accounts): + """ + Test shadow.get_expire + Test shadow.set_expire + """ + # Correct Functionality + assert shadow.set_expire(accounts.existing, "02/11/2011") + assert shadow.get_expire(accounts.existing) == "02/11/2011" + + # User does not exist + with pytest.raises(CommandExecutionError) as exc: + shadow.set_expire(accounts.non_existing, "02/11/2012") + 
assert f"ERROR: User not found: {accounts.non_existing}" in str(exc.value) + + with pytest.raises(CommandExecutionError) as exc: + shadow.get_expire(accounts.non_existing) + assert f"ERROR: User not found: {accounts.non_existing}" in str(exc.value) + + +def test_del_password(shadow, accounts): + """ + Test shadow.del_password + """ + # Correct Functionality + assert shadow.del_password(accounts.existing) + assert shadow.info(accounts.existing)["passwd"] == "*" + + # User does not exist + with pytest.raises(CommandExecutionError) as exc: + shadow.del_password(accounts.non_existing) + assert f"ERROR: User not found: {accounts.non_existing}" in str(exc.value) + + +def test_set_password(shadow, accounts): + """ + Test shadow.set_password + """ + # Correct Functionality + assert shadow.set_password(accounts.existing, "Pa$$W0rd") + + # User does not exist + with pytest.raises(CommandExecutionError) as exc: + shadow.set_password(accounts.non_existing, "P@SSw0rd") + assert f"ERROR: User not found: {accounts.non_existing}" in str(exc.value) diff --git a/tests/pytests/functional/modules/test_mac_softwareupdate.py b/tests/pytests/functional/modules/test_mac_softwareupdate.py new file mode 100644 index 000000000000..8cc839f07968 --- /dev/null +++ b/tests/pytests/functional/modules/test_mac_softwareupdate.py @@ -0,0 +1,193 @@ +""" +integration tests for mac_softwareupdate +""" + +import pytest + +from salt.exceptions import SaltInvocationError + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.skip_if_binaries_missing("softwareupdate"), + pytest.mark.skip_if_not_root, + pytest.mark.skip_unless_on_darwin, +] + + +@pytest.fixture(scope="module") +def softwareupdate(modules): + return modules.softwareupdate + + +@pytest.fixture +def _reset_schedule_enabled(softwareupdate): + ret = softwareupdate.schedule_enabled() + try: + yield + finally: + softwareupdate.schedule_enable(ret) + + +@pytest.fixture +def _reset_catalog(softwareupdate): + ret = softwareupdate.get_catalog() + 
try: + yield + finally: + if ret == "Default": + softwareupdate.reset_catalog() + else: + softwareupdate.set_catalog(ret) + + +@pytest.fixture +def _reset_ignored(softwareupdate): + ret = softwareupdate.list_ignored() or () + try: + yield + finally: + for item in ret: + softwareupdate.ignore(item) + + +def test_list_available(softwareupdate): + """ + Test softwareupdate.list_available + """ + # Can't predict what will be returned, so can only test that the return + # is the correct type, dict + ret = softwareupdate.list_available() + assert isinstance(ret, dict) + + +@pytest.mark.usefixtures("_reset_ignored") +@pytest.mark.skip(reason="Ignore removed from latest OS X.") +def test_ignore(softwareupdate): + """ + Test softwareupdate.ignore + Test softwareupdate.list_ignored + Test softwareupdate.reset_ignored + """ + # Test reset_ignored + ret = softwareupdate.reset_ignored() + assert ret + + ret = softwareupdate.list_ignored() + assert ret == [] + + # Test ignore + ret = softwareupdate.ignore("spongebob") + assert ret + + ret = softwareupdate.ignore("squidward") + assert ret + + # Test list_ignored and verify ignore + ret = softwareupdate.list_ignored() + assert "spongebob" in ret + + ret = softwareupdate.list_ignored() + assert "squidward" in ret + + +@pytest.mark.usefixtures("_reset_schedule_enabled") +@pytest.mark.skip(reason="Ignore schedule support removed from latest OS X.") +def test_schedule(softwareupdate): + """ + Test softwareupdate.schedule_enable + Test softwareupdate.schedule_enabled + """ + # Test enable + ret = softwareupdate.schedule_enable(True) + assert ret + + ret = softwareupdate.schedule_enabled() + assert ret + + # Test disable in case it was already enabled + ret = softwareupdate.schedule_enable(False) + assert not ret + + ret = softwareupdate.schedule_enabled() + assert not ret + + +def test_update(softwareupdate): + """ + Test softwareupdate.update_all + Test softwareupdate.update + Test softwareupdate.update_available + + Need to know the 
names of updates that are available to properly test
+    the update functions...
+    """
+    # There's no way to know what the dictionary will contain, so all we can
+    # check is that the return is a dictionary
+    ret = softwareupdate.update_all()
+    assert isinstance(ret, dict)
+
+    # Test update_available
+    ret = softwareupdate.update_available("spongebob")
+    assert not ret
+
+    # Test update not available
+    with pytest.raises(SaltInvocationError) as exc:
+        ret = softwareupdate.update("spongebob")
+    assert "Update not available" in str(exc.value)
+
+
+def test_list_downloads(softwareupdate):
+    """
+    Test softwareupdate.list_downloads
+    """
+    ret = softwareupdate.list_downloads()
+    assert isinstance(ret, list)
+
+
+def test_download(softwareupdate):
+    """
+    Test softwareupdate.download
+
+    Need to know the names of updates that are available to properly test
+    the download function
+    """
+    # Test update not available
+    with pytest.raises(SaltInvocationError) as exc:
+        softwareupdate.download("spongebob")
+    assert "Update not available" in str(exc.value)
+
+
+def test_download_all(softwareupdate):
+    """
+    Test softwareupdate.download_all
+    """
+    ret = softwareupdate.download_all()
+    assert isinstance(ret, list)
+
+
+@pytest.mark.usefixtures("_reset_catalog")
+@pytest.mark.skip(reason="Ignore catalog support removed from latest OS X.")
+def test_get_set_reset_catalog(softwareupdate):
+    """
+    Test softwareupdate.get_catalog, set_catalog, and reset_catalog
+    """
+    # Reset the catalog
+    ret = softwareupdate.reset_catalog()
+    assert ret
+
+    ret = softwareupdate.get_catalog()
+    assert ret == "Default"
+
+    # Test setting and getting the catalog
+    ret = softwareupdate.set_catalog("spongebob")
+    assert ret
+
+    ret = softwareupdate.get_catalog()
+    assert ret == "spongebob"
+
+    # Test reset the catalog
+    ret = softwareupdate.reset_catalog()
+    assert ret
+
+    ret = softwareupdate.get_catalog()
+    assert ret == "Default"
diff --git a/tests/pytests/functional/modules/test_mac_system.py
b/tests/pytests/functional/modules/test_mac_system.py index b579f67c1bca..bf78b8e61756 100644 --- a/tests/pytests/functional/modules/test_mac_system.py +++ b/tests/pytests/functional/modules/test_mac_system.py @@ -56,10 +56,10 @@ def _remote_events_cleanup(system, grains): @pytest.fixture -def _subnet_cleanup(system): +def subnet_name(system): subnet_name = system.get_subnet_name() try: - yield + yield random_string("subnet-", lowercase=False) finally: if system.get_subnet_name() != subnet_name: system.set_subnet_name(subnet_name) @@ -76,26 +76,15 @@ def _keyboard_cleanup(system): @pytest.fixture -def _computer_name_cleanup(system): +def computer_name(system): computer_name = system.get_computer_name() try: - yield + yield random_string("cmptr-", lowercase=False) finally: if system.get_computer_name() != computer_name: system.set_computer_name(computer_name) -@pytest.fixture(autouse=True) -def _setup_teardown_vars(service, system): - atrun_enabled = service.enabled("com.apple.atrun") - try: - yield - finally: - if not atrun_enabled: - atrun = "/System/Library/LaunchDaemons/com.apple.atrun.plist" - service.stop(atrun) - - @pytest.mark.usefixtures("_remote_login_cleanup") def test_get_set_remote_login(system): """ @@ -197,19 +186,16 @@ def test_get_set_remote_events(system): assert "Invalid String Value for Enabled" in str(exc.value) -@pytest.mark.usefixtures("_subnet_cleanup") -def test_get_set_subnet_name(system): +def test_get_set_subnet_name(system, subnet_name): """ Test system.get_subnet_name Test system.set_subnet_name """ - set_subnet_name = random_string("RS-", lowercase=False) - - ret = system.set_subnet_name(set_subnet_name) + ret = system.set_subnet_name(subnet_name) assert ret ret = system.get_subnet_name() - assert ret == set_subnet_name + assert ret == subnet_name @pytest.mark.skip_initial_gh_actions_failure @@ -336,21 +322,17 @@ def test_get_set_boot_arch(system): # investigate # @pytest.mark.skipif(salt.utils.platform.is_darwin() and six.PY3, 
reason='This test hangs on OS X on Py3. Skipping until #53566 is merged.') @pytest.mark.destructive_test -@pytest.mark.usefixtures("_computer_name_cleanup") -def test_get_set_computer_name(system): +def test_get_set_computer_name(system, computer_name): """ Test system.get_computer_name Test system.set_computer_name """ - set_computer_name = random_string("RS-", lowercase=False) + current_computer_name = system.get_computer_name() + assert current_computer_name + assert current_computer_name != computer_name - computer_name = system.get_computer_name() - - log.debug("set name is %s", set_computer_name) - ret = system.set_computer_name(set_computer_name) + ret = system.set_computer_name(computer_name) assert ret ret = system.get_computer_name() - assert ret == set_computer_name - - system.set_computer_name(computer_name) + assert ret == computer_name diff --git a/tests/pytests/functional/modules/test_mac_timezone.py b/tests/pytests/functional/modules/test_mac_timezone.py new file mode 100644 index 000000000000..2f153dd9b35a --- /dev/null +++ b/tests/pytests/functional/modules/test_mac_timezone.py @@ -0,0 +1,242 @@ +""" +Integration tests for mac_timezone + +If using parallels, make sure Time sync is turned off. Otherwise, parallels will +keep changing your date/time settings while the tests are running. 
To turn off +Time sync do the following: + - Go to actions -> configure + - Select options at the top and 'More Options' on the left + - Set time to 'Do not sync' +""" + +import datetime + +import pytest + +from salt.exceptions import SaltInvocationError + +pytestmark = [ + pytest.mark.skip_if_binaries_missing("systemsetup"), + pytest.mark.slow_test, + pytest.mark.destructive_test, + pytest.mark.skip_if_not_root, + pytest.mark.skip_unless_on_darwin, +] + + +@pytest.fixture(scope="module") +def timezone(modules): + return modules.timezone + + +@pytest.fixture +def _reset_time_server(timezone): + ret = timezone.get_time_server() + try: + yield + finally: + if timezone.get_time_server() != ret: + timezone.set_time_server(ret) + + +@pytest.fixture +def _reset_using_network_time(timezone): + ret = timezone.get_using_network_time() + try: + timezone.set_using_network_time(False) + yield ret + finally: + timezone.set_using_network_time(ret) + + +@pytest.fixture +def _reset_time(timezone, _reset_using_network_time): + ret = timezone.get_time() + try: + yield + finally: + if not _reset_using_network_time: + timezone.set_time(ret) + + +@pytest.fixture +def _reset_date(timezone, _reset_using_network_time): + ret = timezone.get_date() + try: + yield + finally: + if not _reset_using_network_time: + timezone.set_date(ret) + + +@pytest.fixture +def _reset_zone(timezone): + ret = timezone.get_zone() + try: + timezone.set_zone("America/Denver") + yield + finally: + timezone.set_zone(ret) + + +@pytest.mark.usefixtures("_reset_date") +def test_get_set_date(timezone): + """ + Test timezone.get_date + Test timezone.set_date + """ + # Correct Functionality + ret = timezone.set_date("2/20/2011") + assert ret + ret = timezone.get_date() + assert ret == "2/20/2011" + + # Test bad date format + with pytest.raises(SaltInvocationError) as exc: + ret = timezone.set_date("13/12/2014") + assert ( + "ERROR executing 'timezone.set_date': Invalid Date/Time Format: 13/12/2014" + in str(exc.value) + 
) + + +@pytest.mark.slow_test +def test_get_time(timezone): + """ + Test timezone.get_time + """ + text_time = timezone.get_time() + assert text_time != "Invalid Timestamp" + obj_date = datetime.datetime.strptime(text_time, "%H:%M:%S") + assert isinstance(obj_date, datetime.date) + + +@pytest.mark.usefixtures("_reset_time") +def test_set_time(timezone): + """ + Test timezone.set_time + """ + # Correct Functionality + ret = timezone.set_time("3:14") + assert ret + + # Test bad time format + with pytest.raises(SaltInvocationError) as exc: + ret = timezone.set_time("3:71") + assert ( + "ERROR executing 'timezone.set_time': Invalid Date/Time Format: 3:71" + in str(exc.value) + ) + + +@pytest.mark.usefixtures("_reset_zone") +def test_get_set_zone(timezone): + """ + Test timezone.get_zone + Test timezone.set_zone + """ + # Correct Functionality + ret = timezone.set_zone("Pacific/Wake") + assert ret + + ret = timezone.get_zone() + assert ret == "Pacific/Wake" + + # Test bad time zone + with pytest.raises(SaltInvocationError) as exc: + ret = timezone.set_zone("spongebob") + assert ( + "ERROR executing 'timezone.set_zone': Invalid Timezone: spongebob" + in str(exc.value) + ) + + +@pytest.mark.usefixtures("_reset_zone") +def test_get_offset(timezone): + """ + Test timezone.get_offset + """ + ret = timezone.set_zone("Pacific/Wake") + assert ret + ret = timezone.get_offset() + assert isinstance(ret, str) + assert ret == "+1200" + + ret = timezone.set_zone("America/Los_Angeles") + assert ret + ret = timezone.get_offset() + assert isinstance(ret, str) + assert ret == "-0800" + + +@pytest.mark.usefixtures("_reset_zone") +def test_get_set_zonecode(timezone): + """ + Test timezone.get_zonecode + Test timezone.set_zonecode + """ + ret = timezone.set_zone("America/Los_Angeles") + assert ret + ret = timezone.get_zone() + assert isinstance(ret, str) + assert ret == "America/Los_Angeles" + + ret = timezone.set_zone("Pacific/Wake") + assert ret + ret = timezone.get_zone() + assert 
isinstance(ret, str) + assert ret == "Pacific/Wake" + + +@pytest.mark.slow_test +def test_list_zones(timezone): + """ + Test timezone.list_zones + """ + zones = timezone.list_zones() + assert isinstance(zones, list) + assert "America/Denver" in zones + assert "America/Los_Angeles" in zones + + +@pytest.mark.usefixtures("_reset_zone") +def test_zone_compare(timezone): + """ + Test timezone.zone_compare + """ + ret = timezone.zone_compare("America/Denver") + assert ret + ret = timezone.zone_compare("Pacific/Wake") + assert not ret + + +@pytest.mark.usefixtures("_reset_using_network_time") +def test_get_set_using_network_time(timezone): + """ + Test timezone.get_using_network_time + Test timezone.set_using_network_time + """ + ret = timezone.set_using_network_time(True) + assert ret + + ret = timezone.get_using_network_time() + assert ret + + ret = timezone.set_using_network_time(False) + assert ret + + ret = timezone.get_using_network_time() + assert not ret + + +@pytest.mark.usefixtures("_reset_time_server") +def test_get_set_time_server(timezone): + """ + Test timezone.get_time_server + Test timezone.set_time_server + """ + ret = timezone.set_time_server("spongebob.com") + assert ret + + ret = timezone.get_time_server() + assert ret == "spongebob.com" diff --git a/tests/pytests/functional/modules/test_mac_user.py b/tests/pytests/functional/modules/test_mac_user.py new file mode 100644 index 000000000000..a885e500950f --- /dev/null +++ b/tests/pytests/functional/modules/test_mac_user.py @@ -0,0 +1,189 @@ +""" + :codeauthor: Nicole Thomas +""" + +import os + +import pytest +from saltfactories.utils import random_string + +import salt.utils.files + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.destructive_test, + pytest.mark.skip_if_not_root, + pytest.mark.skip_unless_on_darwin, +] + + +@pytest.fixture(scope="module") +def user(modules): + return modules.user + + +@pytest.fixture +def _reset_enable_auto_login(user): + # Make sure auto login is disabled before 
we start + if user.get_auto_login(): + pytest.skip("Auto login already enabled") + + try: + yield + finally: + # Make sure auto_login is disabled + ret = user.disable_auto_login() + assert ret + + # Make sure autologin is disabled + ret = user.get_auto_login() + if ret: + pytest.fail("Failed to disable auto login") + + +@pytest.fixture +def existing_user(user): + username = random_string("account-", uppercase=False) + try: + ret = user.add(username) + if ret is not True: + pytest.skip("Failed to create an account to manipulate") + yield username + finally: + user_info = user.info(username) + if user_info: + user.delete(username) + + +@pytest.fixture +def non_existing_user(user): + username = random_string("account-", uppercase=False) + try: + yield username + finally: + user_info = user.info(username) + if user_info: + user.delete(username) + + +def test_mac_user_add(user, non_existing_user): + """ + Tests the add function + """ + user.add(non_existing_user) + user_info = user.info(non_existing_user) + assert user_info["name"] == non_existing_user + + +def test_mac_user_delete(user, existing_user): + """ + Tests the delete function + """ + ret = user.delete(existing_user) + assert ret + + +def test_mac_user_primary_group(user, existing_user): + """ + Tests the primary_group function + """ + primary_group = user.primary_group(existing_user) + uid_info = user.info(existing_user) + assert primary_group in uid_info["groups"] + + +def test_mac_user_changes(user, existing_user): + """ + Tests mac_user functions that change user properties + """ + # Test mac_user.chuid + user.chuid(existing_user, 4376) + uid_info = user.info(existing_user) + assert uid_info["uid"] == 4376 + + # Test mac_user.chgid + user.chgid(existing_user, 4376) + gid_info = user.info(existing_user) + assert gid_info["gid"] == 4376 + + # Test mac.user.chshell + user.chshell(existing_user, "/bin/zsh") + shell_info = user.info(existing_user) + assert shell_info["shell"] == "/bin/zsh" + + # Test 
mac_user.chhome + user.chhome(existing_user, "/Users/foo") + home_info = user.info(existing_user) + assert home_info["home"] == "/Users/foo" + + # Test mac_user.chfullname + user.chfullname(existing_user, "Foo Bar") + fullname_info = user.info(existing_user) + assert fullname_info["fullname"] == "Foo Bar" + + # Test mac_user.chgroups + ret = user.info(existing_user) + pre_info = ret["groups"] + expected = pre_info + ["wheel"] + user.chgroups(existing_user, "wheel") + groups_info = user.info(existing_user) + assert groups_info["groups"] == expected + + +@pytest.mark.usefixtures("_reset_enable_auto_login") +def test_mac_user_enable_auto_login(user): + """ + Tests mac_user functions that enable auto login + """ + # Does enable return True + ret = user.enable_auto_login("Spongebob", "Squarepants") + assert ret + + # Did it set the user entry in the plist file + ret = user.get_auto_login() + assert ret == "Spongebob" + + # Did it generate the `/etc/kcpassword` file + assert os.path.exists("/etc/kcpassword") + + # Are the contents of the file correct + test_data = bytes.fromhex("2e f8 27 42 a0 d9 ad 8b cd cd 6c 7d") + with salt.utils.files.fopen("/etc/kcpassword", "rb") as f: + file_data = f.read() + assert test_data == file_data + + # Does disable return True + ret = user.disable_auto_login() + assert ret + + # Does it remove the user entry in the plist file + ret = user.get_auto_login() + assert not ret + + # Is the `/etc/kcpassword` file removed + assert not os.path.exists("/etc/kcpassword") + + +@pytest.mark.usefixtures("_reset_enable_auto_login") +def test_mac_user_disable_auto_login(user): + """ + Tests mac_user functions that disable auto login + """ + # Enable auto login for the test + user.enable_auto_login("Spongebob", "Squarepants") + + # Make sure auto login got set up + ret = user.get_auto_login() + if not ret == "Spongebob": + raise pytest.fail("Failed to enable auto login") + + # Does disable return True + ret = user.disable_auto_login() + assert ret + + # 
Does it remove the user entry in the plist file + ret = user.get_auto_login() + assert not ret + + # Is the `/etc/kcpassword` file removed + assert not os.path.exists("/etc/kcpassword") diff --git a/tests/pytests/functional/modules/test_mac_xattr.py b/tests/pytests/functional/modules/test_mac_xattr.py new file mode 100644 index 000000000000..9a91576f2858 --- /dev/null +++ b/tests/pytests/functional/modules/test_mac_xattr.py @@ -0,0 +1,176 @@ +""" +integration tests for mac_xattr +""" + +import pytest + +from salt.exceptions import CommandExecutionError + +pytestmark = [ + pytest.mark.skip_if_binaries_missing("xattr"), + pytest.mark.slow_test, + pytest.mark.skip_if_not_root, + pytest.mark.skip_unless_on_darwin, +] + + +@pytest.fixture(scope="module") +def xattr(modules): + return modules.xattr + + +@pytest.fixture +def existing_file(tmp_path): + fpath = tmp_path / "xattr_test_file.txt" + fpath.touch() + return fpath + + +@pytest.fixture +def non_existing_file(tmp_path): + return tmp_path / "xattr_no_file" + + +def test_list_no_xattr(xattr, existing_file, non_existing_file): + """ + Make sure there are no attributes + """ + # Clear existing attributes + ret = xattr.clear(existing_file) + assert ret + + # Test no attributes + ret = xattr.list(existing_file) + assert ret == {} + + # Test file not found + with pytest.raises(CommandExecutionError) as exc: + ret = xattr.list(non_existing_file) + assert f"File not found: {non_existing_file}" in str(exc.value) + + +def test_write(xattr, existing_file, non_existing_file): + """ + Write an attribute + """ + # Clear existing attributes + ret = xattr.clear(existing_file) + assert ret + + # Write some attributes + ret = xattr.write(existing_file, "spongebob", "squarepants") + assert ret + + ret = xattr.write(existing_file, "squidward", "plankton") + assert ret + + ret = xattr.write(existing_file, "crabby", "patty") + assert ret + + # Test that they were actually added + ret = xattr.list(existing_file) + assert ret == { + 
"spongebob": "squarepants", + "squidward": "plankton", + "crabby": "patty", + } + + # Test file not found + with pytest.raises(CommandExecutionError) as exc: + ret = xattr.write(non_existing_file, "patrick", "jellyfish") + assert f"File not found: {non_existing_file}" in str(exc.value) + + +def test_read(xattr, existing_file, non_existing_file): + """ + Test xattr.read + """ + # Clear existing attributes + ret = xattr.clear(existing_file) + assert ret + + # Write an attribute + ret = xattr.write(existing_file, "spongebob", "squarepants") + assert ret + + # Read the attribute + ret = xattr.read(existing_file, "spongebob") + assert ret == "squarepants" + + # Test file not found + with pytest.raises(CommandExecutionError) as exc: + ret = xattr.read(non_existing_file, "spongebob") + assert f"File not found: {non_existing_file}" in str(exc.value) + + # Test attribute not found + with pytest.raises(CommandExecutionError) as exc: + ret = xattr.read(existing_file, "patrick") + assert "Attribute not found: patrick" in str(exc.value) + + +def test_delete(xattr, existing_file, non_existing_file): + """ + Test xattr.delete + """ + # Clear existing attributes + ret = xattr.clear(existing_file) + assert ret + + # Write some attributes + ret = xattr.write(existing_file, "spongebob", "squarepants") + assert ret + + ret = xattr.write(existing_file, "squidward", "plankton") + assert ret + + ret = xattr.write(existing_file, "crabby", "patty") + assert ret + + # Delete an attribute + ret = xattr.delete(existing_file, "squidward") + assert ret + + # Make sure it was actually deleted + ret = xattr.list(existing_file) + assert ret == { + "spongebob": "squarepants", + "crabby": "patty", + } + + # Test file not found + with pytest.raises(CommandExecutionError) as exc: + ret = xattr.delete(non_existing_file, "spongebob") + assert f"File not found: {non_existing_file}" in str(exc.value) + + # Test attribute not found + with pytest.raises(CommandExecutionError) as exc: + ret = 
xattr.delete(existing_file, "patrick") + assert "Attribute not found: patrick" in str(exc.value) + + +def test_clear(xattr, existing_file, non_existing_file): + """ + Test xattr.clear + """ + # Clear existing attributes + ret = xattr.clear(existing_file) + assert ret + + # Write some attributes + ret = xattr.write(existing_file, "spongebob", "squarepants") + assert ret + + ret = xattr.write(existing_file, "squidward", "plankton") + assert ret + + ret = xattr.write(existing_file, "crabby", "patty") + assert ret + + # Test Clear + ret = xattr.clear(existing_file) + assert ret + + # Test file not found + with pytest.raises(CommandExecutionError) as exc: + ret = xattr.clear(non_existing_file) + assert f"File not found: {non_existing_file}" in str(exc.value) diff --git a/tests/pytests/integration/modules/test_mac_sysctl.py b/tests/pytests/integration/modules/test_mac_sysctl.py new file mode 100644 index 000000000000..a71a96f85d6b --- /dev/null +++ b/tests/pytests/integration/modules/test_mac_sysctl.py @@ -0,0 +1,188 @@ +""" + :codeauthor: Nicole Thomas +""" + +import os +import random + +import pytest + +import salt.utils.files +from salt.exceptions import CommandExecutionError + +pytestmark = [ + pytest.mark.slow_test, + pytest.mark.destructive_test, + pytest.mark.skip_if_not_root, + pytest.mark.skip_unless_on_darwin, +] + + +@pytest.fixture(scope="function") +def assign_cmd(): + return "net.inet.icmp.timestamp" + + +@pytest.fixture(scope="function") +def config_file(): + return "/etc/sysctl.conf" + + +@pytest.fixture(scope="function") +def setup_teardown_vars(salt_call_cli, assign_cmd, config_file): + has_conf = False + ret = salt_call_cli.run("sysctl.get", assign_cmd, config_file) + val = ret.data + + if val is None: + pytest.skip(f"The call 'sysctl.get {assign_cmd}' returned: None") + + # If sysctl file is present, make a copy + # Remove original file so we can replace it with test files + if os.path.isfile(config_file): + has_conf = True + try: + temp_sysctl_config 
= __copy_sysctl(config_file)
+        except CommandExecutionError:
+            msg = "Could not copy file: {0}"
+            raise CommandExecutionError(msg.format(config_file))
+        os.remove(config_file)
+
+    try:
+        yield val
+    finally:
+        ret = salt_call_cli.run("sysctl.get", assign_cmd)
+        if ret.data != val:
+            salt_call_cli.run("sysctl.assign", assign_cmd, val)
+
+        if has_conf is True:
+            # restore original sysctl file
+            __restore_sysctl(config_file, temp_sysctl_config)
+
+        if has_conf is False and os.path.isfile(config_file):
+            # remove sysctl.conf created by tests
+            os.remove(temp_sysctl_config)
+
+
+def test_assign(salt_call_cli, assign_cmd, setup_teardown_vars):
+    """
+    Tests assigning a single sysctl parameter
+    """
+    val = setup_teardown_vars
+
+    try:
+        rand = random.randint(0, 500)
+        while rand == val:
+            rand = random.randint(0, 500)
+        salt_call_cli.run("sysctl.assign", assign_cmd, rand)
+        ret = salt_call_cli.run("sysctl.get", assign_cmd)
+        info = int(ret.data)
+        try:
+            assert rand == info
+        except AssertionError:
+            salt_call_cli.run("sysctl.assign", assign_cmd, val)
+            raise
+    except CommandExecutionError:
+        salt_call_cli.run("sysctl.assign", assign_cmd, val)
+        raise
+
+
+def test_persist_new_file(salt_call_cli, assign_cmd, config_file):
+    """
+    Tests assigning a sysctl value to a system without a sysctl.conf file
+    """
+    # Always start with a clean/known sysctl.conf state
+    if os.path.isfile(config_file):
+        os.remove(config_file)
+    try:
+        salt_call_cli.run("sysctl.persist", assign_cmd, 10)
+        line = f"{assign_cmd}={10}"
+        found = __check_string(config_file, line)
+        assert found
+    except CommandExecutionError:
+        os.remove(config_file)
+        raise
+
+
+def test_persist_already_set(salt_call_cli, assign_cmd, config_file, setup_teardown_vars):
+    """
+    Tests assigning a sysctl value that is already set in sysctl.conf file
+    """
+    # Always start with a clean/known sysctl.conf state
+    if os.path.isfile(config_file):
+        os.remove(config_file)
+    try:
+        salt_call_cli.run("sysctl.persist", assign_cmd, 50)
+ ret = salt_call_cli.run("sysctl.persist", assign_cmd, 50) + assert ret.data == "Already set" + except CommandExecutionError: + os.remove(config_file) + raise + + +def test_persist_apply_change( + salt_call_cli, assign_cmd, config_file, setup_teardown_vars +): + """ + Tests assigning a sysctl value and applying the change to system + """ + val = setup_teardown_vars[0] + + # Always start with a clean/known sysctl.conf state + if os.path.isfile(config_file): + os.remove(config_file) + try: + rand = random.randint(0, 500) + while rand == val: + rand = random.randint(0, 500) + salt_call_cli.run("sysctl.persist", assign_cmd, rand, apply_change=True) + ret = salt_call_cli.run("sysctl.get", assign_cmd) + info = int(ret.data) + assert info == rand + except CommandExecutionError: + os.remove(config_file) + raise + + +def __copy_sysctl(CONFIG): + """ + Copies an existing sysconf file and returns temp file path. Copied + file will be restored in tearDown + """ + # Create new temporary file path and open needed files + temp_path = salt.utils.files.mkstemp() + with salt.utils.files.fopen(CONFIG, "r") as org_conf: + with salt.utils.files.fopen(temp_path, "w") as temp_sysconf: + # write sysctl lines to temp file + for line in org_conf: + temp_sysconf.write(line) + return temp_path + + +def __restore_sysctl(sysctl_config, temp_sysctl_config): + """ + Restores the original sysctl.conf file from temporary copy + """ + # If sysctl testing file exists, delete it + if os.path.isfile(sysctl_config): + os.remove(sysctl_config) + + # write temp lines to sysctl file to restore + with salt.utils.files.fopen(temp_sysctl_config, "r") as temp_sysctl: + with salt.utils.files.fopen(sysctl_config, "w") as sysctl: + for line in temp_sysctl: + sysctl.write(line) + + # delete temporary file + os.remove(temp_sysctl_config) + + +def __check_string(conf_file, to_find): + """ + Returns True if given line is present in file + """ + with salt.utils.files.fopen(conf_file, "r") as f_in: + for line in f_in: 
+ if to_find in salt.utils.stringutils.to_unicode(line): + return True + return False diff --git a/tests/pytests/integration/ssh/test_pillar_compilation.py b/tests/pytests/integration/ssh/test_pillar_compilation.py index 717c17bca8e6..432075d6880c 100644 --- a/tests/pytests/integration/ssh/test_pillar_compilation.py +++ b/tests/pytests/integration/ssh/test_pillar_compilation.py @@ -214,15 +214,19 @@ def test_gpg_pillar(salt_ssh_cli): assert ret.returncode == 0 assert isinstance(ret.data, dict) assert ret.data - assert "secrets" in ret.data - assert "foo" in ret.data["secrets"] - assert "BEGIN PGP MESSAGE" not in ret.data["secrets"]["foo"] - assert ret.data["secrets"]["foo"] == "supersecret" - assert "_errors" not in ret.data + _assert_gpg_pillar(ret.data) + + +def _assert_gpg_pillar(ret): + assert "secrets" in ret + assert "foo" in ret["secrets"] + assert "BEGIN PGP MESSAGE" not in ret["secrets"]["foo"] + assert ret["secrets"]["foo"] == "supersecret" + assert "_errors" not in ret @pytest.mark.usefixtures("pillar_setup") -def test_saltutil_runner(salt_ssh_cli, salt_minion, salt_run_cli): +def test_saltutil_runner(salt_ssh_cli, salt_minion): """ Ensure that during pillar compilation, the cache dir is not overridden. 
For a history, see PR #50489 and issue #36796, @@ -233,9 +237,66 @@ def test_saltutil_runner(salt_ssh_cli, salt_minion, salt_run_cli): assert ret.returncode == 0 assert isinstance(ret.data, dict) assert ret.data - assert "saltutil" in ret.data - assert isinstance(ret.data["saltutil"], dict) - assert ret.data["saltutil"] - assert salt_minion.id in ret.data["saltutil"] - assert ret.data["saltutil"][salt_minion.id] is True - assert "_errors" not in ret.data + _assert_saltutil_runner_pillar(ret.data, salt_minion.id) + + +def _assert_saltutil_runner_pillar(ret, salt_minion_id): + assert "saltutil" in ret + assert isinstance(ret["saltutil"], dict) + assert ret["saltutil"] + assert salt_minion_id in ret["saltutil"] + assert ret["saltutil"][salt_minion_id] is True + assert "_errors" not in ret + + +@pytest.mark.skip_if_binaries_missing("gpg") +@pytest.mark.usefixtures("pillar_setup", "gpg_homedir") +def test_gpg_pillar_orch(salt_ssh_cli, salt_run_cli, gpg_homedir): + """ + Ensure that GPG-encrypted pillars can be decrypted when Salt-SSH is + called during an orchestration or via saltutil.cmd. + This is issue #65670. + """ + # Use salt_run_cli since the config paths are different between + # test master and test minion. + ret = salt_run_cli.run( + "salt.cmd", + "saltutil.cmd", + salt_ssh_cli.target_host, + "pillar.items", + ssh=True, + ignore_host_keys=True, + roster_file=str(salt_ssh_cli.roster_file), + ssh_priv=str(salt_ssh_cli.client_key), + ) + assert ret.returncode == 0 + assert isinstance(ret.data, dict) + assert ret.data + _assert_gpg_pillar(ret.data[salt_ssh_cli.target_host]["return"]) + + +@pytest.mark.usefixtures("pillar_setup") +def test_saltutil_runner_orch(salt_ssh_cli, salt_run_cli, salt_minion): + """ + Ensure that runner calls in the pillar succeed when Salt-SSH is + called during an orchestration or via saltutil.cmd. + This is a variant of issue #65670. + """ + # Use salt_run_cli since the config paths are different between + # test master and test minion. 
+ ret = salt_run_cli.run( + "salt.cmd", + "saltutil.cmd", + salt_ssh_cli.target_host, + "pillar.items", + ssh=True, + ignore_host_keys=True, + roster_file=str(salt_ssh_cli.roster_file), + ssh_priv=str(salt_ssh_cli.client_key), + ) + assert ret.returncode == 0 + assert isinstance(ret.data, dict) + assert ret.data + _assert_saltutil_runner_pillar( + ret.data[salt_ssh_cli.target_host]["return"], salt_minion.id + ) diff --git a/pkg/tests/__init__.py b/tests/pytests/pkg/__init__.py similarity index 100% rename from pkg/tests/__init__.py rename to tests/pytests/pkg/__init__.py diff --git a/pkg/tests/conftest.py b/tests/pytests/pkg/conftest.py similarity index 76% rename from pkg/tests/conftest.py rename to tests/pytests/pkg/conftest.py index d550a118100a..048ad7a238db 100644 --- a/pkg/tests/conftest.py +++ b/tests/pytests/pkg/conftest.py @@ -9,19 +9,10 @@ import yaml from pytestskipmarkers.utils import platform from saltfactories.utils import random_string -from saltfactories.utils.tempfiles import SaltPillarTree, SaltStateTree import salt.config -from tests.support.helpers import ( - CODE_DIR, - TESTS_DIR, - ApiRequest, - SaltMaster, - SaltMasterWindows, - SaltPkgInstall, - TestUser, -) -from tests.support.sminion import create_sminion +from tests.conftest import CODE_DIR +from tests.support.pkg import ApiRequest, SaltMaster, SaltMasterWindows, SaltPkgInstall log = logging.getLogger(__name__) @@ -37,16 +28,6 @@ def version(install_salt): return install_salt.version -@pytest.fixture(scope="session") -def sminion(): - return create_sminion() - - -@pytest.fixture(scope="session") -def grains(sminion): - return sminion.opts["grains"].copy() - - @pytest.fixture(scope="session", autouse=True) def _system_up_to_date( grains, @@ -199,125 +180,20 @@ def salt_factories(salt_factories, salt_factories_root_dir): @pytest.fixture(scope="session") -def state_tree(): - if platform.is_windows(): - file_root = pathlib.Path("C:/salt/srv/salt") - elif platform.is_darwin(): - file_root = 
pathlib.Path("/opt/srv/salt") - else: - file_root = pathlib.Path("/srv/salt") - envs = { - "base": [ - str(file_root), - str(TESTS_DIR / "files"), - ], - } - tree = SaltStateTree(envs=envs) - test_sls_contents = """ - test_foo: - test.succeed_with_changes: - - name: foo - """ - states_sls_contents = """ - update: - pkg.installed: - - name: bash - salt_dude: - user.present: - - name: dude - - fullname: Salt Dude - """ - win_states_sls_contents = """ - create_empty_file: - file.managed: - - name: C://salt/test/txt - salt_dude: - user.present: - - name: dude - - fullname: Salt Dude - """ - with tree.base.temp_file("test.sls", test_sls_contents), tree.base.temp_file( - "states.sls", states_sls_contents - ), tree.base.temp_file("win_states.sls", win_states_sls_contents): - yield tree - - -@pytest.fixture(scope="session") -def pillar_tree(): +def salt_master(salt_factories, install_salt, pkg_tests_account): """ - Add pillar files + Start up a master """ if platform.is_windows(): - pillar_root = pathlib.Path("C:/salt/srv/pillar") + state_tree = "C:/salt/srv/salt" + pillar_tree = "C:/salt/srv/pillar" elif platform.is_darwin(): - pillar_root = pathlib.Path("/opt/srv/pillar") + state_tree = "/opt/srv/salt" + pillar_tree = "/opt/srv/pillar" else: - pillar_root = pathlib.Path("/srv/pillar") - pillar_root.mkdir(mode=0o777, parents=True, exist_ok=True) - tree = SaltPillarTree( - envs={ - "base": [ - str(pillar_root), - ] - }, - ) - top_file_contents = """ - base: - '*': - - test - """ - test_file_contents = """ - info: test - """ - with tree.base.temp_file("top.sls", top_file_contents), tree.base.temp_file( - "test.sls", test_file_contents - ): - yield tree - + state_tree = "/srv/salt" + pillar_tree = "/srv/pillar" -@pytest.fixture(scope="module") -def sls(state_tree): - """ - Add an sls file - """ - test_sls_contents = """ - test_foo: - test.succeed_with_changes: - - name: foo - """ - states_sls_contents = """ - update: - pkg.installed: - - name: bash - salt_dude: - 
user.present: - - name: dude - - fullname: Salt Dude - """ - win_states_sls_contents = """ - create_empty_file: - file.managed: - - name: C://salt/test/txt - salt_dude: - user.present: - - name: dude - - fullname: Salt Dude - """ - with state_tree.base.temp_file( - "tests.sls", test_sls_contents - ), state_tree.base.temp_file( - "states.sls", states_sls_contents - ), state_tree.base.temp_file( - "win_states.sls", win_states_sls_contents - ): - yield - - -@pytest.fixture(scope="session") -def salt_master(salt_factories, install_salt, state_tree, pillar_tree): - """ - Start up a master - """ start_timeout = None # Since the daemons are "packaged" with tiamat, the salt plugins provided # by salt-factories won't be discovered. Provide the required `*_dirs` on @@ -334,18 +210,35 @@ def salt_master(salt_factories, install_salt, state_tree, pillar_tree): config_defaults["enable_fqdns_grains"] = False config_overrides = { "timeout": 30, - "file_roots": state_tree.as_dict(), - "pillar_roots": pillar_tree.as_dict(), - "rest_cherrypy": {"port": 8000, "disable_ssl": True}, + "file_roots": { + "base": [ + state_tree, + ] + }, + "pillar_roots": { + "base": [ + pillar_tree, + ] + }, + "rest_cherrypy": { + "port": 8000, + "disable_ssl": True, + }, "netapi_enable_clients": ["local"], - "external_auth": {"auto": {"saltdev": [".*"]}}, + "external_auth": { + "auto": { + pkg_tests_account.username: [ + ".*", + ], + }, + }, "fips_mode": FIPS_TESTRUN, "open_mode": True, } test_user = False master_config = install_salt.config_path / "master" if master_config.exists(): - with open(master_config) as fp: + with salt.utils.files.fopen(master_config) as fp: data = yaml.safe_load(fp) if data and "user" in data: test_user = True @@ -439,15 +332,18 @@ def salt_master(salt_factories, install_salt, state_tree, pillar_tree): "-R", "salt:salt", str(pathlib.Path("/etc", "salt", "pki", "master")), - ] + ], + check=True, ) - # The engines_dirs is created in .nox path. 
We need to set correct perms - # for the user running the Salt Master - subprocess.run(["chown", "-R", "salt:salt", str(CODE_DIR.parent / ".nox")]) - file_roots = pathlib.Path("/srv/", "salt") - pillar_roots = pathlib.Path("/srv/", "pillar") - for _dir in [file_roots, pillar_roots]: - subprocess.run(["chown", "-R", "salt:salt", str(_dir)]) + + if not platform.is_windows() and not platform.is_darwin(): + # The engines_dirs is created in .nox path. We need to set correct perms + # for the user running the Salt Master + check_paths = [state_tree, pillar_tree, CODE_DIR / ".nox"] + for path in check_paths: + if os.path.exists(path) is False: + continue + subprocess.run(["chown", "-R", "salt:salt", str(path)], check=False) with factory.started(start_timeout=start_timeout): yield factory @@ -501,10 +397,13 @@ def salt_minion(salt_factories, salt_master, install_salt): # which sets root perms on /srv/salt and /srv/pillar since we are running # the test suite as root, but we want to run Salt master as salt if not platform.is_windows() and not platform.is_darwin(): - file_roots = pathlib.Path("/srv/", "salt") - pillar_roots = pathlib.Path("/srv/", "pillar") - for _dir in [file_roots, pillar_roots]: - subprocess.run(["chown", "-R", "salt:salt", str(_dir)]) + state_tree = "/srv/salt" + pillar_tree = "/srv/pillar" + check_paths = [state_tree, pillar_tree, CODE_DIR / ".nox"] + for path in check_paths: + if os.path.exists(path) is False: + continue + subprocess.run(["chown", "-R", "salt:salt", str(path)], check=False) factory.after_terminate( pytest.helpers.remove_stale_minion_key, salt_master, factory.id @@ -528,9 +427,9 @@ def salt_call_cli(salt_minion): return salt_minion.salt_call_cli() -@pytest.fixture(scope="module") -def test_account(salt_call_cli): - with TestUser(salt_call_cli=salt_call_cli) as account: +@pytest.fixture(scope="session") +def pkg_tests_account(): + with pytest.helpers.create_account() as account: yield account @@ -565,6 +464,8 @@ def salt_api(salt_master, 
install_salt, extras_pypath): @pytest.fixture(scope="module") -def api_request(test_account, salt_api): - with ApiRequest(salt_api=salt_api, test_account=test_account) as session: +def api_request(pkg_tests_account, salt_api): + with ApiRequest( + port=salt_api.config["rest_cherrypy"]["port"], account=pkg_tests_account + ) as session: yield session diff --git a/pkg/tests/downgrade/__init__.py b/tests/pytests/pkg/downgrade/__init__.py similarity index 100% rename from pkg/tests/downgrade/__init__.py rename to tests/pytests/pkg/downgrade/__init__.py diff --git a/pkg/tests/downgrade/test_salt_downgrade.py b/tests/pytests/pkg/downgrade/test_salt_downgrade.py similarity index 99% rename from pkg/tests/downgrade/test_salt_downgrade.py rename to tests/pytests/pkg/downgrade/test_salt_downgrade.py index 3ba0de05089f..f6a8ef17a237 100644 --- a/pkg/tests/downgrade/test_salt_downgrade.py +++ b/tests/pytests/pkg/downgrade/test_salt_downgrade.py @@ -1,5 +1,3 @@ -import shutil - import packaging.version import pytest from pytestskipmarkers.utils import platform diff --git a/pkg/tests/download/__init__.py b/tests/pytests/pkg/download/__init__.py similarity index 100% rename from pkg/tests/download/__init__.py rename to tests/pytests/pkg/download/__init__.py diff --git a/pkg/tests/download/test_pkg_download.py b/tests/pytests/pkg/download/test_pkg_download.py similarity index 98% rename from pkg/tests/download/test_pkg_download.py rename to tests/pytests/pkg/download/test_pkg_download.py index f14114d143e6..0fa9089e77dd 100644 --- a/pkg/tests/download/test_pkg_download.py +++ b/tests/pytests/pkg/download/test_pkg_download.py @@ -267,7 +267,7 @@ def setup_redhat_family( try: pytest.helpers.download_file(gpg_file_url, downloads_path / gpg_key_name) - except Exception as exc: + except Exception as exc: # pylint: disable=broad-except pytest.fail(f"Failed to download {gpg_file_url}: {exc}") ret = shell.run("rpm", "--import", str(downloads_path / gpg_key_name), check=False) @@ -333,7 
+333,7 @@ def setup_debian_family( try: pytest.helpers.download_file(gpg_file_url, downloads_path / gpg_key_name) - except Exception as exc: + except Exception as exc: # pylint: disable=broad-except pytest.fail(f"Failed to download {gpg_file_url}: {exc}") salt_sources_path = downloads_path / "salt.list" @@ -384,7 +384,7 @@ def setup_debian_family( try: pytest.helpers.download_file(onedir_url, onedir_location) - except Exception as exc: + except Exception as exc: # pylint: disable=broad-except pytest.fail(f"Failed to download {onedir_url}: {exc}") shell.run("tar", "xvf", str(onedir_location), "-C", str(onedir_extracted)) @@ -439,7 +439,7 @@ def setup_macos( try: pytest.helpers.download_file(onedir_url, onedir_location) - except Exception as exc: + except Exception as exc: # pylint: disable=broad-except pytest.fail(f"Failed to download {onedir_url}: {exc}") shell.run("tar", "xvf", str(onedir_location), "-C", str(onedir_extracted)) @@ -516,7 +516,7 @@ def setup_windows( try: pytest.helpers.download_file(onedir_url, onedir_location) - except Exception as exc: + except Exception as exc: # pylint: disable=broad-except pytest.fail(f"Failed to download {onedir_url}: {exc}") shell.run("unzip", str(onedir_location), "-d", str(onedir_extracted)) diff --git a/pkg/tests/integration/__init__.py b/tests/pytests/pkg/integration/__init__.py similarity index 100% rename from pkg/tests/integration/__init__.py rename to tests/pytests/pkg/integration/__init__.py diff --git a/tests/pytests/pkg/integration/test_check_imports.py b/tests/pytests/pkg/integration/test_check_imports.py new file mode 100644 index 000000000000..eac49f48fac2 --- /dev/null +++ b/tests/pytests/pkg/integration/test_check_imports.py @@ -0,0 +1,102 @@ +import logging +import subprocess + +import pytest +from pytestskipmarkers.utils import platform +from saltfactories.utils.functional import MultiStateResult + +pytestmark = [ + pytest.mark.skip_on_windows, +] + +log = logging.getLogger(__name__) + + 
+CHECK_IMPORTS_SLS_CONTENTS = """ +#!py +import importlib + +def run(): + config = {} + for module in [ + 'templates', 'platform', 'cli', 'executors', 'config', 'wheel', 'netapi', + 'cache', 'proxy', 'transport', 'metaproxy', 'modules', 'tokens', 'matchers', + 'acl', 'auth', 'log', 'engines', 'client', 'returners', 'runners', 'tops', + 'output', 'daemons', 'thorium', 'renderers', 'states', 'cloud', 'roster', + 'beacons', 'pillar', 'spm', 'utils', 'sdb', 'fileserver', 'defaults', + 'ext', 'queues', 'grains', 'serializers' + ]: + import_name = "salt.{}".format(module) + try: + importlib.import_module(import_name) + config[import_name] = { + 'test.succeed_without_changes': [ + { + "name": import_name, + 'comment': "The '{}' import succeeded.".format(import_name) + } + ] + } + except ModuleNotFoundError as err: + config[import_name] = { + 'test.fail_without_changes': [ + { + "name": import_name, + 'comment': "The '{}' import failed. The error was: {}".format(import_name, err) + } + ] + } + + for import_name in ["telnetlib"]: + try: + importlib.import_module(import_name) + config[import_name] = { + 'test.succeed_without_changes': [ + { + "name": import_name, + 'comment': "The '{}' import succeeded.".format(import_name) + } + ] + } + except ModuleNotFoundError as err: + config[import_name] = { + 'test.fail_without_changes': [ + { + "name": import_name, + 'comment': "The '{}' import failed. 
The error was: {}".format(import_name, err) + } + ] + } + return config +""" + + +@pytest.fixture +def state_name(salt_master): + name = "check-imports" + with salt_master.state_tree.base.temp_file( + f"{name}.sls", CHECK_IMPORTS_SLS_CONTENTS + ): + if not platform.is_windows() and not platform.is_darwin(): + subprocess.run( + [ + "chown", + "-R", + "salt:salt", + str(salt_master.state_tree.base.write_path), + ], + check=False, + ) + yield name + + +def test_check_imports(salt_cli, salt_minion, state_name): + """ + Test imports + """ + ret = salt_cli.run("state.sls", state_name, minion_tgt=salt_minion.id) + assert ret.returncode == 0 + assert ret.data + result = MultiStateResult(raw=ret.data) + for state_ret in result: + assert state_ret.result is True diff --git a/pkg/tests/integration/test_clean_zmq_teardown.py b/tests/pytests/pkg/integration/test_clean_zmq_teardown.py similarity index 100% rename from pkg/tests/integration/test_clean_zmq_teardown.py rename to tests/pytests/pkg/integration/test_clean_zmq_teardown.py diff --git a/pkg/tests/integration/test_enabled_disabled.py b/tests/pytests/pkg/integration/test_enabled_disabled.py similarity index 96% rename from pkg/tests/integration/test_enabled_disabled.py rename to tests/pytests/pkg/integration/test_enabled_disabled.py index f5a3f9066911..c6f0d75db8fb 100644 --- a/pkg/tests/integration/test_enabled_disabled.py +++ b/tests/pytests/pkg/integration/test_enabled_disabled.py @@ -1,6 +1,5 @@ import pytest from pytestskipmarkers.utils import platform -from saltfactories.utils.functional import MultiStateResult @pytest.mark.skip_on_windows(reason="Linux test only") diff --git a/pkg/tests/integration/test_help.py b/tests/pytests/pkg/integration/test_help.py similarity index 94% rename from pkg/tests/integration/test_help.py rename to tests/pytests/pkg/integration/test_help.py index 4bc4a49401c8..b5070638fe2d 100644 --- a/pkg/tests/integration/test_help.py +++ b/tests/pytests/pkg/integration/test_help.py @@ -1,7 +1,5 
@@ import subprocess -from pytestskipmarkers.utils import platform - def test_help(install_salt): """ diff --git a/pkg/tests/integration/test_logrotate_config.py b/tests/pytests/pkg/integration/test_logrotate_config.py similarity index 100% rename from pkg/tests/integration/test_logrotate_config.py rename to tests/pytests/pkg/integration/test_logrotate_config.py diff --git a/pkg/tests/integration/test_multi_minion.py b/tests/pytests/pkg/integration/test_multi_minion.py similarity index 100% rename from pkg/tests/integration/test_multi_minion.py rename to tests/pytests/pkg/integration/test_multi_minion.py diff --git a/pkg/tests/integration/test_pip.py b/tests/pytests/pkg/integration/test_pip.py similarity index 90% rename from pkg/tests/integration/test_pip.py rename to tests/pytests/pkg/integration/test_pip.py index d61f9a7d9da5..849dbbbfb8b6 100644 --- a/pkg/tests/integration/test_pip.py +++ b/tests/pytests/pkg/integration/test_pip.py @@ -42,6 +42,14 @@ def wipe_pydeps(shell, install_salt, extras_pypath): shutil.rmtree(dirname, ignore_errors=True) +@pytest.fixture +def pkg_tests_account_environ(pkg_tests_account): + environ = os.environ.copy() + environ["LOGNAME"] = environ["USER"] = pkg_tests_account.username + environ["HOME"] = pkg_tests_account.info.home + return environ + + def test_pip_install(salt_call_cli, install_salt, shell): """ Test pip.install and ensure module can use installed library @@ -98,18 +106,25 @@ def test_pip_install_extras(shell, install_salt, extras_pypath_bin): assert ret.returncode == 0 -def demote(user_uid, user_gid): +def demote(account): def result(): # os.setgid does not remove group membership, so we remove them here so they are REALLY non-root os.setgroups([]) - os.setgid(user_gid) - os.setuid(user_uid) + os.setgid(account.info.gid) + os.setuid(account.info.uid) return result @pytest.mark.skip_on_windows(reason="We can't easily demote users on Windows") -def test_pip_non_root(shell, install_salt, test_account, extras_pypath_bin, 
pypath): +def test_pip_non_root( + shell, + install_salt, + pkg_tests_account, + extras_pypath_bin, + pypath, + pkg_tests_account_environ, +): if install_salt.classic: pytest.skip("We can install non-root for classic packages") check_path = extras_pypath_bin / "pep8" @@ -118,8 +133,8 @@ def test_pip_non_root(shell, install_salt, test_account, extras_pypath_bin, pypa # We should be able to issue a --help without being root ret = subprocess.run( install_salt.binary_paths["salt"] + ["--help"], - preexec_fn=demote(test_account.uid, test_account.gid), - env=test_account.env, + preexec_fn=demote(pkg_tests_account), + env=pkg_tests_account_environ, capture_output=True, check=False, text=True, @@ -139,8 +154,8 @@ def test_pip_non_root(shell, install_salt, test_account, extras_pypath_bin, pypa # Now, we should still not be able to install as non-root ret = subprocess.run( install_salt.binary_paths["pip"] + ["install", "pep8"], - preexec_fn=demote(test_account.uid, test_account.gid), - env=test_account.env, + preexec_fn=demote(pkg_tests_account), + env=pkg_tests_account_environ, capture_output=True, check=False, text=True, diff --git a/pkg/tests/integration/test_pip_upgrade.py b/tests/pytests/pkg/integration/test_pip_upgrade.py similarity index 93% rename from pkg/tests/integration/test_pip_upgrade.py rename to tests/pytests/pkg/integration/test_pip_upgrade.py index 19ed1d6d3364..f306d99df709 100644 --- a/pkg/tests/integration/test_pip_upgrade.py +++ b/tests/pytests/pkg/integration/test_pip_upgrade.py @@ -16,9 +16,8 @@ def test_pip_install(install_salt, salt_call_cli): """ ret = subprocess.run( install_salt.binary_paths["salt"] + ["--versions-report"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, + capture_output=True, + text=True, check=True, shell=False, ) @@ -69,9 +68,8 @@ def test_pip_install(install_salt, salt_call_cli): ret = subprocess.run( install_salt.binary_paths["salt"] + ["--versions-report"], - stdout=subprocess.PIPE, - 
stderr=subprocess.PIPE, - universal_newlines=True, + capture_output=True, + text=True, check=True, shell=False, ) diff --git a/pkg/tests/integration/test_pkg.py b/tests/pytests/pkg/integration/test_pkg.py similarity index 100% rename from pkg/tests/integration/test_pkg.py rename to tests/pytests/pkg/integration/test_pkg.py diff --git a/tests/pytests/pkg/integration/test_python.py b/tests/pytests/pkg/integration/test_python.py new file mode 100644 index 000000000000..9b16cea37964 --- /dev/null +++ b/tests/pytests/pkg/integration/test_python.py @@ -0,0 +1,75 @@ +import subprocess +import textwrap + +import pytest + + +@pytest.fixture +def python_script_bin(install_salt): + # Tiamat builds run scripts via `salt python` + if not install_salt.relenv and not install_salt.classic: + return install_salt.binary_paths["python"][:1] + ["python"] + return install_salt.binary_paths["python"] + + +@pytest.fixture +def check_python_file(tmp_path): + script_path = tmp_path / "check_python.py" + script_path.write_text( + textwrap.dedent( + """ + import sys + + import salt.utils.data + + user_arg = sys.argv + + if user_arg[1] == "raise": + raise Exception("test") + + if salt.utils.data.is_true(user_arg[1]): + sys.exit(0) + else: + sys.exit(1) + """ + ) + ) + return script_path + + +@pytest.mark.parametrize("exp_ret,user_arg", [(1, "false"), (0, "true")]) +def test_python_script( + install_salt, exp_ret, user_arg, python_script_bin, check_python_file +): + ret = install_salt.proc.run( + *( + python_script_bin + + [ + str(check_python_file), + user_arg, + ] + ), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=False, + universal_newlines=True, + ) + + assert ret.returncode == exp_ret, ret.stderr + + +def test_python_script_exception(install_salt, python_script_bin, check_python_file): + ret = install_salt.proc.run( + *( + python_script_bin + + [ + str(check_python_file), + "raise", + ] + ), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=False, + 
universal_newlines=True, + ) + assert "Exception: test" in ret.stderr diff --git a/pkg/tests/integration/test_salt_api.py b/tests/pytests/pkg/integration/test_salt_api.py similarity index 100% rename from pkg/tests/integration/test_salt_api.py rename to tests/pytests/pkg/integration/test_salt_api.py diff --git a/pkg/tests/integration/test_salt_call.py b/tests/pytests/pkg/integration/test_salt_call.py similarity index 52% rename from pkg/tests/integration/test_salt_call.py rename to tests/pytests/pkg/integration/test_salt_call.py index da7834b70b18..69f434a2c40a 100644 --- a/pkg/tests/integration/test_salt_call.py +++ b/tests/pytests/pkg/integration/test_salt_call.py @@ -1,4 +1,7 @@ +import subprocess + import pytest +from pytestskipmarkers.utils import platform def test_salt_call_local(salt_call_cli): @@ -6,8 +9,8 @@ def test_salt_call_local(salt_call_cli): Test salt-call --local test.ping """ ret = salt_call_cli.run("--local", "test.ping") - assert ret.data is True assert ret.returncode == 0 + assert ret.data is True def test_salt_call(salt_call_cli): @@ -15,19 +18,41 @@ def test_salt_call(salt_call_cli): Test salt-call test.ping """ ret = salt_call_cli.run("test.ping") - assert ret.data is True assert ret.returncode == 0 + assert ret.data is True + + +@pytest.fixture +def state_name(salt_master): + name = "some-test-state" + sls_contents = """ + test_foo: + test.succeed_with_changes: + - name: foo + """ + with salt_master.state_tree.base.temp_file(f"{name}.sls", sls_contents): + if not platform.is_windows() and not platform.is_darwin(): + subprocess.run( + [ + "chown", + "-R", + "salt:salt", + str(salt_master.state_tree.base.write_path), + ], + check=False, + ) + yield name -def test_sls(salt_call_cli): +def test_sls(salt_call_cli, state_name): """ Test calling a sls file """ - ret = salt_call_cli.run("state.apply", "test") - assert ret.data, ret + ret = salt_call_cli.run("state.apply", state_name) + assert ret.returncode == 0 + assert ret.data sls_ret = 
ret.data[next(iter(ret.data))] assert sls_ret["changes"]["testing"]["new"] == "Something pretended to change" - assert ret.returncode == 0 def test_salt_call_local_sys_doc_none(salt_call_cli): @@ -35,8 +60,8 @@ def test_salt_call_local_sys_doc_none(salt_call_cli): Test salt-call --local sys.doc none """ ret = salt_call_cli.run("--local", "sys.doc", "none") - assert not ret.data assert ret.returncode == 0 + assert not ret.data def test_salt_call_local_sys_doc_aliases(salt_call_cli): @@ -44,16 +69,18 @@ def test_salt_call_local_sys_doc_aliases(salt_call_cli): Test salt-call --local sys.doc aliases """ ret = salt_call_cli.run("--local", "sys.doc", "aliases.list_aliases") - assert "aliases.list_aliases" in ret.data assert ret.returncode == 0 + assert "aliases.list_aliases" in ret.data -@pytest.mark.skip_on_windows() -def test_salt_call_cmd_run_id_runas(salt_call_cli, test_account, caplog): +@pytest.mark.skip_on_windows +def test_salt_call_cmd_run_id_runas(salt_call_cli, pkg_tests_account, caplog): """ Test salt-call --local cmd_run id with runas """ - ret = salt_call_cli.run("--local", "cmd.run", "id", runas=test_account.username) + ret = salt_call_cli.run( + "--local", "cmd.run", "id", runas=pkg_tests_account.username + ) assert "Environment could not be retrieved for user" not in caplog.text - assert str(test_account.uid) in ret.stdout - assert str(test_account.gid) in ret.stdout + assert str(pkg_tests_account.info.uid) in ret.stdout + assert str(pkg_tests_account.info.gid) in ret.stdout diff --git a/pkg/tests/integration/test_salt_exec.py b/tests/pytests/pkg/integration/test_salt_exec.py similarity index 100% rename from pkg/tests/integration/test_salt_exec.py rename to tests/pytests/pkg/integration/test_salt_exec.py diff --git a/pkg/tests/integration/test_salt_grains.py b/tests/pytests/pkg/integration/test_salt_grains.py similarity index 100% rename from pkg/tests/integration/test_salt_grains.py rename to tests/pytests/pkg/integration/test_salt_grains.py diff --git 
a/pkg/tests/integration/test_salt_key.py b/tests/pytests/pkg/integration/test_salt_key.py similarity index 100% rename from pkg/tests/integration/test_salt_key.py rename to tests/pytests/pkg/integration/test_salt_key.py diff --git a/pkg/tests/integration/test_salt_minion.py b/tests/pytests/pkg/integration/test_salt_minion.py similarity index 100% rename from pkg/tests/integration/test_salt_minion.py rename to tests/pytests/pkg/integration/test_salt_minion.py diff --git a/pkg/tests/integration/test_salt_output.py b/tests/pytests/pkg/integration/test_salt_output.py similarity index 100% rename from pkg/tests/integration/test_salt_output.py rename to tests/pytests/pkg/integration/test_salt_output.py diff --git a/tests/pytests/pkg/integration/test_salt_pillar.py b/tests/pytests/pkg/integration/test_salt_pillar.py new file mode 100644 index 000000000000..f6cacf14b3ca --- /dev/null +++ b/tests/pytests/pkg/integration/test_salt_pillar.py @@ -0,0 +1,44 @@ +import subprocess + +import pytest +from pytestskipmarkers.utils import platform + +pytestmark = [ + pytest.mark.skip_on_windows, +] + + +@pytest.fixture +def pillar_name(salt_master): + name = "info" + top_file_contents = """ + base: + '*': + - test + """ + test_file_contents = f""" + {name}: test + """ + with salt_master.pillar_tree.base.temp_file( + "top.sls", top_file_contents + ), salt_master.pillar_tree.base.temp_file("test.sls", test_file_contents): + if not platform.is_windows() and not platform.is_darwin(): + subprocess.run( + [ + "chown", + "-R", + "salt:salt", + str(salt_master.pillar_tree.base.write_path), + ], + check=False, + ) + yield name + + +def test_salt_pillar(salt_cli, salt_minion, pillar_name): + """ + Test pillar.items + """ + ret = salt_cli.run("pillar.items", minion_tgt=salt_minion.id) + assert ret.returncode == 0 + assert pillar_name in ret.data diff --git a/tests/pytests/pkg/integration/test_salt_state_file.py b/tests/pytests/pkg/integration/test_salt_state_file.py new file mode 100644 index 
000000000000..7b71fcb2365e --- /dev/null +++ b/tests/pytests/pkg/integration/test_salt_state_file.py @@ -0,0 +1,70 @@ +import subprocess +import types + +import pytest +from pytestskipmarkers.utils import platform +from saltfactories.utils.functional import MultiStateResult + + +@pytest.fixture +def files(tmp_path): + return types.SimpleNamespace( + fpath_1=tmp_path / "fpath_1.txt", + fpath_2=tmp_path / "fpath_2.txt", + fpath_3=tmp_path / "fpath_3.txt", + ) + + +@pytest.fixture +def state_name(files, salt_master): + name = "some-state" + sls_contents = f""" + create-fpath-1-file: + file.managed: + - name: {files.fpath_1} + + create-fpath-2-file: + file.managed: + - name: {files.fpath_2} + + create-fpath-3-file: + file.managed: + - name: {files.fpath_3} + """ + assert files.fpath_1.exists() is False + assert files.fpath_2.exists() is False + assert files.fpath_3.exists() is False + with salt_master.state_tree.base.temp_file(f"{name}.sls", sls_contents): + if not platform.is_windows() and not platform.is_darwin(): + subprocess.run( + [ + "chown", + "-R", + "salt:salt", + str(salt_master.state_tree.base.write_path), + ], + check=False, + ) + yield name + + +def test_salt_state_file(salt_cli, salt_minion, state_name, files): + """ + Test state file + """ + assert files.fpath_1.exists() is False + assert files.fpath_2.exists() is False + assert files.fpath_3.exists() is False + + ret = salt_cli.run("state.apply", state_name, minion_tgt=salt_minion.id) + assert ret.returncode == 0 + assert ret.data + if ret.stdout and "Minion did not return" in ret.stdout: + pytest.skip("Skipping test, state took too long to apply") + + for state_return in MultiStateResult(ret.data): + assert state_return.result is True + + assert files.fpath_1.exists() is True + assert files.fpath_2.exists() is True + assert files.fpath_3.exists() is True diff --git a/pkg/tests/integration/test_salt_ufw.py b/tests/pytests/pkg/integration/test_salt_ufw.py similarity index 100% rename from 
pkg/tests/integration/test_salt_ufw.py rename to tests/pytests/pkg/integration/test_salt_ufw.py diff --git a/pkg/tests/integration/test_salt_user.py b/tests/pytests/pkg/integration/test_salt_user.py similarity index 93% rename from pkg/tests/integration/test_salt_user.py rename to tests/pytests/pkg/integration/test_salt_user.py index f785c6854d2c..4538ce79adbb 100644 --- a/pkg/tests/integration/test_salt_user.py +++ b/tests/pytests/pkg/integration/test_salt_user.py @@ -90,7 +90,7 @@ def test_salt_user_home(install_salt): home = "" try: home = proc.stdout.decode().split(":")[5] - except: + except Exception: # pylint: disable=broad-except pass assert home == "/opt/saltstack/salt" @@ -106,7 +106,7 @@ def test_salt_user_group(install_salt): for group in proc.stdout.decode().split(" "): if "salt" in group: in_group = True - except: + except Exception: # pylint: disable=broad-except pass assert in_group is True @@ -124,7 +124,7 @@ def test_salt_user_shell(install_salt): try: shell = proc.stdout.decode().split(":")[6].strip() shell_exists = pathlib.Path(shell).exists() - except: + except Exception: # pylint: disable=broad-except pass assert shell_exists is True @@ -175,7 +175,7 @@ def test_pkg_paths( @pytest.mark.skip_if_binaries_missing("logrotate") def test_paths_log_rotation( - salt_master, salt_minion, salt_call_cli, install_salt, test_account + salt_master, salt_minion, salt_call_cli, install_salt, pkg_tests_account ): """ Test the correct ownership is assigned when log rotation occurs @@ -252,7 +252,8 @@ def test_paths_log_rotation( f"cp -a {_path}/* {str(temp_dir_path_4)}/" ) elif bkup_count > 5: - assert bkupcount < bkup_count_max # force assertion + # force assertion + assert bkup_count < bkup_count_max ret = salt_call_cli.run( "--local", "cmd.run", cmd_to_run @@ -266,7 +267,7 @@ def test_paths_log_rotation( "file.replace", f"{install_salt.conf_dir}/master", "user: salt", - f"user: {test_account.username}", + f"user: {pkg_tests_account.username}", 
"flags=['IGNORECASE']", "append_if_not_found=True", ) @@ -275,7 +276,7 @@ def test_paths_log_rotation( # change ownership of appropriate paths to user for _path in log_pkg_paths: chg_ownership_cmd = ( - f"chown -R {test_account.username} {_path}" + f"chown -R {pkg_tests_account.username} {_path}" ) ret = salt_call_cli.run( "--local", "cmd.run", chg_ownership_cmd @@ -316,7 +317,9 @@ def test_paths_log_rotation( for _path in log_files_list: log_path = pathlib.Path(_path) assert log_path.exists() - assert log_path.owner() == test_account.username + assert ( + log_path.owner() == pkg_tests_account.username + ) assert log_path.stat().st_mode & 0o7777 == 0o640 # cleanup @@ -327,7 +330,7 @@ def test_paths_log_rotation( "--local", "file.replace", f"{install_salt.conf_dir}/master", - f"user: {test_account.username}", + f"user: {pkg_tests_account.username}", "user: salt", "flags=['IGNORECASE']", "append_if_not_found=True", @@ -349,7 +352,8 @@ def test_paths_log_rotation( # use --update since /opt/saltstack/salt and would get SIGSEGV since mucking with running code cmd_to_run = f"cp -a --update --force {str(temp_dir_path_4)}/* {_path}/" elif bkup_count > 5: - assert bkupcount < bkup_count_max # force assertion + # force assertion + assert bkup_count < bkup_count_max ret = salt_call_cli.run( "--local", "cmd.run", cmd_to_run diff --git a/pkg/tests/integration/test_ssm.py b/tests/pytests/pkg/integration/test_ssm.py similarity index 100% rename from pkg/tests/integration/test_ssm.py rename to tests/pytests/pkg/integration/test_ssm.py diff --git a/pkg/tests/integration/test_systemd_config.py b/tests/pytests/pkg/integration/test_systemd_config.py similarity index 100% rename from pkg/tests/integration/test_systemd_config.py rename to tests/pytests/pkg/integration/test_systemd_config.py diff --git a/pkg/tests/integration/test_version.py b/tests/pytests/pkg/integration/test_version.py similarity index 99% rename from pkg/tests/integration/test_version.py rename to 
tests/pytests/pkg/integration/test_version.py index eec695547836..89a7e83db069 100644 --- a/pkg/tests/integration/test_version.py +++ b/tests/pytests/pkg/integration/test_version.py @@ -1,6 +1,5 @@ import os.path import pathlib -import re import subprocess import pytest @@ -32,6 +31,7 @@ def test_salt_versions_report_master(install_salt): ret.stdout.matcher.fnmatch_lines(["*Salt Version:*"]) py_version = subprocess.run( [str(python_bin), "--version"], + check=True, capture_output=True, ).stdout py_version = py_version.decode().strip().replace(" ", ": ") diff --git a/pkg/tests/support/__init__.py b/tests/pytests/pkg/upgrade/__init__.py similarity index 100% rename from pkg/tests/support/__init__.py rename to tests/pytests/pkg/upgrade/__init__.py diff --git a/pkg/tests/upgrade/test_salt_upgrade.py b/tests/pytests/pkg/upgrade/test_salt_upgrade.py similarity index 100% rename from pkg/tests/upgrade/test_salt_upgrade.py rename to tests/pytests/pkg/upgrade/test_salt_upgrade.py diff --git a/tests/pytests/unit/grains/test_core.py b/tests/pytests/unit/grains/test_core.py index c75794e5418e..a889fbb22857 100644 --- a/tests/pytests/unit/grains/test_core.py +++ b/tests/pytests/unit/grains/test_core.py @@ -7,6 +7,7 @@ """ import errno +import locale import logging import os import pathlib @@ -15,6 +16,7 @@ import sys import tempfile import textwrap +import uuid from collections import namedtuple import pytest @@ -177,6 +179,15 @@ def test_network_grains_secondary_ip(tmp_path): "2001:4860:4860::8888", ] + with patch("salt.utils.platform.is_proxy", return_value=True): + assert core.ip6_interfaces() == {} + + with patch("salt.utils.platform.is_proxy", return_value=True): + assert core.ip4_interfaces() == {} + + with patch("salt.utils.platform.is_proxy", return_value=True): + assert core.ip_interfaces() == {} + def test_network_grains_cache(tmp_path): """ @@ -335,6 +346,16 @@ def test_parse_cpe_name_wfn(cpe, cpe_ret): "part": None, }, ), + ( + 
"cpe:2.3:o:microsoft:windows_xp:5.1.601", + { + "phase": None, + "version": "5.1.601", + "product": "windows_xp", + "vendor": "microsoft", + "part": "operating system", + }, + ), ), ) def test_parse_cpe_name_v23(cpe, cpe_ret): @@ -1631,6 +1652,17 @@ def test_linux_memdata(): assert memdata.get("mem_total") == 15895 assert memdata.get("swap_total") == 4676 + _proc_meminfo = textwrap.dedent( + """\ + MemTotal: 16277028 kB + + SwapTotal: 4789244 kB""" + ) + with patch("salt.utils.files.fopen", mock_open(read_data=_proc_meminfo)): + memdata = core._linux_memdata() + assert memdata.get("mem_total") == 15895 + assert memdata.get("swap_total") == 4676 + @pytest.mark.skip_on_windows def test_bsd_memdata(): @@ -1991,6 +2023,16 @@ def test_fqdn_return(ipv4_tuple, ipv6_tuple): ) +@pytest.mark.skip_unless_on_linux +def test_fqdn_proxy_return_empty(): + """ + test ip_fqdn returns empty for proxy minions + """ + + with patch.object(salt.utils.platform, "is_proxy", MagicMock(return_value=True)): + assert core.ip_fqdn() == {} + + @pytest.mark.skip_unless_on_linux def test_fqdn6_empty(ipv4_tuple, ipv6_tuple): """ @@ -2136,6 +2178,19 @@ def test_dns_return(ipv4_tuple, ipv6_tuple): ): assert core.dns() == ret + with patch("os.path.exists", return_value=False), patch.object( + salt.utils.dns, "parse_resolv", MagicMock(return_value=resolv_mock) + ): + assert core.dns() == ret + + with patch.object(salt.utils.platform, "is_windows", MagicMock(return_value=True)): + assert core.dns() == {} + + with patch.object( + salt.utils.platform, "is_windows", MagicMock(return_value=True) + ), patch("salt.grains.core.__opts__", {"proxyminion": True}): + assert core.dns() == {} + def test_enable_fqdns_false(): """ @@ -2569,19 +2624,52 @@ def test_osx_memdata(): test osx memdata """ - def _cmd_side_effect(cmd): + def _cmd_side_effect_megabyte(cmd): if "hw.memsize" in cmd: return "4294967296" elif "vm.swapusage" in cmd: return "total = 0.00M used = 0.00M free = 0.00M (encrypted)" with patch.dict( - 
core.__salt__, {"cmd.run": MagicMock(side_effect=_cmd_side_effect)} + core.__salt__, {"cmd.run": MagicMock(side_effect=_cmd_side_effect_megabyte)} + ), patch("salt.utils.path.which", MagicMock(return_value="/usr/sbin/sysctl")): + ret = core._osx_memdata() + assert ret["swap_total"] == 0 + assert ret["mem_total"] == 4096 + + def _cmd_side_effect_kilobyte(cmd): + if "hw.memsize" in cmd: + return "4294967296" + elif "vm.swapusage" in cmd: + return "total = 0.00K used = 0.00K free = 0.00K (encrypted)" + + with patch.dict( + core.__salt__, {"cmd.run": MagicMock(side_effect=_cmd_side_effect_kilobyte)} ), patch("salt.utils.path.which", MagicMock(return_value="/usr/sbin/sysctl")): ret = core._osx_memdata() assert ret["swap_total"] == 0 assert ret["mem_total"] == 4096 + def _cmd_side_effect_gigabyte(cmd): + if "hw.memsize" in cmd: + return "4294967296" + elif "vm.swapusage" in cmd: + return "total = 0.00G used = 0.00G free = 0.00G (encrypted)" + + with patch.dict( + core.__salt__, {"cmd.run": MagicMock(side_effect=_cmd_side_effect_gigabyte)} + ), patch("salt.utils.path.which", MagicMock(return_value="/usr/sbin/sysctl")): + ret = core._osx_memdata() + assert ret["swap_total"] == 0 + assert ret["mem_total"] == 4096 + + with patch.dict( + core.__salt__, {"cmd.run": MagicMock(side_effect=_cmd_side_effect_gigabyte)} + ), patch("salt.utils.path.which", MagicMock(return_value="/usr/sbin/sysctl")): + ret = core._memdata({"kernel": "Darwin"}) + assert ret["swap_total"] == 0 + assert ret["mem_total"] == 4096 + @pytest.mark.skipif(not core._DATEUTIL_TZ, reason="Missing dateutil.tz") def test_locale_info_tzname(): @@ -2668,6 +2756,36 @@ def test_locale_info_no_tz_tzname(): assert ret["locale_info"]["timezone"] == "unknown" +def test_locale_info_proxy_empty(): + with patch.object(salt.utils.platform, "is_proxy", return_value=True): + ret = core.locale_info() + assert ret == {"locale_info": {}} + + +@pytest.mark.skipif(not core._DATEUTIL_TZ, reason="Missing dateutil.tz") +def 
test_locale_getlocale_exception(): + # mock datetime.now().tzname() + # cant just mock now because it is read only + tzname = Mock(return_value="MDT_FAKE") + now_ret_object = Mock(tzname=tzname) + now = Mock(return_value=now_ret_object) + datetime = Mock(now=now) + + with patch.object( + core, "datetime", datetime=datetime + ) as datetime_module, patch.object( + core.dateutil.tz, "tzlocal", return_value=object + ) as tzlocal, patch.object( + salt.utils.platform, "is_proxy", return_value=False + ) as is_proxy, patch.object( + locale, "getlocale", side_effect=Exception() + ): + ret = core.locale_info() + + assert ret["locale_info"]["defaultlanguage"] == "unknown" + assert ret["locale_info"]["defaultencoding"] == "unknown" + + def test_cwd_exists(): cwd_grain = core.cwd() @@ -2798,6 +2916,10 @@ def test_virtual_has_virtual_grain(): {"kernel": "Windows", "manufacturer": "Parallels Software"}, {"virtual": "Parallels"}, ), + ( + {"kernel": "Windows", "manufacturer": None, "productname": None}, + {"virtual": "physical"}, + ), ], ) def test__windows_virtual(osdata, expected): @@ -2889,7 +3011,7 @@ def test_osdata_virtual_key_win(): @pytest.mark.skip_unless_on_linux -def test_linux_cpu_data_num_cpus(): +def test_linux_cpu_data(): cpuinfo_list = [] for i in range(0, 20): cpuinfo_dict = { @@ -2913,6 +3035,60 @@ def test_linux_cpu_data_num_cpus(): ret = core._linux_cpudata() assert "num_cpus" in ret assert len(cpuinfo_list) == ret["num_cpus"] + assert "cpu_flags" in ret + assert "cpu_model" in ret + + cpuinfo_list = [] + cpuinfo_dict = { + "processors": 20, + "cpu_family": 6, + "model_name": "Intel(R) Core(TM) i7-7700HQ CPU @ 2.80GHz", + "Features": "fpu vme de pse tsc msr pae mce cx8 apic sep mtrr", + } + + cpuinfo_list.append(cpuinfo_dict) + cpuinfo_content = "" + for item in cpuinfo_list: + cpuinfo_content += ( + "# processors: {}\n" "cpu family: {}\n" "vendor_id: {}\n" "Features: {}\n\n" + ).format( + item["processors"], item["cpu_family"], item["model_name"], 
item["Features"] + ) + + with patch.object(os.path, "isfile", MagicMock(return_value=True)), patch( + "salt.utils.files.fopen", mock_open(read_data=cpuinfo_content) + ): + ret = core._linux_cpudata() + assert "num_cpus" in ret + assert "cpu_flags" in ret + assert "cpu_model" in ret + + cpuinfo_dict = { + "Processor": "ARMv6-compatible processor rev 7 (v6l)", + "BogoMIPS": "697.95", + "Features": "swp half thumb fastmult vfp edsp java tls", + "CPU implementer": "0x41", + "CPU architecture": "7", + "CPU variant": "0x0", + "CPU part": "0xb76", + "CPU revision": "7", + "Hardware": "BCM2708", + "Revision": "0002", + "Serial": "00000000", + } + + cpuinfo_content = "" + for item in cpuinfo_dict: + cpuinfo_content += f"{item}: {cpuinfo_dict[item]}\n" + cpuinfo_content += "\n\n" + + with patch.object(os.path, "isfile", MagicMock(return_value=True)), patch( + "salt.utils.files.fopen", mock_open(read_data=cpuinfo_content) + ): + ret = core._linux_cpudata() + assert "num_cpus" in ret + assert "cpu_flags" in ret + assert "cpu_model" in ret @pytest.mark.skip_on_windows @@ -2997,6 +3173,16 @@ def test_saltversioninfo(): assert all([isinstance(x, int) for x in info]) +def test_saltversion(): + """ + test saltversion core grain. 
+ """ + ret = core.saltversion() + info = ret["saltversion"] + assert isinstance(ret, dict) + assert isinstance(info, str) + + def test_path(): comps = ["foo", "bar", "baz"] path = os.path.pathsep.join(comps) @@ -3140,7 +3326,25 @@ def _open_mock(file_name, *args, **kwargs): assert core.kernelparams() == expected -def test_linux_gpus(): +@pytest.mark.skip_unless_on_linux +def test_kernelparams_file_not_found_error(): + with patch("salt.utils.files.fopen", MagicMock()) as fopen_mock: + fopen_mock.side_effect = FileNotFoundError() + ret = core.kernelparams() + assert ret == {} + + +@pytest.mark.skip_unless_on_linux +def test_kernelparams_oserror(caplog): + with patch("salt.utils.files.fopen", MagicMock()) as fopen_mock: + with caplog.at_level(logging.DEBUG): + fopen_mock.side_effect = OSError() + ret = core.kernelparams() + assert ret == {} + assert "Failed to read /proc/cmdline: " in caplog.messages + + +def test_linux_gpus(caplog): """ Test GPU detection on Linux systems """ @@ -3205,6 +3409,15 @@ def _cmd_side_effect(cmd): "intel", ], # Display controller ] + + with patch("salt.grains.core.__opts__", {"enable_lspci": False}): + ret = core._linux_gpu_data() + assert ret == {} + + with patch("salt.grains.core.__opts__", {"enable_gpu_grains": False}): + ret = core._linux_gpu_data() + assert ret == {} + with patch( "salt.utils.path.which", MagicMock(return_value="/usr/sbin/lspci") ), patch.dict(core.__salt__, {"cmd.run": MagicMock(side_effect=_cmd_side_effect)}): @@ -3217,6 +3430,37 @@ def _cmd_side_effect(cmd): assert ret[count]["vendor"] == device[3] count += 1 + with patch( + "salt.utils.path.which", MagicMock(return_value="/usr/sbin/lspci") + ), patch.dict(core.__salt__, {"cmd.run": MagicMock(side_effect=OSError)}): + ret = core._linux_gpu_data() + assert ret == {"num_gpus": 0, "gpus": []} + + bad_gpu_data = textwrap.dedent( + """ + Class: VGA compatible controller + Vendor: Advanced Micro Devices, Inc. 
[AMD/ATI] + Device: Vega [Radeon RX Vega]] + SVendor; Evil Corp. + SDevice: Graphics XXL + Rev: c1 + NUMANode: 0""" + ) + + with patch( + "salt.utils.path.which", MagicMock(return_value="/usr/sbin/lspci") + ), patch.dict( + core.__salt__, {"cmd.run": MagicMock(return_value=bad_gpu_data)} + ), caplog.at_level( + logging.WARN + ): + core._linux_gpu_data() + assert ( + "Error loading grains, unexpected linux_gpu_data output, " + "check that you have a valid shell configured and permissions " + "to run lspci command" in caplog.messages + ) + def test_get_server_id(): expected = {"server_id": 94889706} @@ -3226,6 +3470,9 @@ def test_get_server_id(): with patch.dict(core.__opts__, {"id": "otherid"}): assert core.get_server_id() != expected + with patch.object(salt.utils.platform, "is_proxy", MagicMock(return_value=True)): + assert core.get_server_id() == {} + def test_linux_cpudata_ppc64le(): cpuinfo = """processor : 0 @@ -3621,3 +3868,1169 @@ def _mock_is_file(filename): assert virtual_grains["virtual"] == "Nitro" assert virtual_grains["virtual_subtype"] == "Amazon EC2" + + +def test_append_domain(): + """ + test append_domain + """ + + assert core.append_domain() == {} + + with patch.object(salt.utils.platform, "is_proxy", MagicMock(return_value=True)): + assert core.append_domain() == {} + + with patch("salt.grains.core.__opts__", {"append_domain": "example.com"}): + assert core.append_domain() == {"append_domain": "example.com"} + + +def test_hostname(): + """ + test append_domain + """ + + with patch.object(salt.utils.platform, "is_proxy", MagicMock(return_value=True)): + assert core.hostname() == {} + + with patch("salt.grains.core.__FQDN__", None), patch( + "socket.gethostname", MagicMock(return_value=None) + ), patch("salt.utils.network.get_fqhostname", MagicMock(return_value=None)): + assert core.hostname() == { + "localhost": None, + "fqdn": "localhost.localdomain", + "host": "localhost", + "domain": "localdomain", + } + + +def test_zmqversion(): + """ + test 
zmqversion + """ + + ret = core.zmqversion() + assert "zmqversion" in ret + + with patch.dict("sys.modules", {"zmq": None}): + ret = core.zmqversion() + assert "zmqversion" not in ret + + +def test_saltpath(): + """ + test saltpath + """ + + ret = core.saltpath() + assert "saltpath" in ret + + +def test_pythonexecutable(): + """ + test pythonexecutable + """ + python_executable = sys.executable + + ret = core.pythonexecutable() + assert "pythonexecutable" in ret + assert ret["pythonexecutable"] == python_executable + + +def test_pythonpath(): + """ + test pythonpath + """ + python_path = sys.path + + ret = core.pythonpath() + assert "pythonpath" in ret + assert ret["pythonpath"] == python_path + + +def test_pythonversion(): + """ + test pythonversion + """ + python_version = [*sys.version_info] + + ret = core.pythonversion() + assert "pythonversion" in ret + assert ret["pythonversion"] == python_version + + +@pytest.mark.skip_unless_on_linux +def test_get_machine_id(): + """ + test get_machine_id + """ + + ret = core.get_machine_id() + assert "machine_id" in ret + + with patch.object(os.path, "exists", return_value=False): + ret = core.get_machine_id() + assert ret == {} + + with patch.object(platform, "system", return_value="AIX"): + with patch.object(core, "_aix_get_machine_id", return_value="AIX-MACHINE-ID"): + ret = core.get_machine_id() + assert ret == "AIX-MACHINE-ID" + + +def test_hwaddr_interfaces(): + """ + test hwaddr_interfaces + """ + + mock_get_interfaces = { + "lo": { + "up": True, + "hwaddr": "00:00:00:00:00:00", + "inet": [ + { + "address": "127.0.0.1", + "netmask": "255.0.0.0", + "broadcast": None, + "label": "lo", + } + ], + "inet6": [], + }, + "eth1": { + "up": True, + "hwaddr": "00:00:00:00:00:00", + "inet": [ + { + "address": "0.0.0.0", + "netmask": "255.255.255.0", + "broadcast": "0.0.0.0", + "label": "wlo1", + } + ], + "inet6": [], + }, + } + with patch.object(core, "_get_interfaces", return_value=mock_get_interfaces): + ret = 
core.hwaddr_interfaces() + assert "hwaddr_interfaces" in ret + assert ret["hwaddr_interfaces"] == { + "lo": "00:00:00:00:00:00", + "eth1": "00:00:00:00:00:00", + } + + +def test_id(): + """ + test id + """ + ret = core.id_() + assert "id" in ret + + with patch("salt.grains.core.__opts__", {"id": "test_id_minion_id"}): + ret = core.id_() + assert "id" in ret + assert ret["id"] == "test_id_minion_id" + + +def test__linux_bin_exists(): + """ + test __linux_bin_exists + """ + mock_retcode = [salt.exceptions.CommandExecutionError, 0] + with patch.dict( + core.__salt__, {"cmd.retcode": MagicMock(side_effect=mock_retcode)} + ): + ret = core._linux_bin_exists("ls") + assert ret + + mock_retcode = salt.exceptions.CommandExecutionError + mock_runall = [ + {"pid": 100, "retcode": 0, "stdout": "ls: /usr/bin/ls", "stderr": ""} + ] + with patch.dict( + core.__salt__, {"cmd.retcode": MagicMock(side_effect=mock_retcode)} + ): + with patch.dict( + core.__salt__, {"cmd.run_all": MagicMock(side_effect=mock_runall)} + ): + ret = core._linux_bin_exists("ls") + assert ret + + mock_retcode = salt.exceptions.CommandExecutionError + mock_runall = salt.exceptions.CommandExecutionError + + with patch.dict( + core.__salt__, {"cmd.retcode": MagicMock(side_effect=mock_retcode)} + ): + with patch.dict( + core.__salt__, {"cmd.run_all": MagicMock(side_effect=mock_runall)} + ): + ret = core._linux_bin_exists("ls") + assert not ret + + +def test__parse_lsb_release(): + """ + test __parse_lsb_release + """ + mock_lsb_file = """ +DISTRIB_ID="ManjaroLinux" +DISTRIB_RELEASE="23.0.2" +DISTRIB_CODENAME="Uranos" +DISTRIB_DESCRIPTION="Manjaro Linux" +""" + + with patch("salt.utils.files.fopen", mock_open(read_data=mock_lsb_file)): + ret = core._parse_lsb_release() + assert ret == { + "lsb_distrib_id": "ManjaroLinux", + "lsb_distrib_release": "23.0.2", + "lsb_distrib_codename": "Uranos", + "lsb_distrib_description": "Manjaro Linux", + } + + with patch("salt.utils.files.fopen", side_effect=OSError): + ret = 
core._parse_lsb_release() + assert ret == {} + + +def test__osx_gpudata(): + """ + test __osx_gpudata + """ + mock_gpudata = """ +Graphics/Displays: + + NVIDIA GeForce 320M: + + Chipset Model: NVIDIA GeForce 320M + Type: GPU + VRAM (Total): 256 MB + Vendor: NVIDIA (0x10de) + Device ID: 0x08a0 + Revision ID: 0x00a2 + ROM Revision: 3533 + Displays: + Color LCD: + Display Type: LCD + Resolution: 1280 x 800 + UI Looks like: 1280 x 800 + Framebuffer Depth: 24-Bit Color (ARGB8888) + Main Display: Yes + Mirror: Off + Online: Yes + Automatically Adjust Brightness: Yes + Connection Type: Internal + +""" + with patch.dict(core.__salt__, {"cmd.run": MagicMock(return_value=mock_gpudata)}): + ret = core._osx_gpudata() + assert ret["num_gpus"] == 1 + assert ret["gpus"] == [{"vendor": "nvidia", "model": "GeForce 320M"}] + + with patch.dict(core.__salt__, {"cmd.run": MagicMock(side_effect=OSError)}): + ret = core._osx_gpudata() + assert ret == {"num_gpus": 0, "gpus": []} + + +def test_get_master(): + """ + test get_master + """ + ret = core.get_master() + assert "master" in ret + + with patch("salt.grains.core.__opts__", {"master": "test_master_id"}): + ret = core.get_master() + assert "master" in ret + assert ret["master"] == "test_master_id" + + +def test__selinux(): + """ + test _selinux + """ + with patch.dict( + core.__salt__, + { + "cmd.run": MagicMock(return_value="Enforcing"), + "cmd.retcode": MagicMock(return_value=1), + }, + ), patch.object(core, "_linux_bin_exists", MagicMock(return_value=False)): + ret = core._selinux() + assert ret == {"enabled": False} + + with patch.dict( + core.__salt__, + { + "cmd.run": MagicMock(return_value="Enforcing"), + "cmd.retcode": MagicMock(return_value=0), + }, + ), patch.object(core, "_linux_bin_exists", MagicMock(return_value=True)): + ret = core._selinux() + assert ret == {"enabled": True, "enforced": "Enforcing"} + + with patch.dict( + core.__salt__, + { + "cmd.run": MagicMock(return_value="Disabled"), + "cmd.retcode": 
MagicMock(return_value=0), + }, + ), patch.object(core, "_linux_bin_exists", MagicMock(return_value=True)): + ret = core._selinux() + assert ret == {"enabled": True, "enforced": "Disabled"} + + +def test__systemd(): + """ + test _systemd + """ + with patch.dict( + core.__salt__, + { + "cmd.run": MagicMock( + return_value=( + "systemd 254 (254.3-1)\n+PAM +AUDIT -SELINUX -APPARMOR -IMA +SMACK " + "+SECCOMP +GCRYPT +GNUTLS +OPENSSL +ACL +BLKID +CURL +ELFUTILS " + "+FIDO2 +IDN2 -IDN +IPTC +KMOD +LIBCRYPTSETUP +LIBFDISK +PCRE2 " + "-PWQUALITY +P11KIT -QRENCODE +TPM2 +BZIP2 +LZ4 +XZ +ZLIB +ZSTD " + "+BPF_FRAMEWORK +XKBCOMMON +UTMP -SYSVINIT default-hierarchy=unified" + ) + ), + }, + ): + ret = core._systemd() + assert "version" in ret + assert "features" in ret + assert ret["version"] == "254" + assert ret["features"] == ( + "+PAM +AUDIT -SELINUX -APPARMOR -IMA +SMACK +SECCOMP +GCRYPT +GNUTLS +OPENSSL " + "+ACL +BLKID +CURL +ELFUTILS +FIDO2 +IDN2 -IDN +IPTC +KMOD +LIBCRYPTSETUP " + "+LIBFDISK +PCRE2 -PWQUALITY +P11KIT -QRENCODE +TPM2 +BZIP2 +LZ4 +XZ " + "+ZLIB +ZSTD +BPF_FRAMEWORK +XKBCOMMON +UTMP -SYSVINIT default-hierarchy=unified" + ) + + +def test__clean_value_uuid(caplog): + """ + test _clean_value uuid + """ + ret = core._clean_value("key", None) + assert not ret + + ret = core._clean_value("uuid", "49e40e2a-63b4-11ee-8c99-0242ac120002") + assert ret == "49e40e2a-63b4-11ee-8c99-0242ac120002" + + with patch.object(uuid, "UUID", MagicMock()) as mock_uuid: + with caplog.at_level(logging.TRACE): + mock_uuid.side_effect = ValueError() + ret = core._clean_value("uuid", "49e40e2a-63b4-11ee-8c99-0242ac120002") + assert not ret + assert ( + "HW uuid value 49e40e2a-63b4-11ee-8c99-0242ac120002 is an invalid UUID" + in caplog.messages + ) + + +@pytest.mark.parametrize( + "grain,value,expected", + ( + ("kernelrelease", "10.0.14393", "10.0.14393"), + ("kernelversion", "10.0.14393", "10.0.14393"), + ("osversion", "10.0.14393", "10.0.14393"), + ("osrelease", "2016Server", 
"2016Server"), + ("osrelease", "to be filled", None), + ("osmanufacturer", "Microsoft Corporation", "Microsoft Corporation"), + ("manufacturer", "innotek GmbH", "innotek GmbH"), + ("manufacturer", "to be filled", None), + ("productname", "VirtualBox", "VirtualBox"), + ("biosversion", "Default System BIOS", "Default System BIOS"), + ("serialnumber", "0", None), + ( + "osfullname", + "Microsoft Windows Server 2016 Datacenter", + "Microsoft Windows Server 2016 Datacenter", + ), + ( + "timezone", + "(UTC-08:00) Pacific Time (US & Canada)", + "(UTC-08:00) Pacific Time (US & Canada)", + ), + ( + "uuid", + "d013f373-7331-4a9f-848b-72e379fbe7bf", + "d013f373-7331-4a9f-848b-72e379fbe7bf", + ), + ("windowsdomain", "WORKGROUP", "WORKGROUP"), + ("windowsdomaintype", "Workgroup", "Workgroup"), + ("motherboard.productname", "VirtualBox", "VirtualBox"), + ("motherboard.serialnumber", "0", None), + ("model_name", "Macbook Pro", "Macbook Pro"), + ("system_serialnumber", "W80322MWATM", "W80322MWATM"), + ), +) +def test__clean_value_multiple_values(grain, value, expected): + """ + test _clean_value multiple values + """ + ret = core._clean_value(grain, value) + assert ret == expected + + +def test__linux_init_system(caplog): + """ + test _linux_init_system + """ + with patch("os.stat", MagicMock()) as mock_os_stat: + mock_os_stat.side_effect = OSError() + with patch("salt.utils.files.fopen", MagicMock()) as mock_fopen: + mock_fopen.side_effect = OSError() + ret = core._linux_init_system() + assert ret == "unknown" + + with patch("os.stat", MagicMock()) as mock_os_stat: + mock_os_stat.side_effect = OSError() + with patch("salt.utils.files.fopen", mock_open(read_data="init-not-found")): + mock_fopen.side_effect = OSError() + ret = core._linux_init_system() + assert ret == "unknown" + + with patch("os.stat", MagicMock()) as mock_os_stat: + mock_os_stat.side_effect = OSError() + with patch( + "salt.utils.files.fopen", mock_open(read_data="/usr/sbin/supervisord") + ): + with 
patch("salt.utils.path.which", return_value="/usr/sbin/supervisord"): + ret = core._linux_init_system() + assert ret == "supervisord" + + with patch("os.stat", MagicMock()) as mock_os_stat: + mock_os_stat.side_effect = OSError() + with patch( + "salt.utils.files.fopen", mock_open(read_data="/usr/sbin/dumb-init") + ): + with patch( + "salt.utils.path.which", + side_effect=["/usr/sbin/dumb-init", "", "/usr/sbin/dumb-init"], + ): + ret = core._linux_init_system() + assert ret == "dumb-init" + + with patch("os.stat", MagicMock()) as mock_os_stat: + mock_os_stat.side_effect = OSError() + with patch("salt.utils.files.fopen", mock_open(read_data="/usr/sbin/tini")): + with patch( + "salt.utils.path.which", + side_effect=["/usr/sbin/tini", "", "", "/usr/sbin/tini"], + ): + ret = core._linux_init_system() + assert ret == "tini" + + with patch("os.stat", MagicMock()) as mock_os_stat: + mock_os_stat.side_effect = OSError() + with patch("salt.utils.files.fopen", mock_open(read_data="runit")): + with patch("salt.utils.path.which", side_effect=["", "", "", ""]): + ret = core._linux_init_system() + assert ret == "runit" + + with patch("os.stat", MagicMock()) as mock_os_stat: + mock_os_stat.side_effect = OSError() + with patch("salt.utils.files.fopen", mock_open(read_data="/sbin/my_init")): + with patch("salt.utils.path.which", side_effect=["", "", "", ""]): + ret = core._linux_init_system() + assert ret == "runit" + + with patch("os.stat", MagicMock()) as mock_os_stat: + mock_os_stat.side_effect = OSError() + with patch("salt.utils.files.fopen", mock_open(read_data="systemd")): + with patch("salt.utils.path.which", side_effect=[IndexError(), "", "", ""]): + with caplog.at_level(logging.WARNING): + ret = core._linux_init_system() + assert ret == "unknown" + assert ( + "Unable to fetch data from /proc/1/cmdline" in caplog.messages + ) + + +def test_default_gateway(): + """ + test default_gateway + """ + + with patch("salt.utils.path.which", return_value=""): + ret = 
core.default_gateway() + assert ret == {} + + with patch("salt.utils.path.which", return_value="/usr/sbin/ip"): + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(return_value="")}, + ): + + ret = core.default_gateway() + assert ret == {"ip_gw": False, "ip4_gw": False, "ip6_gw": False} + + with patch("salt.utils.path.which", return_value="/usr/sbin/ip"): + ip4_route = """default via 172.23.5.3 dev enp7s0u2u4 proto dhcp src 172.23.5.173 metric 100 +172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 +172.19.0.0/16 dev docker_gwbridge proto kernel scope link src 172.19.0.1 +172.23.5.0/24 dev enp7s0u2u4 proto kernel scope link src 172.23.5.173 metric 100 +192.168.56.0/24 dev vboxnet0 proto kernel scope link src 192.168.56.1""" + + ip6_route = """2603:8001:b402:cc00::/64 dev enp7s0u2u4 proto ra metric 100 pref medium +fe80::/64 dev enp7s0u2u4 proto kernel metric 1024 pref medium +default via fe80::20d:b9ff:fe37:e65c dev enp7s0u2u4 proto ra metric 100 pref medium""" + + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(side_effect=[ip4_route, ip6_route])}, + ): + + ret = core.default_gateway() + assert ret == { + "ip4_gw": "172.23.5.3", + "ip6_gw": "fe80::20d:b9ff:fe37:e65c", + "ip_gw": True, + } + + with patch("salt.utils.path.which", return_value="/usr/sbin/ip"): + + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(side_effect=[ip4_route, ip6_route])}, + ): + + ret = core.default_gateway() + assert ret == { + "ip4_gw": "172.23.5.3", + "ip6_gw": "fe80::20d:b9ff:fe37:e65c", + "ip_gw": True, + } + + with patch("salt.utils.path.which", return_value="/usr/sbin/ip"): + ip_route = """default +172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 +172.19.0.0/16 dev docker_gwbridge proto kernel scope link src 172.19.0.1 +172.23.5.0/24 dev enp7s0u2u4 proto kernel scope link src 172.23.5.173 metric 100 +192.168.56.0/24 dev vboxnet0 proto kernel scope link src 192.168.56.1""" + + with patch.dict( + core.__salt__, + {"cmd.run": 
MagicMock(side_effect=[ip_route])}, + ): + + ret = core.default_gateway() + assert ret == {"ip_gw": True, "ip4_gw": True, "ip6_gw": False} + + +def test__osx_platform_data(): + """ + test _osx_platform_data + """ + osx_platform_data = """Hardware: + + Hardware Overview: + + Model Name: MacBook Pro + Model Identifier: MacBookPro7,1 + Processor Name: Intel Core 2 Duo + Processor Speed: 2.4 GHz + Number of Processors: 1 + Total Number of Cores: 2 + L2 Cache: 3 MB + Memory: 16 GB + System Firmware Version: 68.0.0.0.0 + OS Loader Version: 540.120.3~22 + SMC Version (system): 1.62f7 + Serial Number (system): W80322MWATM + Hardware UUID: 3FA5BDA2-A740-5DF3-8A97-D9D4DB1CE24A + Provisioning UDID: 3FA5BDA2-A740-5DF3-8A97-D9D4DB1CE24A + Sudden Motion Sensor: + State: Enabled""" + + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(return_value=osx_platform_data)}, + ): + + ret = core._osx_platform_data() + assert ret == { + "model_name": "MacBook Pro", + "smc_version": "1.62f7", + "system_serialnumber": "W80322MWATM", + } + + osx_platform_data = """Hardware: + + Hardware Overview: + + Model Name: MacBook Pro + Model Identifier: MacBookPro7,1 + Processor Name: Intel Core 2 Duo + Processor Speed: 2.4 GHz + Number of Processors: 1 + Total Number of Cores: 2 + L2 Cache: 3 MB + Memory: 16 GB + System Firmware Version: 68.0.0.0.0 + Boot ROM Version: 139.0.0.0.0 + OS Loader Version: 540.120.3~22 + SMC Version (system): 1.62f7 + Serial Number (system): W80322MWATM + Hardware UUID: 3FA5BDA2-A740-5DF3-8A97-D9D4DB1CE24A + Provisioning UDID: 3FA5BDA2-A740-5DF3-8A97-D9D4DB1CE24A + Sudden Motion Sensor: + State: Enabled""" + + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(return_value=osx_platform_data)}, + ): + + ret = core._osx_platform_data() + assert ret == { + "model_name": "MacBook Pro", + "smc_version": "1.62f7", + "system_serialnumber": "W80322MWATM", + "boot_rom_version": "139.0.0.0.0", + } + + +def test__parse_junos_showver(): + """ + test _parse_junos_showver 
+ """ + + txt = b"""Hostname: R1-MX960-re0 +Model: mx960 +Junos: 18.2R3-S2.9 +JUNOS Software Release [18.2R3-S2.9]""" + + ret = core._parse_junos_showver(txt) + assert ret == { + "model": "mx960", + "osrelease": "18.2R3-S2.9", + "osmajorrelease": "Junos: 18", + "osrelease_info": ["Junos: 18", "2R3-S2", "9"], + } + + txt = b"""Model: mx240 +Junos: 15.1F2.8 +JUNOS OS Kernel 64-bit [20150814.313820_builder_stable_10] +JUNOS OS runtime [20150814.313820_builder_stable_10] +JUNOS OS time zone information [20150814.313820_builder_stable_10] +JUNOS OS 32-bit compatibility [20150814.313820_builder_stable_10] +JUNOS py base [20150814.204717_builder_junos_151_f2] +JUNOS OS crypto [20150814.313820_builder_stable_10] +JUNOS network stack and utilities [20150814.204717_builder_junos_151_f2] +JUNOS libs compat32 [20150814.204717_builder_junos_151_f2] +JUNOS runtime [20150814.204717_builder_junos_151_f2] +JUNOS platform support [20150814.204717_builder_junos_151_f2] +JUNOS modules [20150814.204717_builder_junos_151_f2] +JUNOS libs [20150814.204717_builder_junos_151_f2] +JUNOS daemons [20150814.204717_builder_junos_151_f2] +JUNOS FIPS mode utilities [20150814.204717_builder_junos_151_f2]""" + + ret = core._parse_junos_showver(txt) + assert ret == { + "model": "mx240", + "osrelease": "15.1F2.8", + "osmajorrelease": "Junos: 15", + "osrelease_info": ["Junos: 15", "1F2", "8"], + "kernelversion": "JUNOS OS Kernel 64-bit [20150814.313820_builder_stable_10]", + "kernelrelease": "20150814.313820_builder_stable_10", + } + + +def test__bsd_cpudata_freebsd(): + """ + test _bsd_cpudata for FreeBSD + """ + osdata = {"kernel": "FreeBSD"} + mock_cmd_run = ["1", "amd64", "Intel(R) Core(TM) i7-10850H CPU @ 2.7.0GHz"] + + dmesg_mock = """Copyright (c) 1992-2021 The FreeBSD Project. +Copyright (c) 1979, 1980, 1983, 1986, 1988, 1989, 1991, 1992, 1993, 1994 + The Regents of the University of California. All rights reserved. +FreeBSD is a registered trademark of The FreeBSD Foundation. 
+FreeBSD 13.2-RELEASE releng/13.2-n254617-525ecfdad597 GENERIC amd64 +FreeBSD clang version 14.0.5 (https://github.com/llvm/llvm-project.git llvmorg-14.0.5-0-gc12386ae247c) +VT(vga): text 80x25 +CPU: Intel(R) Core(TM) i7-10850H CPU @ 2.70GHz (2712.13-MHz K8-class CPU) + Origin="GenuineIntel" Id=0xa0652 Family=0x6 Model=0xa5 Stepping=2 + Features=0x1783fbff + Features2=0x5eda220b + AMD Features=0x28100800 + AMD Features2=0x121 + Structured Extended Features=0x842529 + Structured Extended Features3=0x30000400 + TSC: P-state invariant +real memory = 1073676288 (1023 MB) +avail memory = 995774464 (949 MB) +Event timer "LAPIC" quality 100 +ACPI APIC Table: +random: registering fast source Intel Secure Key RNG +random: fast provider: "Intel Secure Key RNG" +random: unblocking device. +ioapic0: MADT APIC ID 1 != hw id 0 +ioapic0 irqs 0-23 +random: entropy device external interface +kbd1 at kbdmux0 +vtvga0: +smbios0: at iomem 0xfff60-0xfff7e +smbios0: Version: 2.5, BCD Revision: 2.5 +aesni0: +acpi0: +acpi0: Power Button (fixed) +acpi0: Sleep Button (fixed) +cpu0: on acpi0 +attimer0: port 0x40-0x43,0x50-0x53 on acpi0 +Timecounter "i8254" frequency 1193182 Hz quality 0 +Event timer "i8254" frequency 1193182 Hz quality 100 +Timecounter "ACPI-fast" frequency 3579545 Hz quality 900 +acpi_timer0: <32-bit timer at 3.579545MHz> port 0x4008-0x400b on acpi0 +pcib0: port 0xcf8-0xcff on acpi0 +pci0: on pcib0 +isab0: at device 1.0 on pci0 +isa0: on isab0 +atapci0: port 0x1f0-0x1f7,0x3f6,0x170-0x177,0x376,0xd000-0xd00f at device 1.1 on pci0 +ata0: at channel 0 on atapci0 +ata1: at channel 1 on atapci0 +vgapci0: port 0xd010-0xd01f mem 0xe0000000-0xe3ffffff,0xf0000000-0xf01fffff irq 18 at device 2.0 on pci0 +vgapci0: Boot video device +em0: port 0xd020-0xd027 mem 0xf0200000-0xf021ffff irq 19 at device 3.0 on pci0 +em0: Using 1024 TX descriptors and 1024 RX descriptors +em0: Ethernet address: 08:00:27:ae:76:42 +em0: netmap queues/slots: TX 1/1024, RX 1/1024 +pcm0: port 
0xd100-0xd1ff,0xd200-0xd23f irq 21 at device 5.0 on pci0 +pcm0: +ohci0: mem 0xf0804000-0xf0804fff irq 22 at device 6.0 on pci0 +usbus0 on ohci0 +pci0: at device 7.0 (no driver attached) +ehci0: mem 0xf0805000-0xf0805fff irq 19 at device 11.0 on pci0 +usbus1: EHCI version 1.0 +usbus1 on ehci0 +battery0: on acpi0 +acpi_acad0: on acpi0 +atkbdc0: port 0x60,0x64 irq 1 on acpi0 +atkbd0: irq 1 on atkbdc0 +kbd0 at atkbd0 +atkbd0: [GIANT-LOCKED] +psm0: irq 12 on atkbdc0 +psm0: [GIANT-LOCKED] +WARNING: Device "psm" is Giant locked and may be deleted before FreeBSD 14.0. +psm0: model IntelliMouse Explorer, device ID 4 +orm0: at iomem 0xc0000-0xc7fff pnpid ORM0000 on isa0 +vga0: at port 0x3c0-0x3df iomem 0xa0000-0xbffff pnpid PNP0900 on isa0 +atrtc0: at port 0x70 irq 8 on isa0 +atrtc0: registered as a time-of-day clock, resolution 1.000000s +Event timer "RTC" frequency 32768 Hz quality 0 +atrtc0: non-PNP ISA device will be removed from GENERIC in FreeBSD 14. +Timecounter "TSC-low" frequency 1356006904 Hz quality 1000 +Timecounters tick every 10.000 msec +ZFS filesystem version: 5 +ZFS storage pool version: features support (5000) +usbus0: 12Mbps Full Speed USB v1.0 +usbus1: 480Mbps High Speed USB v2.0 +pcm0: measured ac97 link rate at 44717 Hz +ugen1.1: at usbus1 +uhub0 on usbus1 +uhub0: on usbus1 +ugen0.1: at usbus0 +uhub1 on usbus0 +uhub1: on usbus0 +Trying to mount root from zfs:zroot/ROOT/default []... 
+uhub1: 12 ports with 12 removable, self powered +ada0 at ata0 bus 0 scbus0 target 0 lun 0 +ada0: ATA-6 device +ada0: Serial Number VBf824a3f1-4ad9d778 +ada0: 33.300MB/s transfers (UDMA2, PIO 65536bytes) +ada0: 16384MB (33554432 512 byte sectors) +Root mount waiting for: usbus1 +Root mount waiting for: usbus1 +Root mount waiting for: usbus1 +Root mount waiting for: usbus1 +Root mount waiting for: usbus1 +uhub0: 12 ports with 12 removable, self powered +intsmb0: irq 23 at device 7.0 on pci0 +intsmb0: intr IRQ 9 enabled revision 0 +smbus0: on intsmb0 +lo0: link state changed to UP +em0: link state changed to UP""" + + with patch("salt.utils.path.which", return_value="/sbin/sysctl"): + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(side_effect=mock_cmd_run)}, + ): + with patch("os.path.isfile", return_value=True): + with patch("salt.utils.files.fopen", mock_open(read_data=dmesg_mock)): + ret = core._bsd_cpudata(osdata) + assert "num_cpus" in ret + assert ret["num_cpus"] == 1 + + assert "cpuarch" in ret + assert ret["cpuarch"] == "amd64" + + assert "cpu_model" in ret + assert ( + ret["cpu_model"] == "Intel(R) Core(TM) i7-10850H CPU @ 2.7.0GHz" + ) + + assert "cpu_flags" in ret + assert ret["cpu_flags"] == [ + "FPU", + "VME", + "DE", + "PSE", + "TSC", + "MSR", + "PAE", + "MCE", + "CX8", + "APIC", + "SEP", + "MTRR", + "PGE", + "MCA", + "CMOV", + "PAT", + "PSE36", + "MMX", + "FXSR", + "SSE", + "SSE2", + "HTT", + "SSE3", + "PCLMULQDQ", + "MON", + "SSSE3", + "CX16", + "PCID", + "SSE4.1", + "SSE4.2", + "MOVBE", + "POPCNT", + "AESNI", + "XSAVE", + "OSXSAVE", + "AVX", + "RDRAND", + "SYSCALL", + "NX", + "RDTSCP", + "LM", + "LAHF", + "ABM", + "Prefetch", + "FSGSBASE", + "BMI1", + "AVX2", + "BMI2", + "INVPCID", + "NFPUSG", + "RDSEED", + "CLFLUSHOPT", + "MD_CLEAR", + "L1DFL", + "ARCH_CAP", + ] + + +def test__bsd_cpudata_netbsd(): + """ + test _bsd_cpudata for NetBSD + """ + osdata = {"kernel": "NetBSD"} + mock_cpuctl_identify = """cpu0: highest basic info 00000016 
+cpu0: highest extended info 80000008 +cpu0: "Intel(R) Core(TM) i7-10850H CPU @ 2.70GHz" +cpu0: Intel 10th gen Core (Comet Lake) (686-class), 2753.71 MHz +cpu0: family 0x6 model 0xa5 stepping 0x2 (id 0xa0652) +cpu0: features 0x178bfbff +cpu0: features 0x178bfbff +cpu0: features1 0x5eda220b +cpu0: features1 0x5eda220b +cpu0: features2 0x28100800 +cpu0: features3 0x121 +cpu0: features5 0x842529 +cpu0: features5 0x842529 +cpu0: features7 0x30000400 +cpu0: xsave features 0x7 +cpu0: xsave area size: current 832, maximum 832, xgetbv enabled +cpu0: enabled xsave 0x7 +cpu0: I-cache: 32KB 64B/line 8-way, D-cache: 32KB 64B/line 8-way +cpu0: L2 cache: 256KB 64B/line 4-way +cpu0: L3 cache: 12MB 64B/line 16-way +cpu0: 64B prefetching +cpu0: ITLB: 64 4KB entries 8-way, 8 2M/4M entries +cpu0: DTLB: 64 4KB entries 4-way, 4 1GB entries 4-way +cpu0: L2 STLB: 1536 4KB entries 6-way +cpu0: Initial APIC ID 0 +cpu0: Cluster/Package ID 0 +cpu0: Core ID 0 +cpu0: SMT ID 0 +cpu0: monitor-line size 64 +cpu0: SEF highest subleaf 00000000 +cpu0: Power Management features: 0x100 +cpu0: microcode version 0x0, platform ID 0""" + mock_cmd_run = [ + "1", + "amd64", + "Intel(R) Core(TM) i7-10850H CPU @ 2.7.0GHz", + mock_cpuctl_identify, + ] + + with patch("salt.utils.path.which", return_value="/sbin/sysctl"): + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(side_effect=mock_cmd_run)}, + ): + ret = core._bsd_cpudata(osdata) + assert "num_cpus" in ret + assert ret["num_cpus"] == 1 + + assert "cpuarch" in ret + assert ret["cpuarch"] == "amd64" + + assert "cpu_model" in ret + assert ret["cpu_model"] == "Intel(R) Core(TM) i7-10850H CPU @ 2.7.0GHz" + + +def test__bsd_cpudata_darwin(): + """ + test _bsd_cpudata for Darwin + """ + osdata = {"kernel": "Darwin"} + mock_cmd_run = [ + "1", + "x86_64", + "Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz", + "FPU VME DE PSE TSC MSR PAE MCE CX8 APIC SEP MTRR PGE MCA CMOV PAT PSE36 CLFSH DS ACPI MMX FXSR SSE SSE2 SS HTT TM PBE SSE3 DTES64 MON DSCPL VMX SMX EST 
TM2 SSSE3 CX16 TPR PDCM SSE4.1 XSAVE", + ] + + with patch("salt.utils.path.which", return_value="/sbin/sysctl"): + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(side_effect=mock_cmd_run)}, + ): + ret = core._bsd_cpudata(osdata) + assert "num_cpus" in ret + assert ret["num_cpus"] == 1 + + assert "cpuarch" in ret + assert ret["cpuarch"] == "x86_64" + + assert "cpu_model" in ret + assert ret["cpu_model"] == "Intel(R) Core(TM)2 Duo CPU P8600 @ 2.40GHz" + + assert "cpu_flags" in ret + assert ret["cpu_flags"] == [ + "FPU", + "VME", + "DE", + "PSE", + "TSC", + "MSR", + "PAE", + "MCE", + "CX8", + "APIC", + "SEP", + "MTRR", + "PGE", + "MCA", + "CMOV", + "PAT", + "PSE36", + "CLFSH", + "DS", + "ACPI", + "MMX", + "FXSR", + "SSE", + "SSE2", + "SS", + "HTT", + "TM", + "PBE", + "SSE3", + "DTES64", + "MON", + "DSCPL", + "VMX", + "SMX", + "EST", + "TM2", + "SSSE3", + "CX16", + "TPR", + "PDCM", + "SSE4.1", + "XSAVE", + ] + + +def test__bsd_cpudata_openbsd(): + """ + test _bsd_cpudata for OpenBSD + """ + osdata = {"kernel": "OpenBSD"} + mock_cmd_run = ["1", "amd64", "Intel(R) Core(TM) i7-10850H CPU @ 2.7.0GHz", "amd64"] + + with patch("salt.utils.path.which", return_value="/sbin/sysctl"): + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(side_effect=mock_cmd_run)}, + ): + ret = core._bsd_cpudata(osdata) + assert "num_cpus" in ret + assert ret["num_cpus"] == 1 + + assert "cpuarch" in ret + assert ret["cpuarch"] == "amd64" + + assert "cpu_model" in ret + assert ret["cpu_model"] == "Intel(R) Core(TM) i7-10850H CPU @ 2.7.0GHz" + + +def test__netbsd_gpu_data(): + """ + test _netbsd_gpu_data + """ + mock_pcictl = """000:00:0: Intel 82441FX (PMC) PCI and Memory Controller (host bridge, revision 0x02) +000:01:0: Intel 82371SB (PIIX3) PCI-ISA Bridge (ISA bridge) +000:01:1: Intel 82371AB (PIIX4) IDE Controller (IDE mass storage, interface 0x8a, revision 0x01) +000:02.0: VGA compatible controller: Intel Corporation CometLake-H GT2 [UHD Graphics] (rev 05) +000:02:0: Intel 
CometLake-H GT2 [UHD Graphics] (VGA display) +000:03:0: Intel i82540EM 1000baseT Ethernet (ethernet network, revision 0x02) +000:04:0: VirtualBox Guest Service (miscellaneous system) +000:05:0: Intel 82801AA AC-97 Audio Controller (audio multimedia, revision 0x01) +000:06:0: Apple Computer Intrepid USB Controller (USB serial bus, OHCI) +000:07:0: Intel 82371AB (PIIX4) Power Management Controller (miscellaneous bridge, revision 0x08) +000:11:0: Intel 82801FB/FR USB EHCI Controller (USB serial bus, EHCI)""" + + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(return_value=mock_pcictl)}, + ): + ret = core._netbsd_gpu_data() + assert ret == { + "num_gpus": 1, + "gpus": [{"vendor": "Intel", "model": "CometLake-H GT2 [UHD Graphics]"}], + } + + with patch.dict(core.__salt__, {"cmd.run": MagicMock(side_effect=OSError)}): + ret = core._netbsd_gpu_data() + assert ret == {"gpus": [], "num_gpus": 0} + + +def test__bsd_memdata(): + """ + test _bsd_memdata + """ + osdata = {"kernel": "OpenBSD"} + + with patch("salt.utils.path.which", side_effect=["/sbin/sysctl", "/sbin/swapctl"]): + + mock_cmd_run = [ + "1073278976", + "total: 1048559 KBytes allocated, 0 KBytes used, 1048559 KBytes available", + ] + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(side_effect=mock_cmd_run)}, + ): + ret = core._bsd_memdata(osdata) + assert ret == {"mem_total": 1023, "swap_total": 0} + + osdata = {"kernel": "NetBSD"} + + with patch("salt.utils.path.which", side_effect=["/sbin/sysctl", "/sbin/swapctl"]): + + mock_cmd_run = [ + "1073278976", + "total: 1048559 KBytes allocated, 0 KBytes used, 1048559 KBytes available", + ] + + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(side_effect=mock_cmd_run)}, + ): + ret = core._bsd_memdata(osdata) + assert ret == {"mem_total": 1023, "swap_total": 0} + + with patch("salt.utils.path.which", side_effect=["/sbin/sysctl", "/sbin/swapctl"]): + + mock_cmd_run = [ + "-", + "1073278976", + "total: 1048559 KBytes allocated, 0 KBytes used, 
1048559 KBytes available", + ] + + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(side_effect=mock_cmd_run)}, + ): + ret = core._bsd_memdata(osdata) + assert ret == {"mem_total": 1023, "swap_total": 0} + + with patch("salt.utils.path.which", side_effect=["/sbin/sysctl", "/sbin/swapctl"]): + + mock_cmd_run = ["-", "1073278976", "no swap devices configured"] + + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(side_effect=mock_cmd_run)}, + ): + ret = core._bsd_memdata(osdata) + assert ret == {"mem_total": 1023, "swap_total": 0} + + with patch("salt.utils.path.which", side_effect=["/sbin/sysctl", "/sbin/swapctl"]): + + mock_cmd_run = ["-", "1073278976", "no swap devices configured"] + + with patch.dict( + core.__salt__, + {"cmd.run": MagicMock(side_effect=mock_cmd_run)}, + ): + ret = core._memdata(osdata) + assert ret == {"mem_total": 1023, "swap_total": 0} + + +def test__ps(): + """ + test _ps + """ + osdata = {"os_family": ""} + + for bsd in ["FreeBSD", "NetBSD", "OpenBSD", "MacOS"]: + osdata = {"os": bsd} + ret = core._ps(osdata) + assert ret == {"ps": "ps auxwww"} + + osdata = {"os_family": "Solaris", "os": ""} + ret = core._ps(osdata) + assert ret == {"ps": "/usr/ucb/ps auxwww"} + + osdata = {"os": "Windows", "os_family": ""} + ret = core._ps(osdata) + assert ret == {"ps": "tasklist.exe"} + + osdata = {"os": "", "os_family": "AIX"} + ret = core._ps(osdata) + assert ret == {"ps": "/usr/bin/ps auxww"} + + osdata = {"os": "", "os_family": "NILinuxRT"} + ret = core._ps(osdata) + assert ret == {"ps": "ps -o user,pid,ppid,tty,time,comm"} + + osdata = {"os": "", "os_family": "", "virtual": "openvzhn"} + ret = core._ps(osdata) + assert ret == { + "ps": ( + 'ps -fH -p $(grep -l "^envID:[[:space:]]*0\\$" ' + '/proc/[0-9]*/status | sed -e "s=/proc/\\([0-9]*\\)/.*=\\1=") ' + "| awk '{ $7=\"\"; print }'" + ) + } diff --git a/tests/pytests/unit/modules/test_debian_ip.py b/tests/pytests/unit/modules/test_debian_ip.py index d569e8ace4ad..abbc0c61d8ec 100644 
--- a/tests/pytests/unit/modules/test_debian_ip.py +++ b/tests/pytests/unit/modules/test_debian_ip.py @@ -1,4 +1,1213 @@ +import tempfile + +import jinja2.exceptions +import pytest + import salt.modules.debian_ip as debian_ip +import salt.utils.files +from tests.support.mock import MagicMock, patch + +try: + from salt.utils.odict import OrderedDict as odict +except ImportError: + from collections import OrderedDict as odict + +# Big pile of interface data for unit tests +# To skip, search for 'DebianIpTestCase' +# fmt: off + + +pytestmark = [ + pytest.mark.skip_on_windows(reason="Do not run these tests on Windows"), + pytest.mark.skip_on_darwin(reason="Do not run these tests on Mac"), +] + + +@pytest.fixture +def test_interfaces(): + return [ + # Structure + #{'iface_name': 'ethX', 'iface_type': 'eth', 'enabled': True, + # 'skip_test': bool(), # True to disable this test + # 'build_interface': dict(), # data read from sls + # 'get_interface(): OrderedDict(), # data read from interfaces file + # 'return': list()}, # jinja-rendered data + + # IPv4-only interface; single address + {'iface_name': 'eth1', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'proto': 'static', + 'ipaddr': '192.168.4.9', + 'netmask': '255.255.255.0', + 'gateway': '192.168.4.1', + 'enable_ipv6': False, + 'noifupdown': True, + }, + 'get_interface': odict([('eth1', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'static'), + ('filename', None), + ('address', '192.168.4.9'), + ('netmask', '255.255.255.0'), + ('gateway', '192.168.4.1'), + ])), + ]))]))]), + 'return': [ + 'auto eth1\n', + 'iface eth1 inet static\n', + ' address 192.168.4.9\n', + ' netmask 255.255.255.0\n', + ' gateway 192.168.4.1\n', + '\n']}, + + # IPv6-only; single address + {'iface_name': 'eth2', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'ipv6proto': 'static', + 'ipv6ipaddr': '2001:db8:dead:beef::3', + 'ipv6netmask': '64', + 'ipv6gateway': 
'2001:db8:dead:beef::1', + 'enable_ipv6': True, + 'noifupdown': True, + }, + 'get_interface': odict([('eth2', odict([('enabled', True), ('data', odict([ + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 'static'), + ('filename', None), + ('address', '2001:db8:dead:beef::3'), + ('netmask', 64), + ('gateway', '2001:db8:dead:beef::1'), + ])), + ]))]))]), + 'return': [ + 'auto eth2\n', + 'iface eth2 inet6 static\n', + ' address 2001:db8:dead:beef::3\n', + ' netmask 64\n', + ' gateway 2001:db8:dead:beef::1\n', + '\n']}, + + # IPv6-only; multiple addrs; no gw; first addr from ipv6addr + {'iface_name': 'eth3', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'ipv6proto': 'static', + 'ipv6ipaddr': '2001:db8:dead:beef::5/64', + 'ipv6ipaddrs': [ + '2001:db8:dead:beef::7/64', + '2001:db8:dead:beef::8/64', + '2001:db8:dead:beef::9/64'], + 'enable_ipv6': True, + 'noifupdown': True, + }, + 'get_interface': odict([('eth3', odict([('enabled', True), ('data', odict([ + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 'static'), + ('filename', None), + ('address', '2001:db8:dead:beef::5/64'), + ('addresses', [ + '2001:db8:dead:beef::7/64', + '2001:db8:dead:beef::8/64', + '2001:db8:dead:beef::9/64', + ]), + ])), + ]))]))]), + 'return': [ + 'auto eth3\n', + 'iface eth3 inet6 static\n', + ' address 2001:db8:dead:beef::5/64\n', + ' address 2001:db8:dead:beef::7/64\n', + ' address 2001:db8:dead:beef::8/64\n', + ' address 2001:db8:dead:beef::9/64\n', + '\n']}, + + # IPv6-only; multiple addresses + {'iface_name': 'eth4', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'ipv6proto': 'static', + 'ipv6ipaddrs': [ + '2001:db8:dead:beef::5/64', + '2001:db8:dead:beef::7/64', + '2001:db8:dead:beef::8/64', + '2001:db8:dead:beef::9/64'], + 'ipv6gateway': '2001:db8:dead:beef::1', + 'enable_ipv6': True, + 'noifupdown': True, + }, + 'get_interface': odict([('eth4', odict([('enabled', True), ('data', odict([ + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 
'static'), + ('filename', None), + ('address', '2001:db8:dead:beef::5/64'), + ('addresses', [ + '2001:db8:dead:beef::7/64', + '2001:db8:dead:beef::8/64', + '2001:db8:dead:beef::9/64', + ]), + ('gateway', '2001:db8:dead:beef::1'), + ])), + ]))]))]), + 'return': [ + 'auto eth4\n', + 'iface eth4 inet6 static\n', + ' address 2001:db8:dead:beef::5/64\n', + ' address 2001:db8:dead:beef::7/64\n', + ' address 2001:db8:dead:beef::8/64\n', + ' address 2001:db8:dead:beef::9/64\n', + ' gateway 2001:db8:dead:beef::1\n', + '\n']}, + + # IPv4 and IPv6 settings with v4 disabled + {'iface_name': 'eth5', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'proto': 'static', + 'ipaddr': '192.168.4.9', + 'netmask': '255.255.255.0', + 'gateway': '192.168.4.1', + 'ipv6proto': 'static', + 'ipv6ipaddr': '2001:db8:dead:beef::3', + 'ipv6netmask': '64', + 'ipv6gateway': '2001:db8:dead:beef::1', + 'enable_ipv4': False, + 'noifupdown': True, + }, + 'get_interface': odict([('eth5', odict([('enabled', True), ('data', odict([ + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 'static'), + ('filename', None), + ('address', '2001:db8:dead:beef::3'), + ('netmask', 64), + ('gateway', '2001:db8:dead:beef::1'), + ])), + ]))]))]), + 'return': [ + 'auto eth5\n', + 'iface eth5 inet6 static\n', + ' address 2001:db8:dead:beef::3\n', + ' netmask 64\n', + ' gateway 2001:db8:dead:beef::1\n', + '\n']}, + + # IPv4 and IPv6 settings with v6 disabled + {'iface_name': 'eth6', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'proto': 'static', + 'ipaddr': '192.168.4.9', + 'netmask': '255.255.255.0', + 'gateway': '192.168.4.1', + 'ipv6proto': 'static', + 'ipv6ipaddr': '2001:db8:dead:beef::3', + 'ipv6netmask': '64', + 'ipv6gateway': '2001:db8:dead:beef::1', + 'enable_ipv6': False, + 'noifupdown': True, + }, + 'get_interface': odict([('eth6', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'static'), + ('filename', None), + ('address', 
'192.168.4.9'), + ('netmask', '255.255.255.0'), + ('gateway', '192.168.4.1'), + ])), + ]))]))]), + 'return': [ + 'auto eth6\n', + 'iface eth6 inet static\n', + ' address 192.168.4.9\n', + ' netmask 255.255.255.0\n', + ' gateway 192.168.4.1\n', + '\n']}, + + # IPv4 and IPv6; shared/overridden settings + {'iface_name': 'eth7', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'proto': 'static', + 'ipaddr': '192.168.4.9', + 'netmask': '255.255.255.0', + 'gateway': '192.168.4.1', + 'ipv6proto': 'static', + 'ipv6ipaddr': '2001:db8:dead:beef::3', + 'ipv6netmask': '64', + 'ipv6gateway': '2001:db8:dead:beef::1', + 'ttl': '18', # shared + 'ipv6ttl': '15', # overridden for v6 + 'mtu': '1480', # shared + 'enable_ipv6': True, + 'noifupdown': True, + }, + 'get_interface': odict([('eth7', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'static'), + ('filename', None), + ('address', '192.168.4.9'), + ('netmask', '255.255.255.0'), + ('gateway', '192.168.4.1'), + ('ttl', 18), + ('mtu', 1480), + ])), + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 'static'), + ('filename', None), + ('address', '2001:db8:dead:beef::3'), + ('netmask', 64), + ('gateway', '2001:db8:dead:beef::1'), + ('ttl', 15), + ('mtu', 1480), + ])), + ]))]))]), + 'return': [ + 'auto eth7\n', + 'iface eth7 inet static\n', + ' address 192.168.4.9\n', + ' netmask 255.255.255.0\n', + ' gateway 192.168.4.1\n', + ' ttl 18\n', + ' mtu 1480\n', + 'iface eth7 inet6 static\n', + ' address 2001:db8:dead:beef::3\n', + ' netmask 64\n', + ' gateway 2001:db8:dead:beef::1\n', + ' ttl 15\n', + ' mtu 1480\n', + '\n']}, + + # Slave iface + {'iface_name': 'eth8', 'iface_type': 'slave', 'enabled': True, + 'build_interface': { + 'master': 'bond0', + 'noifupdown': True, + }, + 'get_interface': odict([('eth8', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'manual'), + ('filename', None), + ('bonding', odict([ + ('master', 
'bond0'), + ])), + ('bonding_keys', ['master']), + ])), + ]))]))]), + 'return': [ + 'auto eth8\n', + 'iface eth8 inet manual\n', + ' bond-master bond0\n', + '\n']}, + + # Bond; with address IPv4 and IPv6 address; slaves as string + {'iface_name': 'bond9', 'iface_type': 'bond', 'enabled': True, + 'build_interface': { + 'proto': 'static', + 'ipaddr': '10.1.0.14', + 'netmask': '255.255.255.0', + 'gateway': '10.1.0.1', + 'ipv6proto': 'static', + 'ipv6ipaddr': '2001:db8:dead:c0::3', + 'ipv6netmask': '64', + 'ipv6gateway': '2001:db8:dead:c0::1', + 'mode': '802.3ad', + 'slaves': 'eth4 eth5', + 'enable_ipv6': True, + 'noifupdown': True, + }, + 'get_interface': odict([('bond9', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'static'), + ('filename', None), + ('address', '10.1.0.14'), + ('netmask', '255.255.255.0'), + ('gateway', '10.1.0.1'), + ('bonding', odict([ + ('ad_select', '0'), + ('downdelay', '200'), + ('lacp_rate', '0'), + ('miimon', '100'), + ('mode', '4'), + ('slaves', 'eth4 eth5'), + ('updelay', '0'), + ('use_carrier', 'on'), + ])), + ('bonding_keys', [ + 'ad_select', + 'downdelay', + 'lacp_rate', + 'miimon', + 'mode', + 'slaves', + 'updelay', + 'use_carrier', + ]), + ])), + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 'static'), + ('filename', None), + ('address', '2001:db8:dead:c0::3'), + ('netmask', 64), + ('gateway', '2001:db8:dead:c0::1'), + ('bonding', odict([ + ('ad_select', '0'), + ('downdelay', '200'), + ('lacp_rate', '0'), + ('miimon', '100'), + ('mode', '4'), + ('slaves', 'eth4 eth5'), + ('updelay', '0'), + ('use_carrier', 'on'), + ])), + ('bonding_keys', [ + 'ad_select', + 'downdelay', + 'lacp_rate', + 'miimon', + 'mode', + 'slaves', + 'updelay', + 'use_carrier', + ]), + ])), + ]))]))]), + 'return': [ + 'auto bond9\n', + 'iface bond9 inet static\n', + ' address 10.1.0.14\n', + ' netmask 255.255.255.0\n', + ' gateway 10.1.0.1\n', + ' bond-ad_select 0\n', + ' bond-downdelay 200\n', + ' 
bond-lacp_rate 0\n', + ' bond-miimon 100\n', + ' bond-mode 4\n', + ' bond-slaves eth4 eth5\n', + ' bond-updelay 0\n', + ' bond-use_carrier on\n', + 'iface bond9 inet6 static\n', + ' address 2001:db8:dead:c0::3\n', + ' netmask 64\n', + ' gateway 2001:db8:dead:c0::1\n', + ' bond-ad_select 0\n', + ' bond-downdelay 200\n', + ' bond-lacp_rate 0\n', + ' bond-miimon 100\n', + ' bond-mode 4\n', + ' bond-slaves eth4 eth5\n', + ' bond-updelay 0\n', + ' bond-use_carrier on\n', + '\n']}, + + # Bond; with address IPv4 and IPv6 address; slaves as list + {'iface_name': 'bond10', 'iface_type': 'bond', 'enabled': True, + 'build_interface': { + 'proto': 'static', + 'ipaddr': '10.1.0.14', + 'netmask': '255.255.255.0', + 'gateway': '10.1.0.1', + 'ipv6proto': 'static', + 'ipv6ipaddr': '2001:db8:dead:c0::3', + 'ipv6netmask': '64', + 'ipv6gateway': '2001:db8:dead:c0::1', + 'mode': '802.3ad', + 'slaves': ['eth4', 'eth5'], + 'enable_ipv6': True, + 'noifupdown': True, + }, + 'get_interface': odict([('bond10', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'static'), + ('filename', None), + ('address', '10.1.0.14'), + ('netmask', '255.255.255.0'), + ('gateway', '10.1.0.1'), + ('bonding', odict([ + ('ad_select', '0'), + ('downdelay', '200'), + ('lacp_rate', '0'), + ('miimon', '100'), + ('mode', '4'), + ('slaves', 'eth4 eth5'), + ('updelay', '0'), + ('use_carrier', 'on'), + ])), + ('bonding_keys', [ + 'ad_select', + 'downdelay', + 'lacp_rate', + 'miimon', + 'mode', + 'slaves', + 'updelay', + 'use_carrier', + ]), + ])), + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 'static'), + ('filename', None), + ('address', '2001:db8:dead:c0::3'), + ('netmask', 64), + ('gateway', '2001:db8:dead:c0::1'), + ('bonding', odict([ + ('ad_select', '0'), + ('downdelay', '200'), + ('lacp_rate', '0'), + ('miimon', '100'), + ('mode', '4'), + ('slaves', 'eth4 eth5'), + ('updelay', '0'), + ('use_carrier', 'on'), + ])), + ('bonding_keys', [ + 'ad_select', + 
'downdelay', + 'lacp_rate', + 'miimon', + 'mode', + 'slaves', + 'updelay', + 'use_carrier', + ]), + ])), + ]))]))]), + 'return': [ + 'auto bond10\n', + 'iface bond10 inet static\n', + ' address 10.1.0.14\n', + ' netmask 255.255.255.0\n', + ' gateway 10.1.0.1\n', + ' bond-ad_select 0\n', + ' bond-downdelay 200\n', + ' bond-lacp_rate 0\n', + ' bond-miimon 100\n', + ' bond-mode 4\n', + ' bond-slaves eth4 eth5\n', + ' bond-updelay 0\n', + ' bond-use_carrier on\n', + 'iface bond10 inet6 static\n', + ' address 2001:db8:dead:c0::3\n', + ' netmask 64\n', + ' gateway 2001:db8:dead:c0::1\n', + ' bond-ad_select 0\n', + ' bond-downdelay 200\n', + ' bond-lacp_rate 0\n', + ' bond-miimon 100\n', + ' bond-mode 4\n', + ' bond-slaves eth4 eth5\n', + ' bond-updelay 0\n', + ' bond-use_carrier on\n', + '\n']}, + + # Bond VLAN; with IPv4 address + {'iface_name': 'bond0.11', 'iface_type': 'vlan', 'enabled': True, + 'build_interface': { + 'proto': 'static', + 'ipaddr': '10.7.0.8', + 'netmask': '255.255.255.0', + 'gateway': '10.7.0.1', + 'slaves': 'eth6 eth7', + 'mode': '802.3ad', + 'enable_ipv6': False, + 'noifupdown': True, + }, + 'get_interface': odict([('bond0.11', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'static'), + ('filename', None), + ('vlan_raw_device', 'bond1'), + ('address', '10.7.0.8'), + ('netmask', '255.255.255.0'), + ('gateway', '10.7.0.1'), + ('mode', '802.3ad'), + ])), + ]))]))]), + 'return': [ + 'auto bond0.11\n', + 'iface bond0.11 inet static\n', + ' vlan-raw-device bond1\n', + ' address 10.7.0.8\n', + ' netmask 255.255.255.0\n', + ' gateway 10.7.0.1\n', + ' mode 802.3ad\n', + '\n']}, + + # Bond; without address + {'iface_name': 'bond0.12', 'iface_type': 'vlan', 'enabled': True, + 'build_interface': { + 'proto': 'static', + 'slaves': 'eth6 eth7', + 'mode': '802.3ad', + 'enable_ipv6': False, + 'noifupdown': True, + }, + 'get_interface': odict([('bond0.12', odict([('enabled', True), ('data', odict([ + ('inet', 
odict([ + ('addrfam', 'inet'), + ('proto', 'static'), + ('filename', None), + ('vlan_raw_device', 'bond1'), + ('mode', '802.3ad'), + ])), + ]))]))]), + 'return': [ + 'auto bond0.12\n', + 'iface bond0.12 inet static\n', + ' vlan-raw-device bond1\n', + ' mode 802.3ad\n', + '\n']}, + + # Bridged interface + {'iface_name': 'br0', 'iface_type': 'bridge', 'enabled': True, + 'build_interface': { + 'proto': 'static', + 'ipaddr': '192.168.4.10', + 'netmask': '255.255.255.0', + 'gateway': '192.168.4.1', + 'bridge_ports': 'eth1', + 'enable_ipv6': False, + 'noifupdown': True, + }, + 'get_interface': odict([('br0', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'static'), + ('filename', None), + ('address', '192.168.4.10'), + ('netmask', '255.255.255.0'), + ('gateway', '192.168.4.1'), + ('bridging', odict([ + ('ports', 'eth1'), + ])), + ('bridging_keys', ['ports']), + ])), + ]))]))]), + 'return': [ + 'auto br0\n', + 'iface br0 inet static\n', + ' address 192.168.4.10\n', + ' netmask 255.255.255.0\n', + ' gateway 192.168.4.1\n', + ' bridge_ports eth1\n', + '\n']}, + + + # DNS NS as list + {'iface_name': 'eth13', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'proto': 'static', + 'ipaddr': '192.168.4.9', + 'netmask': '255.255.255.0', + 'gateway': '192.168.4.1', + 'enable_ipv6': False, + 'noifupdown': True, + 'dns': ['8.8.8.8', '8.8.4.4'], + }, + 'get_interface': odict([('eth13', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'static'), + ('filename', None), + ('address', '192.168.4.9'), + ('netmask', '255.255.255.0'), + ('gateway', '192.168.4.1'), + ('dns_nameservers', ['8.8.8.8', '8.8.4.4']), + ])), + ]))]))]), + 'return': [ + 'auto eth13\n', + 'iface eth13 inet static\n', + ' address 192.168.4.9\n', + ' netmask 255.255.255.0\n', + ' gateway 192.168.4.1\n', + ' dns-nameservers 8.8.8.8 8.8.4.4\n', + '\n']}, + + # DNS NS as string + {'iface_name': 'eth14', 
'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'proto': 'static', + 'ipaddr': '192.168.4.9', + 'netmask': '255.255.255.0', + 'gateway': '192.168.4.1', + 'enable_ipv6': False, + 'noifupdown': True, + 'dns': '8.8.8.8 8.8.4.4', + }, + 'get_interface': odict([('eth14', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'static'), + ('filename', None), + ('address', '192.168.4.9'), + ('netmask', '255.255.255.0'), + ('gateway', '192.168.4.1'), + ('dns_nameservers', ['8.8.8.8', '8.8.4.4']), + ])), + ]))]))]), + 'return': [ + 'auto eth14\n', + 'iface eth14 inet static\n', + ' address 192.168.4.9\n', + ' netmask 255.255.255.0\n', + ' gateway 192.168.4.1\n', + ' dns-nameservers 8.8.8.8 8.8.4.4\n', + '\n']}, + + # Loopback; with IPv4 and IPv6 address + {'iface_name': 'lo15', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'proto': 'loopback', + 'ipaddr': '192.168.4.9', + 'netmask': '255.255.255.0', + 'gateway': '192.168.4.1', + 'enable_ipv6': True, + 'ipv6proto': 'loopback', + 'ipv6ipaddr': 'fc00::1', + 'ipv6netmask': '128', + 'ipv6_autoconf': False, + 'noifupdown': True, + }, + 'get_interface': odict([('lo15', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'loopback'), + ('filename', None), + ('address', '192.168.4.9'), + ('netmask', '255.255.255.0'), + ('gateway', '192.168.4.1'), + ])), + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 'loopback'), + ('filename', None), + ('address', 'fc00::1'), + ('netmask', 128), + ])), + ]))]))]), + 'return': [ + 'auto lo15\n', + 'iface lo15 inet loopback\n', + ' address 192.168.4.9\n', + ' netmask 255.255.255.0\n', + ' gateway 192.168.4.1\n', + 'iface lo15 inet6 loopback\n', + ' address fc00::1\n', + ' netmask 128\n', + '\n']}, + + # Loopback; with only IPv6 address; enabled=False + {'iface_name': 'lo16', 'iface_type': 'eth', 'enabled': False, + 'build_interface': { + 'enable_ipv6': True, + 'ipv6proto': 
'loopback', + 'ipv6ipaddr': 'fc00::1', + 'ipv6netmask': '128', + 'ipv6_autoconf': False, + 'noifupdown': True, + }, + 'get_interface': odict([('lo16', odict([('data', odict([ + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 'loopback'), + ('filename', None), + ('address', 'fc00::1'), + ('netmask', 128), + ])), + ]))]))]), + 'return': [ + 'iface lo16 inet6 loopback\n', + ' address fc00::1\n', + ' netmask 128\n', + '\n']}, + + # Loopback; without address + {'iface_name': 'lo17', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'proto': 'loopback', + 'enable_ipv6': False, + 'noifupdown': True, + }, + 'get_interface': odict([('lo17', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'loopback'), + ('filename', None), + ])), + ]))]))]), + 'return': [ + 'auto lo17\n', + 'iface lo17 inet loopback\n', + '\n']}, + + # IPv4=DHCP; IPv6=Static; with IPv6 netmask + {'iface_name': 'eth18', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'proto': 'dhcp', + 'enable_ipv6': True, + 'ipv6proto': 'static', + 'ipv6ipaddr': '2001:db8:dead:c0::3', + 'ipv6netmask': '64', + 'ipv6gateway': '2001:db8:dead:c0::1', + 'noifupdown': True, + }, + 'get_interface': odict([('eth18', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'dhcp'), + ('filename', None), + ])), + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 'static'), + ('filename', None), + ('address', '2001:db8:dead:c0::3'), + ('netmask', 64), + ('gateway', '2001:db8:dead:c0::1'), + ])), + ]))]))]), + 'return': [ + 'auto eth18\n', + 'iface eth18 inet dhcp\n', + 'iface eth18 inet6 static\n', + ' address 2001:db8:dead:c0::3\n', + ' netmask 64\n', + ' gateway 2001:db8:dead:c0::1\n', + '\n']}, + + # IPv4=DHCP; IPv6=Static; without IPv6 netmask + {'iface_name': 'eth19', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'proto': 'dhcp', + 'enable_ipv6': True, + 'ipv6proto': 'static', + 
'ipv6ipaddr': '2001:db8:dead:c0::3/64', + 'ipv6gateway': '2001:db8:dead:c0::1', + 'noifupdown': True, + }, + 'get_interface': odict([('eth19', odict([('enabled', True), ('data', odict([ + ('inet', odict([ + ('addrfam', 'inet'), + ('proto', 'dhcp'), + ('filename', None), + ])), + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 'static'), + ('filename', None), + ('address', '2001:db8:dead:c0::3/64'), + ('gateway', '2001:db8:dead:c0::1'), + ])), + ]))]))]), + 'return': [ + 'auto eth19\n', + 'iface eth19 inet dhcp\n', + 'iface eth19 inet6 static\n', + ' address 2001:db8:dead:c0::3/64\n', + ' gateway 2001:db8:dead:c0::1\n', + '\n']}, + + # IPv6-only; static with autoconf and accept_ra forced + {'iface_name': 'eth20', 'iface_type': 'eth', 'enabled': True, + 'build_interface': { + 'ipv6proto': 'static', + 'ipv6ipaddr': '2001:db8:dead:beef::3/64', + 'ipv6gateway': '2001:db8:dead:beef::1', + 'enable_ipv6': True, + 'autoconf': 1, + 'accept_ra': 2, + 'noifupdown': True, + }, + 'get_interface': odict([('eth20', odict([('enabled', True), ('data', odict([ + ('inet6', odict([ + ('addrfam', 'inet6'), + ('proto', 'static'), + ('filename', None), + ('autoconf', 1), + ('address', '2001:db8:dead:beef::3/64'), + ('gateway', '2001:db8:dead:beef::1'), + ('accept_ra', 2), + ])), + ]))]))]), + 'return': [ + 'auto eth20\n', + 'iface eth20 inet6 static\n', + ' autoconf 1\n', + ' address 2001:db8:dead:beef::3/64\n', + ' gateway 2001:db8:dead:beef::1\n', + ' accept_ra 2\n', + '\n']}, + ] +# fmt: on + + +@pytest.fixture +def configure_loader_modules(): + return {debian_ip: {}} + + +# 'build_bond' function tests: 3 + + +def test_build_bond(): + """ + Test if it create a bond script in /etc/modprobe.d with the passed + settings and load the bonding kernel module. 
+ """ + with patch( + "salt.modules.debian_ip._parse_settings_bond", MagicMock(return_value={}) + ), patch("salt.modules.debian_ip._write_file", MagicMock(return_value=True)): + mock = MagicMock(return_value=1) + with patch.dict(debian_ip.__grains__, {"osrelease": mock}): + mock = MagicMock(return_value=True) + with patch.dict( + debian_ip.__salt__, {"kmod.load": mock, "pkg.install": mock} + ): + assert debian_ip.build_bond("bond0") == "" + + +def test_error_message_iface_should_process_non_str_expected(): + values = [1, True, False, "no-kaboom"] + iface = "ethtest" + option = "test" + msg = debian_ip._error_msg_iface(iface, option, values) + assert msg.endswith("[1|True|False|no-kaboom]"), msg + + +def test_error_message_network_should_process_non_str_expected(): + values = [1, True, False, "no-kaboom"] + msg = debian_ip._error_msg_network("fnord", values) + assert msg.endswith("[1|True|False|no-kaboom]"), msg + + +def test_build_bond_exception(): + """ + Test if it create a bond script in /etc/modprobe.d with the passed + settings and load the bonding kernel module. + """ + with patch( + "salt.modules.debian_ip._parse_settings_bond", MagicMock(return_value={}) + ): + mock = MagicMock(return_value=1) + with patch.dict(debian_ip.__grains__, {"osrelease": mock}): + mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound("error")) + with patch.object(jinja2.Environment, "get_template", mock): + assert debian_ip.build_bond("bond0") == "" + + +def test_build_bond_data(): + """ + Test if it create a bond script in /etc/modprobe.d with the passed + settings and load the bonding kernel module. 
+ """ + with patch( + "salt.modules.debian_ip._parse_settings_bond", MagicMock(return_value={}) + ), patch("salt.modules.debian_ip._read_temp", MagicMock(return_value=True)): + mock = MagicMock(return_value=1) + with patch.dict(debian_ip.__grains__, {"osrelease": mock}): + assert debian_ip.build_bond("bond0", test="True") + + +# 'build_routes' function tests: 2 + + +def test_build_routes(): + """ + Test if it add route scripts for a network interface using up commands. + """ + with patch( + "salt.modules.debian_ip._parse_routes", + MagicMock(return_value={"routes": []}), + ), patch( + "salt.modules.debian_ip._write_file_routes", MagicMock(return_value=True) + ), patch( + "salt.modules.debian_ip._read_file", MagicMock(return_value="salt") + ): + assert debian_ip.build_routes("eth0") == "saltsalt" + + +def test_build_routes_exception(): + """ + Test if it add route scripts for a network interface using up commands. + """ + with patch( + "salt.modules.debian_ip._parse_routes", + MagicMock(return_value={"routes": []}), + ): + assert debian_ip.build_routes("eth0", test="True") + + mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound("err")) + with patch.object(jinja2.Environment, "get_template", mock): + assert debian_ip.build_routes("eth0") == "" + + +# 'down' function tests: 1 + + +def test_down(): + """ + Test if it shutdown a network interface + """ + assert debian_ip.down("eth0", "slave") is None + + mock = MagicMock(return_value="Salt") + with patch.dict(debian_ip.__salt__, {"cmd.run": mock}): + assert debian_ip.down("eth0", "eth") == "Salt" + + +# 'get_bond' function tests: 1 + + +def test_get_bond(): + """ + Test if it return the content of a bond script + """ + assert debian_ip.get_bond("bond0") == "" + + +# '_parse_interfaces' function tests: 1 + + +def test_parse_interfaces(test_interfaces): + """ + Test if it returns the correct data for parsed configuration file + """ + with tempfile.NamedTemporaryFile(mode="r", delete=True) as tfile: + for iface 
in test_interfaces: + iname = iface["iface_name"] + if iface.get("skip_test", False): + continue + with salt.utils.files.fopen(str(tfile.name), "w") as fh: + fh.writelines(iface["return"]) + for inet in ["inet", "inet6"]: + if inet in iface["get_interface"][iname]["data"]: + iface["get_interface"][iname]["data"][inet]["filename"] = str( + tfile.name + ) + assert ( + debian_ip._parse_interfaces([str(tfile.name)]) == iface["get_interface"] + ) + + +# 'get_interface' function tests: 1 + + +def test_get_interface(test_interfaces): + """ + Test if it return the contents of an interface script + """ + for iface in test_interfaces: + if iface.get("skip_test", False): + continue + with patch.object( + debian_ip, + "_parse_interfaces", + MagicMock(return_value=iface["get_interface"]), + ): + assert debian_ip.get_interface(iface["iface_name"]) == iface["return"] + + +# 'build_interface' function tests: 1 + + +def test_build_interface(test_interfaces): + """ + Test if it builds an interface script for a network interface. 
+ """ + with patch( + "salt.modules.debian_ip._write_file_ifaces", MagicMock(return_value="salt") + ): + assert debian_ip.build_interface("eth0", "eth", "enabled") == [ + "s\n", + "a\n", + "l\n", + "t\n", + ] + + assert debian_ip.build_interface("eth0", "eth", "enabled", test="True") + + with patch.object( + debian_ip, "_parse_settings_eth", MagicMock(return_value={"routes": []}) + ): + for eth_t in ["bridge", "slave", "bond"]: + pytest.raises( + AttributeError, + debian_ip.build_interface, + "eth0", + eth_t, + "enabled", + ) + + assert debian_ip.build_interface("eth0", "eth", "enabled", test="True") + + with tempfile.NamedTemporaryFile(mode="r", delete=True) as tfile: + with patch("salt.modules.debian_ip._DEB_NETWORK_FILE", str(tfile.name)): + for iface in test_interfaces: + if iface.get("skip_test", False): + continue + # Skip tests that require __salt__['pkg.install']() + if iface["iface_type"] in ["bridge", "pppoe", "vlan"]: + continue + assert ( + debian_ip.build_interface( + iface=iface["iface_name"], + iface_type=iface["iface_type"], + enabled=iface["enabled"], + interface_file=tfile.name, + **iface["build_interface"] + ) + == iface["return"] + ) + + +# 'up' function tests: 1 + + +def test_up(): + """ + Test if it start up a network interface + """ + assert debian_ip.down("eth0", "slave") is None + + mock = MagicMock(return_value="Salt") + with patch.dict(debian_ip.__salt__, {"cmd.run": mock}): + assert debian_ip.up("eth0", "eth") == "Salt" + + +# 'get_network_settings' function tests: 1 + + +def test_get_network_settings(): + """ + Test if it return the contents of the global network script. 
+ """ + with patch.dict( + debian_ip.__grains__, {"osfullname": "Ubuntu", "osrelease": "14"} + ), patch( + "salt.modules.debian_ip._parse_hostname", + MagicMock(return_value="SaltStack"), + ), patch( + "salt.modules.debian_ip._parse_domainname", + MagicMock(return_value="saltstack.com"), + ): + mock_avai = MagicMock(return_value=True) + with patch.dict( + debian_ip.__salt__, + {"service.available": mock_avai, "service.status": mock_avai}, + ): + assert debian_ip.get_network_settings() == [ + "NETWORKING=yes\n", + "HOSTNAME=SaltStack\n", + "DOMAIN=saltstack.com\n", + ] + + mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound("error")) + with patch.object(jinja2.Environment, "get_template", mock): + assert debian_ip.get_network_settings() == "" + + +# 'get_routes' function tests: 1 + + +def test_get_routes(): + """ + Test if it return the routes for the interface + """ + with patch("salt.modules.debian_ip._read_file", MagicMock(return_value="salt")): + assert debian_ip.get_routes("eth0") == "saltsalt" + + +# 'apply_network_settings' function tests: 1 + + +@pytest.mark.slow_test +def test_apply_network_settings(): + """ + Test if it apply global network configuration. + """ + mock = MagicMock(return_value=True) + with patch.dict( + debian_ip.__salt__, + {"network.mod_hostname": mock, "service.stop": mock, "service.start": mock}, + ): + assert debian_ip.apply_network_settings() is True + + +# 'build_network_settings' function tests: 1 + + +def test_build_network_settings(): + """ + Test if it build the global network script. 
+ """ + with patch( + "salt.modules.debian_ip._parse_network_settings", + MagicMock( + return_value={ + "networking": "yes", + "hostname": "Salt.saltstack.com", + "domainname": "saltstack.com", + "search": "test.saltstack.com", + } + ), + ), patch( + "salt.modules.debian_ip._write_file_network", MagicMock(return_value=True) + ): + with patch.dict( + debian_ip.__grains__, {"osfullname": "Ubuntu", "osrelease": "14"} + ): + mock = MagicMock(return_value=True) + with patch.dict( + debian_ip.__salt__, + { + "service.available": mock, + "service.disable": mock, + "service.enable": mock, + }, + ): + assert debian_ip.build_network_settings() == [ + "NETWORKING=yes\n", + "HOSTNAME=Salt\n", + "DOMAIN=saltstack.com\n", + "SEARCH=test.saltstack.com\n", + ] + + mock = MagicMock( + side_effect=jinja2.exceptions.TemplateNotFound("error") + ) + with patch.object(jinja2.Environment, "get_template", mock): + assert debian_ip.build_network_settings() == "" + + with patch.dict( + debian_ip.__grains__, {"osfullname": "Ubuntu", "osrelease": "10"} + ): + mock = MagicMock(return_value=True) + with patch.dict( + debian_ip.__salt__, + { + "service.available": mock, + "service.disable": mock, + "service.enable": mock, + }, + ): + mock = MagicMock( + side_effect=jinja2.exceptions.TemplateNotFound("error") + ) + with patch.object(jinja2.Environment, "get_template", mock): + assert debian_ip.build_network_settings() == "" + + with patch.object( + debian_ip, "_read_temp", MagicMock(return_value=True) + ): + assert debian_ip.build_network_settings(test="True") def test_when_no_adapters_are_passed_to_filter_none_should_be_returned(): diff --git a/tests/pytests/unit/modules/test_dig.py b/tests/pytests/unit/modules/test_dig.py index 29cdb816d2af..dcc0acc82953 100644 --- a/tests/pytests/unit/modules/test_dig.py +++ b/tests/pytests/unit/modules/test_dig.py @@ -1,19 +1,50 @@ +""" + Test cases for salt.modules.dig +""" + + import pytest -import salt.modules.cmdmod as cmdmod import salt.modules.dig as 
dig from tests.support.mock import MagicMock, patch @pytest.fixture def configure_loader_modules(): - return { - dig: { - "__salt__": { - "cmd.run_all": cmdmod.run_all, + return {dig: {}} + + +class SpfValues: + def __call__(self, key, python_shell=False): + _spf_values = { + "dig +short xmission.com TXT": { + "pid": 27282, + "retcode": 0, + "stderr": "", + "stdout": '"v=spf1 a mx include:_spf.xmission.com ?all"', + }, + "dig +short _spf.xmission.com TXT": { + "pid": 27282, + "retcode": 0, + "stderr": "", + "stdout": '"v=spf1 a mx ip4:198.60.22.0/24 ip4:166.70.13.0/24 ~all"', + }, + "dig +short xmission-redirect.com TXT": { + "pid": 27282, + "retcode": 0, + "stderr": "", + "stdout": "v=spf1 redirect=_spf.xmission.com", + }, + "dig +short foo.com TXT": { + "pid": 27282, + "retcode": 0, + "stderr": "", + "stdout": "v=spf1 ip4:216.73.93.70/31 ip4:216.73.93.72/31 ~all", }, } - } + return _spf_values.get( + " ".join(key), {"pid": 27310, "retcode": 0, "stderr": "", "stdout": ""} + ) def test_dig_cname_found(): @@ -40,3 +71,143 @@ def test_dig_cname_none_found(): ) with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): assert dig.CNAME("www.google.com") == "" + + +def test_check_ip(): + assert dig.check_ip("127.0.0.1") + + +def test_check_ip_ipv6(): + assert dig.check_ip("1111:2222:3333:4444:5555:6666:7777:8888") + + +def test_check_ip_ipv6_valid(): + assert dig.check_ip("2607:fa18:0:3::4") + + +def test_check_ip_neg(): + assert not dig.check_ip("-127.0.0.1") + + +def test_check_ip_empty(): + assert not dig.check_ip("") + + +def test_a(): + dig_mock = MagicMock( + return_value={ + "pid": 3656, + "retcode": 0, + "stderr": "", + "stdout": ( + "74.125.193.104\n" + "74.125.193.105\n" + "74.125.193.99\n" + "74.125.193.106\n" + "74.125.193.103\n" + "74.125.193.147" + ), + } + ) + with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): + assert dig.A("www.google.com") == [ + "74.125.193.104", + "74.125.193.105", + "74.125.193.99", + "74.125.193.106", + "74.125.193.103", + 
"74.125.193.147", + ] + + +def test_ptr(): + dig_mock = MagicMock( + return_value={ + "pid": 3657, + "retcode": 0, + "stderr": "", + "stdout": ("dns.google."), + } + ) + with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): + assert dig.ptr("8.8.8.8") == [ + "dns.google.", + ] + + +def test_aaaa(): + dig_mock = MagicMock( + return_value={ + "pid": 25451, + "retcode": 0, + "stderr": "", + "stdout": "2607:f8b0:400f:801::1014", + } + ) + with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): + assert dig.AAAA("www.google.com") == ["2607:f8b0:400f:801::1014"] + + +def test_ns(): + with patch("salt.modules.dig.A", MagicMock(return_value=["ns4.google.com."])): + dig_mock = MagicMock( + return_value={ + "pid": 26136, + "retcode": 0, + "stderr": "", + "stdout": "ns4.google.com.", + } + ) + with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): + assert dig.NS("google.com") == ["ns4.google.com."] + + +def test_spf(): + dig_mock = MagicMock(side_effect=SpfValues()) + with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): + assert dig.SPF("foo.com") == ["216.73.93.70/31", "216.73.93.72/31"] + + +def test_spf_redir(): + """ + Test for SPF records which use the 'redirect' SPF mechanism + https://en.wikipedia.org/wiki/Sender_Policy_Framework#Mechanisms + """ + dig_mock = MagicMock(side_effect=SpfValues()) + with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): + assert dig.SPF("xmission-redirect.com") == ["198.60.22.0/24", "166.70.13.0/24"] + + +def test_spf_include(): + """ + Test for SPF records which use the 'include' SPF mechanism + https://en.wikipedia.org/wiki/Sender_Policy_Framework#Mechanisms + """ + dig_mock = MagicMock(side_effect=SpfValues()) + with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): + assert dig.SPF("xmission.com") == ["198.60.22.0/24", "166.70.13.0/24"] + + +def test_mx(): + dig_mock = MagicMock( + return_value={ + "pid": 27780, + "retcode": 0, + "stderr": "", + "stdout": ( + "10 aspmx.l.google.com.\n" + "20 
alt1.aspmx.l.google.com.\n" + "40 alt3.aspmx.l.google.com.\n" + "50 alt4.aspmx.l.google.com.\n" + "30 alt2.aspmx.l.google.com." + ), + } + ) + with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): + assert dig.MX("google.com") == [ + ["10", "aspmx.l.google.com."], + ["20", "alt1.aspmx.l.google.com."], + ["40", "alt3.aspmx.l.google.com."], + ["50", "alt4.aspmx.l.google.com."], + ["30", "alt2.aspmx.l.google.com."], + ] diff --git a/tests/pytests/unit/modules/test_dnsutil.py b/tests/pytests/unit/modules/test_dnsutil.py new file mode 100644 index 000000000000..35e04519ad6d --- /dev/null +++ b/tests/pytests/unit/modules/test_dnsutil.py @@ -0,0 +1,137 @@ +""" + :codeauthor: Nicole Thomas + + TestCase for salt.modules.dnsutil +""" + +import pytest + +import salt.modules.dnsutil as dnsutil +import salt.utils.stringutils +from tests.support.mock import MagicMock, mock_open, patch + + +@pytest.fixture +def mock_hosts_file(): + return ( + "##\n" + "# Host Database\n" + "#\n" + "# localhost is used to configure the loopback interface\n" + "# when the system is booting. Do not change this entry.\n" + "##\n" + "127.0.0.1 localhost\n" + "255.255.255.255 broadcasthost\n" + "::1 localhost\n" + "fe80::1%lo0 localhost" + ) + + +@pytest.fixture +def mock_hosts_file_rtn(): + return { + "::1": ["localhost"], + "255.255.255.255": ["broadcasthost"], + "127.0.0.1": ["localhost"], + "fe80::1%lo0": ["localhost"], + } + + +@pytest.fixture +def mock_soa_zone(): + return ( + "$TTL 3D\n" + "@ IN SOA land-5.com. root.land-5.com. (\n" + "199609203 ; Serial\n" + "28800 ; Refresh\n" + "7200 ; Retry\n" + "604800 ; Expire\n" + "86400) ; Minimum TTL\n" + "NS land-5.com.\n\n" + "1 PTR localhost." + ) + + +@pytest.fixture +def mock_writes_list(): + return [ + "##\n", + "# Host Database\n", + "#\n", + "# localhost is used to configure the loopback interface\n", + "# when the system is booting. 
Do not change this entry.\n", + "##\n", + "127.0.0.1 localhost", + "\n", + "255.255.255.255 broadcasthost", + "\n", + "::1 localhost", + "\n", + "fe80::1%lo0 localhost", + "\n", + ] + + +@pytest.fixture +def configure_loader_modules(): + return {dnsutil: {}} + + +def test_parse_hosts(mock_hosts_file): + with patch("salt.utils.files.fopen", mock_open(read_data=mock_hosts_file)): + assert dnsutil.parse_hosts() == { + "::1": ["localhost"], + "255.255.255.255": ["broadcasthost"], + "127.0.0.1": ["localhost"], + "fe80::1%lo0": ["localhost"], + } + + +def test_hosts_append(mock_hosts_file, mock_hosts_file_rtn): + with patch( + "salt.utils.files.fopen", mock_open(read_data=mock_hosts_file) + ) as m_open, patch( + "salt.modules.dnsutil.parse_hosts", + MagicMock(return_value=mock_hosts_file_rtn), + ): + dnsutil.hosts_append("/etc/hosts", "127.0.0.1", "ad1.yuk.co,ad2.yuk.co") + writes = m_open.write_calls() + # We should have called .write() only once, with the expected + # content + num_writes = len(writes) + assert num_writes == 1, num_writes + expected = salt.utils.stringutils.to_str("\n127.0.0.1 ad1.yuk.co ad2.yuk.co") + assert writes[0] == expected, writes[0] + + +def test_hosts_remove(mock_hosts_file, mock_writes_list): + to_remove = "ad1.yuk.co" + new_mock_file = mock_hosts_file + "\n127.0.0.1 " + to_remove + "\n" + with patch("salt.utils.files.fopen", mock_open(read_data=new_mock_file)) as m_open: + dnsutil.hosts_remove("/etc/hosts", to_remove) + writes = m_open.write_calls() + assert writes == mock_writes_list, writes + + +def test_to_seconds_hour(): + assert dnsutil._to_seconds("4H") == 14400, "Did not detect valid hours as invalid" + + +def test_to_seconds_day(): + assert dnsutil._to_seconds("1D") == 86400, "Did not detect valid day as invalid" + + +def test_to_seconds_week(): + assert ( + dnsutil._to_seconds("2W") == 604800 + ), "Did not set time greater than one week to one week" + + +def test_to_seconds_empty(): + assert dnsutil._to_seconds("") == 604800, "Did 
not set empty time to one week" + + +def test_to_seconds_large(): + assert ( + dnsutil._to_seconds("604801") == 604800 + ), "Did not set time greater than one week to one week" diff --git a/tests/pytests/unit/modules/test_dpkg_lowpkg.py b/tests/pytests/unit/modules/test_dpkg_lowpkg.py index 1a89660c02bb..41bd615ff296 100644 --- a/tests/pytests/unit/modules/test_dpkg_lowpkg.py +++ b/tests/pytests/unit/modules/test_dpkg_lowpkg.py @@ -1,9 +1,369 @@ +""" + :codeauthor: Jayesh Kariya + + Test cases for salt.modules.dpkg +""" + + +import logging import os +import pytest + import salt.modules.dpkg_lowpkg as dpkg from tests.support.mock import MagicMock, mock_open, patch +@pytest.fixture +def configure_loader_modules(): + return {dpkg: {}} + + +def setUp(self): + dpkg_lowpkg_logger = logging.getLogger("salt.modules.dpkg_lowpkg") + self.level = dpkg_lowpkg_logger.level + dpkg_lowpkg_logger.setLevel(logging.FATAL) + + +def tearDown(self): + logging.getLogger("salt.modules.dpkg_lowpkg").setLevel(self.level) + + +def dpkg_L_side_effect(cmd, **kwargs): + assert cmd[:2] == ["dpkg", "-L"] + package = cmd[2] + return dpkg_l_output[package] + + +dpkg_error_msg = """dpkg-query: package 'httpd' is not installed +Use dpkg --contents (= dpkg-deb --contents) to list archive files contents. +""" + + +dpkg_l_output = { + "hostname": """\ +/. 
+/bin +/bin/hostname +/usr +/usr/share +/usr/share/doc +/usr/share/doc/hostname +/usr/share/doc/hostname/changelog.gz +/usr/share/doc/hostname/copyright +/usr/share/man +/usr/share/man/man1 +/usr/share/man/man1/hostname.1.gz +/bin/dnsdomainname +/bin/domainname +/bin/nisdomainname +/bin/ypdomainname +/usr/share/man/man1/dnsdomainname.1.gz +/usr/share/man/man1/domainname.1.gz +/usr/share/man/man1/nisdomainname.1.gz +/usr/share/man/man1/ypdomainname.1.gz +""" +} + + +# 'unpurge' function tests: 2 + + +def test_unpurge(): + """ + Test if it change package selection for each package + specified to 'install' + """ + mock = MagicMock(return_value=[]) + with patch.dict(dpkg.__salt__, {"pkg.list_pkgs": mock, "cmd.run": mock}): + assert dpkg.unpurge("curl") == {} + + +def test_unpurge_empty_package(): + """ + Test if it change package selection for each package + specified to 'install' + """ + assert dpkg.unpurge() == {} + + +# 'list_pkgs' function tests: 1 + + +def test_list_pkgs(): + """ + Test if it lists the packages currently installed + """ + mock = MagicMock( + return_value={ + "retcode": 0, + "stderr": "", + "stdout": "installed\thostname\t3.21", + } + ) + with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}): + assert dpkg.list_pkgs("hostname") == {"hostname": "3.21"} + + mock = MagicMock( + return_value={ + "retcode": 1, + "stderr": "dpkg-query: no packages found matching httpd", + "stdout": "", + } + ) + with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}): + assert ( + dpkg.list_pkgs("httpd") + == "Error: dpkg-query: no packages found matching httpd" + ) + + +# 'file_list' function tests: 1 + + +def test_file_list(): + """ + Test if it lists the files that belong to a package. 
+ """ + dpkg_query_mock = MagicMock( + return_value={"retcode": 0, "stderr": "", "stdout": "installed\thostname"} + ) + dpkg_L_mock = MagicMock(side_effect=dpkg_L_side_effect) + with patch.dict( + dpkg.__salt__, {"cmd.run_all": dpkg_query_mock, "cmd.run": dpkg_L_mock} + ): + assert dpkg.file_list("hostname") == { + "errors": [], + "files": [ + "/.", + "/bin", + "/bin/dnsdomainname", + "/bin/domainname", + "/bin/hostname", + "/bin/nisdomainname", + "/bin/ypdomainname", + "/usr", + "/usr/share", + "/usr/share/doc", + "/usr/share/doc/hostname", + "/usr/share/doc/hostname/changelog.gz", + "/usr/share/doc/hostname/copyright", + "/usr/share/man", + "/usr/share/man/man1", + "/usr/share/man/man1/dnsdomainname.1.gz", + "/usr/share/man/man1/domainname.1.gz", + "/usr/share/man/man1/hostname.1.gz", + "/usr/share/man/man1/nisdomainname.1.gz", + "/usr/share/man/man1/ypdomainname.1.gz", + ], + } + + mock = MagicMock( + return_value={"retcode": 1, "stderr": dpkg_error_msg, "stdout": ""} + ) + with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}): + assert dpkg.file_list("httpd") == "Error: " + dpkg_error_msg + + +# 'file_dict' function tests: 1 + + +def test_file_dict(): + """ + Test if it lists the files that belong to a package, grouped by package + """ + dpkg_query_mock = MagicMock( + return_value={"retcode": 0, "stderr": "", "stdout": "installed\thostname"} + ) + dpkg_L_mock = MagicMock(side_effect=dpkg_L_side_effect) + with patch.dict( + dpkg.__salt__, {"cmd.run_all": dpkg_query_mock, "cmd.run": dpkg_L_mock} + ): + expected = { + "errors": [], + "packages": { + "hostname": [ + "/.", + "/bin", + "/bin/hostname", + "/usr", + "/usr/share", + "/usr/share/doc", + "/usr/share/doc/hostname", + "/usr/share/doc/hostname/changelog.gz", + "/usr/share/doc/hostname/copyright", + "/usr/share/man", + "/usr/share/man/man1", + "/usr/share/man/man1/hostname.1.gz", + "/bin/dnsdomainname", + "/bin/domainname", + "/bin/nisdomainname", + "/bin/ypdomainname", + 
"/usr/share/man/man1/dnsdomainname.1.gz", + "/usr/share/man/man1/domainname.1.gz", + "/usr/share/man/man1/nisdomainname.1.gz", + "/usr/share/man/man1/ypdomainname.1.gz", + ] + }, + } + assert dpkg.file_dict("hostname") == expected + + mock = MagicMock( + return_value={"retcode": 1, "stderr": dpkg_error_msg, "stdout": ""} + ) + with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}): + assert dpkg.file_dict("httpd") == "Error: " + dpkg_error_msg + + +def test_bin_pkg_info_spaces(): + """ + Test the bin_pkg_info function + """ + file_proto_mock = MagicMock(return_value=True) + with patch.dict(dpkg.__salt__, {"config.valid_fileproto": file_proto_mock}): + cache_mock = MagicMock(return_value="/path/to/some/package.deb") + with patch.dict(dpkg.__salt__, {"cp.cache_file": cache_mock}): + dpkg_info_mock = MagicMock( + return_value={ + "retcode": 0, + "stderr": "", + "stdout": ( + " new Debian package, version 2.0\n" + " size 123456 bytes: control archive: 4029 bytes.\n" + " Package : package_name\n" + " Version : 1.0\n" + " Section : section_name\n" + " Priority : priority\n" + " Architecture : all\n" + " Description : some package\n" + ), + } + ) + with patch.dict(dpkg.__salt__, {"cmd.run_all": dpkg_info_mock}): + assert dpkg.bin_pkg_info("package.deb")["name"] == "package_name" + + +def test_bin_pkg_info_no_spaces(): + """ + Test the bin_pkg_info function + """ + file_proto_mock = MagicMock(return_value=True) + with patch.dict(dpkg.__salt__, {"config.valid_fileproto": file_proto_mock}): + cache_mock = MagicMock(return_value="/path/to/some/package.deb") + with patch.dict(dpkg.__salt__, {"cp.cache_file": cache_mock}): + dpkg_info_mock = MagicMock( + return_value={ + "retcode": 0, + "stderr": "", + "stdout": ( + " new Debian package, version 2.0\n" + " size 123456 bytes: control archive: 4029 bytes.\n" + " Package: package_name\n" + " Version: 1.0\n" + " Section: section_name\n" + " Priority: priority\n" + " Architecture: all\n" + " Description: some package\n" + ), + } + ) 
+ with patch.dict(dpkg.__salt__, {"cmd.run_all": dpkg_info_mock}): + assert dpkg.bin_pkg_info("package.deb")["name"] == "package_name" + + +def test_info(): + """ + Test package info + """ + mock = MagicMock( + return_value={ + "retcode": 0, + "stderr": "", + "stdout": os.linesep.join( + [ + "package:bash", + "revision:", + "architecture:amd64", + "maintainer:Ubuntu Developers" + " ", + "summary:", + "source:bash", + "version:4.4.18-2ubuntu1", + "section:shells", + "installed_size:1588", + "size:", + "MD5:", + "SHA1:", + "SHA256:", + "origin:", + "homepage:http://tiswww.case.edu/php/chet/bash/bashtop.html", + "status:ii ", + "description:GNU Bourne Again SHell", + " Bash is an sh-compatible command language interpreter that" + " executes", + " commands read from the standard input or from a file. Bash" + " also", + " incorporates useful features from the Korn and C shells (ksh" + " and csh).", + " .", + " Bash is ultimately intended to be a conformant implementation" + " of the", + " IEEE POSIX Shell and Tools specification (IEEE Working Group" + " 1003.2).", + " .", + " The Programmable Completion Code, by Ian Macdonald, is now" + " found in", + " the bash-completion package.", + "", + "*/~^\\*", # pylint: disable=W1401 + ] + ), + } + ) + + with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}), patch.dict( + dpkg.__grains__, {"os": "Ubuntu", "osrelease_info": (18, 4)} + ), patch("salt.utils.path.which", MagicMock(return_value=False)), patch( + "os.path.exists", MagicMock(return_value=False) + ), patch( + "os.path.getmtime", MagicMock(return_value=1560199259.0) + ): + assert dpkg.info("bash") == { + "bash": { + "architecture": "amd64", + "description": os.linesep.join( + [ + "GNU Bourne Again SHell", + " Bash is an sh-compatible command language interpreter" + " that executes", + " commands read from the standard input or from a file." 
+ " Bash also", + " incorporates useful features from the Korn and C" + " shells (ksh and csh).", + " .", + " Bash is ultimately intended to be a conformant" + " implementation of the", + " IEEE POSIX Shell and Tools specification (IEEE" + " Working Group 1003.2).", + " .", + " The Programmable Completion Code, by Ian Macdonald," + " is now found in", + " the bash-completion package." + os.linesep, + ] + ), + "homepage": "http://tiswww.case.edu/php/chet/bash/bashtop.html", + "maintainer": ( + "Ubuntu Developers " + ), + "package": "bash", + "section": "shells", + "source": "bash", + "status": "ii", + "version": "4.4.18-2ubuntu1", + } + } + + def test_get_pkg_license(): """ Test _get_pkg_license for ignore errors on reading license from copyright files diff --git a/tests/unit/modules/test_glusterfs.py b/tests/pytests/unit/modules/test_glusterfs.py similarity index 52% rename from tests/unit/modules/test_glusterfs.py rename to tests/pytests/unit/modules/test_glusterfs.py index a107ce6acd9d..be0d5b044623 100644 --- a/tests/unit/modules/test_glusterfs.py +++ b/tests/pytests/unit/modules/test_glusterfs.py @@ -1,14 +1,16 @@ """ :codeauthor: Jayesh Kariya :codeauthor: Joe Julian + + Test cases for salt.modules.glusterfs """ +import pytest + import salt.modules.glusterfs as glusterfs from salt.exceptions import SaltInvocationError -from tests.support.mixins import LoaderModuleMockMixin from tests.support.mock import MagicMock, patch -from tests.support.unit import TestCase class GlusterResults: @@ -498,395 +500,399 @@ class peer_probe: """ -class GlusterfsTestCase(TestCase, LoaderModuleMockMixin): +@pytest.fixture +def configure_loader_modules(): + return {glusterfs: {}} + +maxDiff = None + +# 'peer_status' function tests: 1 + + +def test__get_version(): """ - Test cases for salt.modules.glusterfs + Test parsing of gluster --version. 
""" + mock_version = MagicMock(return_value="foo") + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_version}): + assert glusterfs._get_version() == (3, 6), "default behaviour" + + mock_version = MagicMock(return_value=version_output_362) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_version}): + assert glusterfs._get_version() == (3, 6, 2) + + mock_version = MagicMock(return_value=version_output_61) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_version}): + assert glusterfs._get_version() == (6, 1) + + more_versions = { + "6.0": (6, 0), + "4.1.10": (4, 1, 10), + "5.13": (5, 13), + "10.0": (10, 0), + } + for v in more_versions: + mock_version = MagicMock(return_value=f"glusterfs {v}") + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_version}): + assert glusterfs._get_version() == more_versions[v] - def setup_loader_modules(self): - return {glusterfs: {}} - maxDiff = None +def test_peer_status(): + """ + Test gluster peer status + """ + mock_run = MagicMock(return_value=xml_peer_present) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.peer_status() == { + "uuid1": {"hostnames": ["node02", "node02.domain.dom", "10.0.0.2"]} + } - # 'peer_status' function tests: 1 + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.peer_status() == {} - def test__get_version(self): - """ - Test parsing of gluster --version. 
- """ - mock_version = MagicMock(return_value="foo") - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_version}): - self.assertEqual(glusterfs._get_version(), (3, 6), msg="default behaviour") - mock_version = MagicMock(return_value=version_output_362) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_version}): - self.assertEqual(glusterfs._get_version(), (3, 6, 2)) +# 'peer' function tests: 1 - mock_version = MagicMock(return_value=version_output_61) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_version}): - self.assertEqual(glusterfs._get_version(), (6, 1)) - more_versions = { - "6.0": (6, 0), - "4.1.10": (4, 1, 10), - "5.13": (5, 13), - "10.0": (10, 0), - } - for v in more_versions: - mock_version = MagicMock(return_value="glusterfs {}".format(v)) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_version}): - self.assertEqual(glusterfs._get_version(), more_versions[v]) - - def test_peer_status(self): - """ - Test gluster peer status - """ - mock_run = MagicMock(return_value=xml_peer_present) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertDictEqual( - glusterfs.peer_status(), - {"uuid1": {"hostnames": ["node02", "node02.domain.dom", "10.0.0.2"]}}, - ) +def test_peer(): + """ + Test if gluster peer call is successful. + """ + mock_run = MagicMock() + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + mock_run.return_value = xml_peer_probe_already_member + assert glusterfs.peer("salt") - mock_run = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertDictEqual(glusterfs.peer_status(), {}) + mock_run.return_value = xml_peer_probe_localhost + assert glusterfs.peer("salt") - # 'peer' function tests: 1 + mock_run.return_value = xml_peer_probe_fail_cant_connect + assert not glusterfs.peer("salt") - def test_peer(self): - """ - Test if gluster peer call is successful. 
- """ - mock_run = MagicMock() - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - mock_run.return_value = xml_peer_probe_already_member - self.assertTrue(glusterfs.peer("salt")) - mock_run.return_value = xml_peer_probe_localhost - self.assertTrue(glusterfs.peer("salt")) +# 'create_volume' function tests: 1 - mock_run.return_value = xml_peer_probe_fail_cant_connect - self.assertFalse(glusterfs.peer("salt")) - # 'create_volume' function tests: 1 +def test_create_volume(): + """ + Test if it creates a glusterfs volume. + """ + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + pytest.raises( + SaltInvocationError, glusterfs.create_volume, "newvolume", "host1:brick" + ) - def test_create_volume(self): - """ - Test if it creates a glusterfs volume. - """ - mock_run = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertRaises( - SaltInvocationError, glusterfs.create_volume, "newvolume", "host1:brick" - ) + pytest.raises( + SaltInvocationError, glusterfs.create_volume, "newvolume", "host1/brick" + ) - self.assertRaises( - SaltInvocationError, glusterfs.create_volume, "newvolume", "host1/brick" - ) + assert not mock_run.called - self.assertFalse(mock_run.called) + mock_start_volume = MagicMock(return_value=True) + with patch.object(glusterfs, "start_volume", mock_start_volume): + # Create, do not start + assert glusterfs.create_volume("newvolume", "host1:/brick") + assert not mock_start_volume.called - mock_start_volume = MagicMock(return_value=True) - with patch.object(glusterfs, "start_volume", mock_start_volume): - # Create, do not start - self.assertTrue(glusterfs.create_volume("newvolume", "host1:/brick")) - self.assertFalse(mock_start_volume.called) + # Create and start + assert glusterfs.create_volume("newvolume", "host1:/brick", start=True) + assert mock_start_volume.called - # Create and start - self.assertTrue( - 
glusterfs.create_volume("newvolume", "host1:/brick", start=True) - ) - self.assertTrue(mock_start_volume.called) + mock_start_volume.return_value = False + # Create and fail start + assert not glusterfs.create_volume("newvolume", "host1:/brick", start=True) - mock_start_volume.return_value = False - # Create and fail start - self.assertFalse( - glusterfs.create_volume("newvolume", "host1:/brick", start=True) - ) + mock_run.return_value = xml_command_fail + assert not glusterfs.create_volume( + "newvolume", "host1:/brick", True, True, True, "tcp", True + ) - mock_run.return_value = xml_command_fail - self.assertFalse( - glusterfs.create_volume( - "newvolume", "host1:/brick", True, True, True, "tcp", True - ) - ) - # 'list_volumes' function tests: 1 +# 'list_volumes' function tests: 1 + - def test_list_volumes(self): - """ - Test if it list configured volumes - """ - mock = MagicMock(return_value=xml_volume_absent) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock}): - self.assertListEqual(glusterfs.list_volumes(), []) +def test_list_volumes(): + """ + Test if it list configured volumes + """ + mock = MagicMock(return_value=xml_volume_absent) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock}): + assert glusterfs.list_volumes() == [] - mock = MagicMock(return_value=xml_volume_present) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock}): - self.assertListEqual(glusterfs.list_volumes(), ["Newvolume1", "Newvolume2"]) + mock = MagicMock(return_value=xml_volume_present) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock}): + assert glusterfs.list_volumes() == ["Newvolume1", "Newvolume2"] - # 'status' function tests: 1 - def test_status(self): - """ - Test if it check the status of a gluster volume. 
- """ - mock_run = MagicMock(return_value=xml_command_fail) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertIsNone(glusterfs.status("myvol1")) +# 'status' function tests: 1 + + +def test_status(): + """ + Test if it check the status of a gluster volume. + """ + mock_run = MagicMock(return_value=xml_command_fail) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.status("myvol1") is None + + res = { + "bricks": { + "node01:/tmp/foo": { + "host": "node01", + "hostname": "node01", + "online": True, + "path": "/tmp/foo", + "peerid": "830700d7-0684-497c-a12c-c02e365fb90b", + "pid": "2470", + "port": "49155", + "ports": {"rdma": "N/A", "tcp": "49155"}, + "status": "1", + } + }, + "healers": {}, + "nfs": { + "node01": { + "host": "NFS Server", + "hostname": "NFS Server", + "online": False, + "path": "localhost", + "peerid": "830700d7-0684-497c-a12c-c02e365fb90b", + "pid": "-1", + "port": "N/A", + "ports": {"rdma": "N/A", "tcp": "N/A"}, + "status": "0", + } + }, + } + mock = MagicMock(return_value=xml_volume_status) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock}): + assert glusterfs.status("myvol1") == res + - res = { +# 'start_volume' function tests: 1 + + +def test_volume_info(): + """ + Test if it returns the volume info. 
+ """ + res = { + "myvol1": { + "brickCount": "1", "bricks": { - "node01:/tmp/foo": { - "host": "node01", - "hostname": "node01", - "online": True, - "path": "/tmp/foo", - "peerid": "830700d7-0684-497c-a12c-c02e365fb90b", - "pid": "2470", - "port": "49155", - "ports": {"rdma": "N/A", "tcp": "49155"}, - "status": "1", - } - }, - "healers": {}, - "nfs": { - "node01": { - "host": "NFS Server", - "hostname": "NFS Server", - "online": False, - "path": "localhost", - "peerid": "830700d7-0684-497c-a12c-c02e365fb90b", - "pid": "-1", - "port": "N/A", - "ports": {"rdma": "N/A", "tcp": "N/A"}, - "status": "0", + "brick1": { + "hostUuid": "830700d7-0684-497c-a12c-c02e365fb90b", + "path": "node01:/tmp/foo", + "uuid": "830700d7-0684-497c-a12c-c02e365fb90b", } }, + "disperseCount": "0", + "distCount": "1", + "id": "f03c2180-cf55-4f77-ae0b-3650f57c82a1", + "name": "myvol1", + "optCount": "1", + "options": {"performance.readdir-ahead": "on"}, + "redundancyCount": "0", + "replicaCount": "1", + "status": "1", + "statusStr": "Started", + "stripeCount": "1", + "transport": "0", + "type": "0", + "typeStr": "Distribute", } - mock = MagicMock(return_value=xml_volume_status) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock}): - self.assertDictEqual(glusterfs.status("myvol1"), res) - - # 'start_volume' function tests: 1 - - def test_volume_info(self): - """ - Test if it returns the volume info. - """ - res = { - "myvol1": { - "brickCount": "1", + } + mock = MagicMock(return_value=xml_volume_info_running) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock}): + assert glusterfs.info("myvol1") == res + + +def test_start_volume(): + """ + Test if it start a gluster volume. 
+ """ + # Stopped volume + mock_info = MagicMock(return_value={"Newvolume1": {"status": "0"}}) + with patch.object(glusterfs, "info", mock_info): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.start_volume("Newvolume1") is True + assert glusterfs.start_volume("nonExisting") is False + mock_run = MagicMock(return_value=xml_command_fail) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.start_volume("Newvolume1") is False + + # Started volume + mock_info = MagicMock(return_value={"Newvolume1": {"status": "1"}}) + with patch.object(glusterfs, "info", mock_info): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.start_volume("Newvolume1", force=True) is True + mock_run = MagicMock(return_value=xml_command_fail) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + # cmd.run should not be called for already running volume: + assert glusterfs.start_volume("Newvolume1") is True + # except when forcing: + assert glusterfs.start_volume("Newvolume1", force=True) is False + + +# 'stop_volume' function tests: 1 + + +def test_stop_volume(): + """ + Test if it stop a gluster volume. 
+ """ + # Stopped volume + mock_info = MagicMock(return_value={"Newvolume1": {"status": "0"}}) + with patch.object(glusterfs, "info", mock_info): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.stop_volume("Newvolume1") is True + assert glusterfs.stop_volume("nonExisting") is False + mock_run = MagicMock(return_value=xml_command_fail) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + # cmd.run should not be called for already stopped volume: + assert glusterfs.stop_volume("Newvolume1") is True + + # Started volume + mock_info = MagicMock(return_value={"Newvolume1": {"status": "1"}}) + with patch.object(glusterfs, "info", mock_info): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.stop_volume("Newvolume1") is True + assert glusterfs.stop_volume("nonExisting") is False + mock_run = MagicMock(return_value=xml_command_fail) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.stop_volume("Newvolume1") is False + + +# 'delete_volume' function tests: 1 + + +def test_delete_volume(): + """ + Test if it deletes a gluster volume. 
+ """ + mock_info = MagicMock(return_value={"Newvolume1": {"status": "1"}}) + with patch.object(glusterfs, "info", mock_info): + # volume doesn't exist + assert not glusterfs.delete_volume("Newvolume3") + + mock_stop_volume = MagicMock(return_value=True) + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + with patch.object(glusterfs, "stop_volume", mock_stop_volume): + # volume exists, should not be stopped, and is started + assert not glusterfs.delete_volume("Newvolume1", False) + assert not mock_run.called + assert not mock_stop_volume.called + + # volume exists, should be stopped, and is started + assert glusterfs.delete_volume("Newvolume1") + assert mock_run.called + assert mock_stop_volume.called + + # volume exists and isn't started + mock_info = MagicMock(return_value={"Newvolume1": {"status": "2"}}) + with patch.object(glusterfs, "info", mock_info): + mock_run = MagicMock(return_value=xml_command_success) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.delete_volume("Newvolume1") + mock_run.return_value = xml_command_fail + assert not glusterfs.delete_volume("Newvolume1") + + +# 'add_volume_bricks' function tests: 1 + + +def test_add_volume_bricks(): + """ + Test if it add brick(s) to an existing volume + """ + mock_info = MagicMock( + return_value={ + "Newvolume1": { + "status": "1", "bricks": { - "brick1": { - "hostUuid": "830700d7-0684-497c-a12c-c02e365fb90b", - "path": "node01:/tmp/foo", - "uuid": "830700d7-0684-497c-a12c-c02e365fb90b", - } + "brick1": {"path": "host:/path1"}, + "brick2": {"path": "host:/path2"}, }, - "disperseCount": "0", - "distCount": "1", - "id": "f03c2180-cf55-4f77-ae0b-3650f57c82a1", - "name": "myvol1", - "optCount": "1", - "options": {"performance.readdir-ahead": "on"}, - "redundancyCount": "0", - "replicaCount": "1", - "status": "1", - "statusStr": "Started", - "stripeCount": "1", - "transport": "0", - "type": "0", - "typeStr": 
"Distribute", } } - mock = MagicMock(return_value=xml_volume_info_running) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock}): - self.assertDictEqual(glusterfs.info("myvol1"), res) - - def test_start_volume(self): - """ - Test if it start a gluster volume. - """ - # Stopped volume - mock_info = MagicMock(return_value={"Newvolume1": {"status": "0"}}) - with patch.object(glusterfs, "info", mock_info): - mock_run = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertEqual(glusterfs.start_volume("Newvolume1"), True) - self.assertEqual(glusterfs.start_volume("nonExisting"), False) - mock_run = MagicMock(return_value=xml_command_fail) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertEqual(glusterfs.start_volume("Newvolume1"), False) - - # Started volume - mock_info = MagicMock(return_value={"Newvolume1": {"status": "1"}}) - with patch.object(glusterfs, "info", mock_info): - mock_run = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertEqual(glusterfs.start_volume("Newvolume1", force=True), True) - mock_run = MagicMock(return_value=xml_command_fail) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - # cmd.run should not be called for already running volume: - self.assertEqual(glusterfs.start_volume("Newvolume1"), True) - # except when forcing: - self.assertEqual( - glusterfs.start_volume("Newvolume1", force=True), False - ) - - # 'stop_volume' function tests: 1 - - def test_stop_volume(self): - """ - Test if it stop a gluster volume. 
- """ - # Stopped volume - mock_info = MagicMock(return_value={"Newvolume1": {"status": "0"}}) - with patch.object(glusterfs, "info", mock_info): - mock_run = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertEqual(glusterfs.stop_volume("Newvolume1"), True) - self.assertEqual(glusterfs.stop_volume("nonExisting"), False) - mock_run = MagicMock(return_value=xml_command_fail) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - # cmd.run should not be called for already stopped volume: - self.assertEqual(glusterfs.stop_volume("Newvolume1"), True) - - # Started volume - mock_info = MagicMock(return_value={"Newvolume1": {"status": "1"}}) - with patch.object(glusterfs, "info", mock_info): - mock_run = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertEqual(glusterfs.stop_volume("Newvolume1"), True) - self.assertEqual(glusterfs.stop_volume("nonExisting"), False) - mock_run = MagicMock(return_value=xml_command_fail) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertEqual(glusterfs.stop_volume("Newvolume1"), False) - - # 'delete_volume' function tests: 1 - - def test_delete_volume(self): - """ - Test if it deletes a gluster volume. 
- """ - mock_info = MagicMock(return_value={"Newvolume1": {"status": "1"}}) - with patch.object(glusterfs, "info", mock_info): - # volume doesn't exist - self.assertFalse(glusterfs.delete_volume("Newvolume3")) - - mock_stop_volume = MagicMock(return_value=True) - mock_run = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - with patch.object(glusterfs, "stop_volume", mock_stop_volume): - # volume exists, should not be stopped, and is started - self.assertFalse(glusterfs.delete_volume("Newvolume1", False)) - self.assertFalse(mock_run.called) - self.assertFalse(mock_stop_volume.called) - - # volume exists, should be stopped, and is started - self.assertTrue(glusterfs.delete_volume("Newvolume1")) - self.assertTrue(mock_run.called) - self.assertTrue(mock_stop_volume.called) - - # volume exists and isn't started - mock_info = MagicMock(return_value={"Newvolume1": {"status": "2"}}) - with patch.object(glusterfs, "info", mock_info): - mock_run = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertTrue(glusterfs.delete_volume("Newvolume1")) - mock_run.return_value = xml_command_fail - self.assertFalse(glusterfs.delete_volume("Newvolume1")) - - # 'add_volume_bricks' function tests: 1 - - def test_add_volume_bricks(self): - """ - Test if it add brick(s) to an existing volume - """ - mock_info = MagicMock( - return_value={ - "Newvolume1": { - "status": "1", - "bricks": { - "brick1": {"path": "host:/path1"}, - "brick2": {"path": "host:/path2"}, - }, - } - } - ) - with patch.object(glusterfs, "info", mock_info): - mock_run = MagicMock(return_value=xml_command_success) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - # Volume does not exist - self.assertFalse(glusterfs.add_volume_bricks("nonExisting", ["bricks"])) - # Brick already exists - self.assertTrue( - glusterfs.add_volume_bricks("Newvolume1", ["host:/path2"]) - ) - # Already existing 
brick as a string - self.assertTrue( - glusterfs.add_volume_bricks("Newvolume1", "host:/path2") - ) - self.assertFalse(mock_run.called) - # A new brick: - self.assertTrue( - glusterfs.add_volume_bricks("Newvolume1", ["host:/new1"]) - ) - self.assertTrue(mock_run.called) - - # Gluster call fails - mock_run.return_value = xml_command_fail - self.assertFalse( - glusterfs.add_volume_bricks("Newvolume1", ["new:/path"]) - ) - - # 'get_op_version' function tests: 1 - - def test_get_op_version(self): - """ - Test retrieving the glusterfs op-version - """ - - # Test with xml output structure from v3.7 - mock_run = MagicMock(return_value=xml_op_version_37) + ) + with patch.object(glusterfs, "info", mock_info): + mock_run = MagicMock(return_value=xml_command_success) with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertEqual(glusterfs.get_op_version("test"), "30707") + # Volume does not exist + assert not glusterfs.add_volume_bricks("nonExisting", ["bricks"]) + # Brick already exists + assert glusterfs.add_volume_bricks("Newvolume1", ["host:/path2"]) + # Already existing brick as a string + assert glusterfs.add_volume_bricks("Newvolume1", "host:/path2") + assert not mock_run.called + # A new brick: + assert glusterfs.add_volume_bricks("Newvolume1", ["host:/new1"]) + assert mock_run.called + + # Gluster call fails + mock_run.return_value = xml_command_fail + assert not glusterfs.add_volume_bricks("Newvolume1", ["new:/path"]) - # Test with xml output structure from v3.12 - mock_run = MagicMock(return_value=xml_op_version_312) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): - self.assertEqual(glusterfs.get_op_version("test"), "30707") - # 'get_max_op_version' function tests: 1 +# 'get_op_version' function tests: 1 + - def test_get_max_op_version(self): - """ - Test retrieving the glusterfs max-op-version. 
- """ +def test_get_op_version(): + """ + Test retrieving the glusterfs op-version + """ - mock_xml = MagicMock(return_value=xml_max_op_version) - mock_version = MagicMock(return_value="glusterfs 3.9.1") + # Test with xml output structure from v3.7 + mock_run = MagicMock(return_value=xml_op_version_37) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.get_op_version("test") == "30707" + + # Test with xml output structure from v3.12 + mock_run = MagicMock(return_value=xml_op_version_312) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_run}): + assert glusterfs.get_op_version("test") == "30707" - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_version}): - self.assertFalse(glusterfs.get_max_op_version()[0]) - with patch.object(glusterfs, "_get_version", return_value=(3, 12, 0)): - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_xml}): - self.assertEqual(glusterfs.get_max_op_version(), "31200") +# 'get_max_op_version' function tests: 1 + + +def test_get_max_op_version(): + """ + Test retrieving the glusterfs max-op-version. 
+ """ - # 'set_op_version' function tests: 1 + mock_xml = MagicMock(return_value=xml_max_op_version) + mock_version = MagicMock(return_value="glusterfs 3.9.1") - def test_set_op_version(self): - """ - Test setting the glusterfs op-version - """ - mock_failure = MagicMock(return_value=xml_set_op_version_failure) - mock_success = MagicMock(return_value=xml_set_op_version_success) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_version}): + assert not glusterfs.get_max_op_version()[0] + + with patch.object(glusterfs, "_get_version", return_value=(3, 12, 0)): + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_xml}): + assert glusterfs.get_max_op_version() == "31200" + + +# 'set_op_version' function tests: 1 + + +def test_set_op_version(): + """ + Test setting the glusterfs op-version + """ + mock_failure = MagicMock(return_value=xml_set_op_version_failure) + mock_success = MagicMock(return_value=xml_set_op_version_success) - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_failure}): - self.assertFalse(glusterfs.set_op_version(30707)[0]) + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_failure}): + assert not glusterfs.set_op_version(30707)[0] - with patch.dict(glusterfs.__salt__, {"cmd.run": mock_success}): - self.assertEqual(glusterfs.set_op_version(31200), "Set volume successful") + with patch.dict(glusterfs.__salt__, {"cmd.run": mock_success}): + assert glusterfs.set_op_version(31200) == "Set volume successful" diff --git a/tests/pytests/unit/modules/test_mac_keychain.py b/tests/pytests/unit/modules/test_mac_keychain.py index eb411e69b57b..bbf9d20aaf41 100644 --- a/tests/pytests/unit/modules/test_mac_keychain.py +++ b/tests/pytests/unit/modules/test_mac_keychain.py @@ -65,7 +65,7 @@ def test_list_certs(): out = keychain.list_certs("/path/to/cert.p12") mock.assert_called_once_with( "security find-certificate -a /path/to/cert.p12 | " - 'grep -o "alis".*\\" | grep -o \'\\"[-A-Za-z0-9.:() ]*\\"\'', + 'grep -o "alis.*" | grep -o \'\\"[-A-Za-z0-9.:() 
]*\\"\'', python_shell=True, ) @@ -79,7 +79,18 @@ def test_get_friendly_name(): expected = "ID Installer Salt" mock = MagicMock(return_value="friendlyName: ID Installer Salt") with patch.dict(keychain.__salt__, {"cmd.run": mock}): - out = keychain.get_friendly_name("/path/to/cert.p12", "passw0rd") + out = keychain.get_friendly_name("/path/to/cert.p12", "passw0rd", legacy=True) + mock.assert_called_once_with( + "openssl pkcs12 -legacy -in /path/to/cert.p12 -passin pass:passw0rd -info " + "-nodes -nokeys 2> /dev/null | grep friendlyName:", + python_shell=True, + ) + + assert out == expected + + mock = MagicMock(return_value="friendlyName: ID Installer Salt") + with patch.dict(keychain.__salt__, {"cmd.run": mock}): + out = keychain.get_friendly_name("/path/to/cert.p12", "passw0rd", legacy=False) mock.assert_called_once_with( "openssl pkcs12 -in /path/to/cert.p12 -passin pass:passw0rd -info " "-nodes -nokeys 2> /dev/null | grep friendlyName:", diff --git a/tests/pytests/unit/modules/test_openscap.py b/tests/pytests/unit/modules/test_openscap.py index a40dc6124335..627a53ffb389 100644 --- a/tests/pytests/unit/modules/test_openscap.py +++ b/tests/pytests/unit/modules/test_openscap.py @@ -5,35 +5,31 @@ import salt.modules.openscap as openscap from tests.support.mock import MagicMock, Mock, patch -policy_file = "/usr/share/openscap/policy-file-xccdf.xml" - @pytest.fixture -def random_temp_dir(tmp_path): - tmp_dir = tmp_path / "unique" - tmp_dir.mkdir() - return str(tmp_dir) +def policy_file(): + yield "/usr/share/openscap/policy-file-xccdf.xml" @pytest.fixture -def configure_loader_modules(random_temp_dir): +def configure_loader_modules(tmp_path): + random_temp_dir = tmp_path / "unique" + random_temp_dir.mkdir() with patch("salt.modules.openscap.shutil.rmtree", Mock()), patch( "salt.modules.openscap.tempfile.mkdtemp", - Mock(return_value=random_temp_dir), + Mock(return_value=str(random_temp_dir)), ), patch("salt.modules.openscap.os.path.exists", Mock(return_value=True)): 
yield {openscap: {"__salt__": {"cp.push_dir": MagicMock()}}} -def test_openscap_xccdf_eval_success(random_temp_dir): - with patch( - "salt.modules.openscap.Popen", - MagicMock( - return_value=Mock(**{"returncode": 0, "communicate.return_value": ("", "")}) - ), - ): +def test_openscap_xccdf_eval_success(policy_file): + mock_popen = MagicMock( + return_value=Mock(**{"returncode": 0, "communicate.return_value": ("", "")}) + ) + patch_popen = patch("salt.modules.openscap.subprocess.Popen", mock_popen) + with patch_popen: response = openscap.xccdf(f"eval --profile Default {policy_file}") - assert openscap.tempfile.mkdtemp.call_count == 1 expected_cmd = [ "oscap", "xccdf", @@ -47,34 +43,35 @@ def test_openscap_xccdf_eval_success(random_temp_dir): "Default", policy_file, ] - openscap.Popen.assert_called_once_with( + openscap.subprocess.Popen.assert_called_once_with( expected_cmd, cwd=openscap.tempfile.mkdtemp.return_value, stderr=subprocess.PIPE, stdout=subprocess.PIPE, ) - openscap.__salt__["cp.push_dir"].assert_called_once_with(random_temp_dir) + openscap.__salt__["cp.push_dir"].assert_called_once_with( + openscap.tempfile.mkdtemp.return_value + ) assert openscap.shutil.rmtree.call_count == 1 - assert response == { - "upload_dir": random_temp_dir, + expected = { + "upload_dir": openscap.tempfile.mkdtemp.return_value, "error": "", "success": True, "returncode": 0, } + assert response == expected -def test_openscap_xccdf_eval_success_with_failing_rules(random_temp_dir): - with patch( - "salt.modules.openscap.Popen", - MagicMock( - return_value=Mock( - **{"returncode": 2, "communicate.return_value": ("", "some error")} - ) - ), - ): +def test_openscap_xccdf_eval_success_with_failing_rules(policy_file): + mock_popen = MagicMock( + return_value=Mock( + **{"returncode": 2, "communicate.return_value": ("", "some error")} + ) + ) + patch_popen = patch("salt.modules.openscap.subprocess.Popen", mock_popen) + with patch_popen: response = openscap.xccdf(f"eval --profile Default 
{policy_file}") - assert openscap.tempfile.mkdtemp.call_count == 1 expected_cmd = [ "oscap", "xccdf", @@ -88,49 +85,52 @@ def test_openscap_xccdf_eval_success_with_failing_rules(random_temp_dir): "Default", policy_file, ] - openscap.Popen.assert_called_once_with( + openscap.subprocess.Popen.assert_called_once_with( expected_cmd, cwd=openscap.tempfile.mkdtemp.return_value, stderr=subprocess.PIPE, stdout=subprocess.PIPE, ) - openscap.__salt__["cp.push_dir"].assert_called_once_with(random_temp_dir) - assert openscap.shutil.rmtree.call_count == 1 - assert response == { - "upload_dir": random_temp_dir, + openscap.__salt__["cp.push_dir"].assert_called_once_with( + openscap.tempfile.mkdtemp.return_value + ) + expected = { + "upload_dir": openscap.tempfile.mkdtemp.return_value, "error": "some error", "success": True, "returncode": 2, } + assert response == expected def test_openscap_xccdf_eval_fail_no_profile(): response = openscap.xccdf("eval --param Default /unknown/param") error = "the following arguments are required: --profile" - assert response == { + expected = { "error": error, "upload_dir": None, "success": False, "returncode": None, } + assert response == expected -def test_openscap_xccdf_eval_success_ignore_unknown_params(random_temp_dir): - with patch( - "salt.modules.openscap.Popen", - MagicMock( - return_value=Mock( - **{"returncode": 2, "communicate.return_value": ("", "some error")} - ) - ), - ): +def test_openscap_xccdf_eval_success_ignore_unknown_params(): + mock_popen = MagicMock( + return_value=Mock( + **{"returncode": 2, "communicate.return_value": ("", "some error")} + ) + ) + patch_popen = patch("salt.modules.openscap.subprocess.Popen", mock_popen) + with patch_popen: response = openscap.xccdf("eval --profile Default --param Default /policy/file") - assert response == { - "upload_dir": random_temp_dir, + expected = { + "upload_dir": openscap.tempfile.mkdtemp.return_value, "error": "some error", "success": True, "returncode": 2, } + assert response == 
expected expected_cmd = [ "oscap", "xccdf", @@ -144,7 +144,7 @@ def test_openscap_xccdf_eval_success_ignore_unknown_params(random_temp_dir): "Default", "/policy/file", ] - openscap.Popen.assert_called_once_with( + openscap.subprocess.Popen.assert_called_once_with( expected_cmd, cwd=openscap.tempfile.mkdtemp.return_value, stderr=subprocess.PIPE, @@ -152,43 +152,63 @@ def test_openscap_xccdf_eval_success_ignore_unknown_params(random_temp_dir): ) -def test_openscap_xccdf_eval_evaluation_error(): - with patch( - "salt.modules.openscap.Popen", - MagicMock( - return_value=Mock( - **{ - "returncode": 1, - "communicate.return_value": ("", "evaluation error"), - } - ) - ), - ): +def test_openscap_xccdf_eval_evaluation_error(policy_file): + mock_popen = MagicMock( + return_value=Mock( + **{ + "returncode": 1, + "communicate.return_value": ("", "evaluation error"), + } + ) + ) + patch_popen = patch("salt.modules.openscap.subprocess.Popen", mock_popen) + with patch_popen: response = openscap.xccdf(f"eval --profile Default {policy_file}") - - assert response == { + expected = { "upload_dir": None, "error": "evaluation error", "success": False, "returncode": 1, } + assert response == expected -def test_openscap_xccdf_eval_fail_not_implemented_action(): +def test_openscap_xccdf_eval_fail_not_implemented_action(policy_file): response = openscap.xccdf(f"info {policy_file}") mock_err = "argument action: invalid choice: 'info' (choose from 'eval')" - - assert response == { + expected = { "upload_dir": None, "error": mock_err, "success": False, "returncode": None, } + assert response == expected -def test_new_openscap_xccdf_eval_success(random_temp_dir): +def test_openscap_xccdf_eval_evaluation_unknown_error(policy_file): + mock_popen = MagicMock( + return_value=Mock( + **{ + "returncode": 255, + "communicate.return_value": ("", "unknown error"), + } + ) + ) + patch_popen = patch("salt.modules.openscap.subprocess.Popen", mock_popen) + with patch_popen: + response = 
openscap.xccdf(f"eval --profile Default {policy_file}") + expected = { + "upload_dir": None, + "error": "unknown error", + "success": False, + "returncode": 255, + } + assert response == expected + + +def test_new_openscap_xccdf_eval_success(policy_file): with patch( - "salt.modules.openscap.Popen", + "salt.modules.openscap.subprocess.Popen", MagicMock( return_value=Mock(**{"returncode": 0, "communicate.return_value": ("", "")}) ), @@ -215,25 +235,27 @@ def test_new_openscap_xccdf_eval_success(random_temp_dir): "Default", policy_file, ] - openscap.Popen.assert_called_once_with( + openscap.subprocess.Popen.assert_called_once_with( expected_cmd, cwd=openscap.tempfile.mkdtemp.return_value, stderr=subprocess.PIPE, stdout=subprocess.PIPE, ) - openscap.__salt__["cp.push_dir"].assert_called_once_with(random_temp_dir) + openscap.__salt__["cp.push_dir"].assert_called_once_with( + openscap.tempfile.mkdtemp.return_value + ) assert openscap.shutil.rmtree.call_count == 1 assert response == { - "upload_dir": random_temp_dir, + "upload_dir": openscap.tempfile.mkdtemp.return_value, "error": "", "success": True, "returncode": 0, } -def test_new_openscap_xccdf_eval_success_with_extra_ovalfiles(random_temp_dir): +def test_new_openscap_xccdf_eval_success_with_extra_ovalfiles(policy_file): with patch( - "salt.modules.openscap.Popen", + "salt.modules.openscap.subprocess.Popen", MagicMock( return_value=Mock(**{"returncode": 0, "communicate.return_value": ("", "")}) ), @@ -247,7 +269,6 @@ def test_new_openscap_xccdf_eval_success_with_extra_ovalfiles(random_temp_dir): report="report.html", ) - assert openscap.tempfile.mkdtemp.call_count == 1 expected_cmd = [ "oscap", "xccdf", @@ -263,25 +284,27 @@ def test_new_openscap_xccdf_eval_success_with_extra_ovalfiles(random_temp_dir): "/usr/share/xml/another-oval.xml", "/usr/share/xml/oval.xml", ] - openscap.Popen.assert_called_once_with( + openscap.subprocess.Popen.assert_called_once_with( expected_cmd, cwd=openscap.tempfile.mkdtemp.return_value, 
stderr=subprocess.PIPE, stdout=subprocess.PIPE, ) - openscap.__salt__["cp.push_dir"].assert_called_once_with(random_temp_dir) + openscap.__salt__["cp.push_dir"].assert_called_once_with( + openscap.tempfile.mkdtemp.return_value + ) assert openscap.shutil.rmtree.call_count == 1 assert response == { - "upload_dir": random_temp_dir, + "upload_dir": openscap.tempfile.mkdtemp.return_value, "error": "", "success": True, "returncode": 0, } -def test_new_openscap_xccdf_eval_success_with_failing_rules(random_temp_dir): +def test_new_openscap_xccdf_eval_success_with_failing_rules(policy_file): with patch( - "salt.modules.openscap.Popen", + "salt.modules.openscap.subprocess.Popen", MagicMock( return_value=Mock( **{"returncode": 2, "communicate.return_value": ("", "some error")} @@ -310,25 +333,27 @@ def test_new_openscap_xccdf_eval_success_with_failing_rules(random_temp_dir): "Default", policy_file, ] - openscap.Popen.assert_called_once_with( + openscap.subprocess.Popen.assert_called_once_with( expected_cmd, cwd=openscap.tempfile.mkdtemp.return_value, stderr=subprocess.PIPE, stdout=subprocess.PIPE, ) - openscap.__salt__["cp.push_dir"].assert_called_once_with(random_temp_dir) + openscap.__salt__["cp.push_dir"].assert_called_once_with( + openscap.tempfile.mkdtemp.return_value + ) assert openscap.shutil.rmtree.call_count == 1 assert response == { - "upload_dir": random_temp_dir, + "upload_dir": openscap.tempfile.mkdtemp.return_value, "error": "some error", "success": True, "returncode": 2, } -def test_new_openscap_xccdf_eval_evaluation_error(): +def test_new_openscap_xccdf_eval_evaluation_error(policy_file): with patch( - "salt.modules.openscap.Popen", + "salt.modules.openscap.subprocess.Popen", MagicMock( return_value=Mock( **{ diff --git a/tests/pytests/unit/modules/test_ps.py b/tests/pytests/unit/modules/test_ps.py index 074bf9007e15..99540b243d21 100644 --- a/tests/pytests/unit/modules/test_ps.py +++ b/tests/pytests/unit/modules/test_ps.py @@ -6,7 +6,8 @@ import 
salt.modules.ps import salt.modules.ps as ps import salt.utils.data -from salt.exceptions import SaltInvocationError +import salt.utils.platform +from salt.exceptions import CommandExecutionError, SaltInvocationError from tests.support.mock import MagicMock, Mock, call, patch psutil = pytest.importorskip("salt.utils.psutil_compat") @@ -14,6 +15,11 @@ # TestCase Exceptions are tested in tests/unit/modules/test_ps.py +@pytest.fixture +def configure_loader_modules(): + return {ps: {}} + + @pytest.fixture def sample_process(): status = b"fnord" @@ -135,9 +141,13 @@ def test__status_when_some_matching_processes_then_only_correct_info_should_be_r HAS_PSUTIL_VERSION = False -PSUTIL2 = psutil.version_info >= (2, 0) - STUB_CPU_TIMES = namedtuple("cputimes", "user nice system idle")(1, 2, 3, 4) +STUB_CPU_TIMES_PERCPU = [ + namedtuple("cputimes", "user nice system idle")(1, 2, 3, 4), + namedtuple("cputimes", "user nice system idle")(1, 2, 3, 4), + namedtuple("cputimes", "user nice system idle")(1, 2, 3, 4), + namedtuple("cputimes", "user nice system idle")(1, 2, 3, 4), +] STUB_VIRT_MEM = namedtuple("vmem", "total available percent used free")( 1000, 500, 50, 500, 500 ) @@ -153,9 +163,39 @@ def test__status_when_some_matching_processes_then_only_correct_info_should_be_r "iostat", "bytes_sent, bytes_recv, packets_sent, packets_recv, errin errout dropin dropout", )(1000, 2000, 500, 600, 1, 2, 3, 4) +STUB_NETWORK_IO_PERNIC = { + "lo": STUB_NETWORK_IO, + "eth0": STUB_NETWORK_IO, + "eth1": STUB_NETWORK_IO, +} STUB_DISK_IO = namedtuple( "iostat", "read_count, write_count, read_bytes, write_bytes, read_time, write_time" )(1000, 2000, 500, 600, 2000, 3000) +STUB_DISK_IO_PERDISK = { + "nvme0n1": STUB_DISK_IO, + "nvme0n1p1": STUB_DISK_IO, + "nvme0n1p2": STUB_DISK_IO, + "nvme0n1p3": STUB_DISK_IO, +} + + +@pytest.fixture +def stub_memory_usage(): + return namedtuple( + "vmem", + "total available percent used free active inactive buffers cached shared", + )( + 15722012672, + 9329594368, + 
40.7, + 5137018880, + 4678086656, + 6991405056, + 2078953472, + 1156378624, + 4750528512, + 898908160, + ) @pytest.fixture(scope="module") @@ -180,7 +220,7 @@ def stub_user(): def _get_proc_name(proc): - return proc.name() if PSUTIL2 else proc.name + return proc.name() def _get_proc_pid(proc): @@ -202,6 +242,7 @@ def __init__( status=None, username=None, pid=None, + cpu_times=None, ): self._cmdline = salt.utils.data.decode( cmdline if cmdline is not None else [], to_str=True @@ -218,6 +259,25 @@ def __init__( pid if pid is not None else 12345, to_str=True ) + if salt.utils.platform.is_windows(): + scputimes = namedtuple( + "scputimes", ["user", "system", "children_user", "children_system"] + ) + dummy_cpu_times = scputimes(7713.79, 1278.44, 17114.2, 2023.36) + else: + scputimes = namedtuple( + "scputimes", + ["user", "system", "children_user", "children_system", "iowait"], + ) + dummy_cpu_times = scputimes(7713.79, 1278.44, 17114.2, 2023.36, 0.0) + self._cpu_times = cpu_times if cpu_times is not None else dummy_cpu_times + + def __enter__(self): + pass + + def __exit__(self): + pass + def cmdline(self): return self._cmdline @@ -236,16 +296,18 @@ def username(self): def pid(self): return self._pid + def cpu_times(self): + return self._cpu_times + @pytest.fixture def mocked_proc(): mocked_proc = MagicMock("salt.utils.psutil_compat.Process") - if PSUTIL2: - mocked_proc.name = Mock(return_value="test_mock_proc") - mocked_proc.pid = Mock(return_value=9999999999) - else: - mocked_proc.name = "test_mock_proc" - mocked_proc.pid = 9999999999 + mocked_proc.name = Mock(return_value="test_mock_proc") + mocked_proc.pid = Mock(return_value=9999999999) + mocked_proc.cmdline = Mock( + return_value=["test_mock_proc", "--arg", "--kwarg=value"] + ) with patch("salt.utils.psutil_compat.Process.send_signal"), patch( "salt.utils.psutil_compat.process_iter", @@ -254,12 +316,115 @@ def mocked_proc(): yield mocked_proc -@pytest.mark.skipif(not ps.PSUTIL2, reason="Only run for psutil 2.x") 
def test__get_proc_cmdline(): cmdline = ["echo", "питон"] ret = ps._get_proc_cmdline(DummyProcess(cmdline=cmdline)) assert ret == cmdline, ret + with patch.object(DummyProcess, "cmdline") as mock_cmdline: + mock_cmdline.side_effect = psutil.NoSuchProcess(DummyProcess(cmdline=cmdline)) + ret = ps._get_proc_cmdline(DummyProcess(cmdline=cmdline)) + assert ret == [] + + with patch.object(DummyProcess, "cmdline") as mock_cmdline: + mock_cmdline.side_effect = psutil.AccessDenied(DummyProcess(cmdline=cmdline)) + ret = ps._get_proc_cmdline(DummyProcess(cmdline=cmdline)) + assert ret == [] + + +def test__get_proc_create_time(): + cmdline = ["echo", "питон"] + create_time = 1694729500.1093624 + ret = ps._get_proc_create_time( + DummyProcess(cmdline=cmdline, create_time=create_time) + ) + assert ret == create_time + + with patch.object(DummyProcess, "create_time") as mock_create_time: + mock_create_time.side_effect = psutil.NoSuchProcess( + DummyProcess(cmdline=cmdline, create_time=create_time) + ) + ret = ps._get_proc_create_time( + DummyProcess(cmdline=cmdline, create_time=create_time) + ) + assert ret is None + + with patch.object(DummyProcess, "create_time") as mock_create_time: + mock_create_time.side_effect = psutil.AccessDenied( + DummyProcess(cmdline=cmdline, create_time=create_time) + ) + ret = ps._get_proc_create_time( + DummyProcess(cmdline=cmdline, create_time=create_time) + ) + assert ret is None + + +def test__get_proc_name(): + cmdline = ["echo", "питон"] + proc_name = "proc_name" + ret = ps._get_proc_name(DummyProcess(cmdline=cmdline, name=proc_name)) + assert ret == proc_name + + with patch.object(DummyProcess, "name") as mock_name: + mock_name.side_effect = psutil.NoSuchProcess( + DummyProcess(cmdline=cmdline, name=proc_name) + ) + ret = ps._get_proc_name(DummyProcess(cmdline=cmdline, name=proc_name)) + assert ret == [] + + with patch.object(DummyProcess, "name") as mock_name: + mock_name.side_effect = psutil.AccessDenied( + DummyProcess(cmdline=cmdline, 
name=proc_name) + ) + ret = ps._get_proc_name(DummyProcess(cmdline=cmdline, name=proc_name)) + assert ret == [] + + +def test__get_proc_status(): + cmdline = ["echo", "питон"] + proc_status = "sleeping" + ret = ps._get_proc_status(DummyProcess(cmdline=cmdline, status=proc_status)) + assert ret == proc_status + + with patch.object(DummyProcess, "status") as mock_status: + mock_status.side_effect = psutil.NoSuchProcess( + DummyProcess(cmdline=cmdline, status=proc_status) + ) + ret = ps._get_proc_status(DummyProcess(cmdline=cmdline, status=proc_status)) + assert ret is None + + with patch.object(DummyProcess, "status") as mock_status: + mock_status.side_effect = psutil.AccessDenied( + DummyProcess(cmdline=cmdline, status=proc_status) + ) + ret = ps._get_proc_status(DummyProcess(cmdline=cmdline, status=proc_status)) + assert ret is None + + +def test__get_proc_username(): + cmdline = ["echo", "питон"] + proc_username = "root" + ret = ps._get_proc_username(DummyProcess(cmdline=cmdline, username=proc_username)) + assert ret == proc_username + + with patch.object(DummyProcess, "username") as mock_username: + mock_username.side_effect = psutil.NoSuchProcess( + DummyProcess(cmdline=cmdline, username=proc_username) + ) + ret = ps._get_proc_username( + DummyProcess(cmdline=cmdline, username=proc_username) + ) + assert ret is None + + with patch.object(DummyProcess, "username") as mock_username: + mock_username.side_effect = psutil.AccessDenied( + DummyProcess(cmdline=cmdline, username=proc_username) + ) + ret = ps._get_proc_username( + DummyProcess(cmdline=cmdline, username=proc_username) + ) + assert ret is None + def test_get_pid_list(): with patch("salt.utils.psutil_compat.pids", MagicMock(return_value=STUB_PID_LIST)): @@ -267,6 +432,14 @@ def test_get_pid_list(): def test_kill_pid(): + cmdline = ["echo", "питон"] + top_proc = DummyProcess(cmdline=cmdline) + + with patch("salt.utils.psutil_compat.Process") as mock_process: + mock_process.side_effect = 
psutil.NoSuchProcess(top_proc) + ret = ps.kill_pid(0, signal=999) + assert not ret + with patch("salt.utils.psutil_compat.Process") as send_signal_mock: ps.kill_pid(0, signal=999) assert send_signal_mock.call_args == call(0) @@ -278,6 +451,19 @@ def test_pkill(mocked_proc): ps.pkill(_get_proc_name(mocked_proc), signal=test_signal) assert mocked_proc.send_signal.call_args == call(test_signal) + mocked_proc.send_signal = MagicMock(side_effect=psutil.NoSuchProcess(mocked_proc)) + ret = ps.pkill(_get_proc_name(mocked_proc), signal=test_signal) + assert ret is None + + mocked_proc.username = MagicMock(return_value="root") + with patch.object(ps, "_get_proc_username", return_value=None): + ret = ps.pkill(_get_proc_name(mocked_proc), signal=test_signal, user="root") + assert ret is None + + mocked_proc.username = MagicMock(return_value="root") + ret = ps.pkill(_get_proc_name(mocked_proc), signal=test_signal, user="root") + assert mocked_proc.send_signal.call_args == call(test_signal) + def test_pgrep(mocked_proc): with patch( @@ -286,6 +472,10 @@ def test_pgrep(mocked_proc): ): assert mocked_proc.pid in (ps.pgrep(_get_proc_name(mocked_proc)) or []) + assert mocked_proc.pid in ( + ps.pgrep(_get_proc_name(mocked_proc), full=True) or [] + ) + def test_pgrep_regex(mocked_proc): with patch( @@ -301,6 +491,14 @@ def test_cpu_percent(): with patch("salt.utils.psutil_compat.cpu_percent", MagicMock(return_value=1)): assert ps.cpu_percent() == 1 + with patch( + "salt.utils.psutil_compat.cpu_percent", MagicMock(return_value=(1, 1, 1, 1)) + ): + assert ps.cpu_percent(per_cpu=True) == [1, 1, 1, 1] + + with patch("salt.utils.psutil_compat.cpu_percent", MagicMock(return_value=1)): + assert ps.cpu_percent(per_cpu=False) == 1 + def test_cpu_times(): with patch( @@ -308,12 +506,31 @@ def test_cpu_times(): ): assert {"idle": 4, "nice": 2, "system": 3, "user": 1} == ps.cpu_times() + with patch( + "salt.utils.psutil_compat.cpu_times", + MagicMock(return_value=STUB_CPU_TIMES_PERCPU), + ): + 
assert [ + {"idle": 4, "nice": 2, "system": 3, "user": 1}, + {"idle": 4, "nice": 2, "system": 3, "user": 1}, + {"idle": 4, "nice": 2, "system": 3, "user": 1}, + {"idle": 4, "nice": 2, "system": 3, "user": 1}, + ] == ps.cpu_times(per_cpu=True) + @pytest.mark.skipif( HAS_PSUTIL_VERSION is False, reason="psutil 0.6.0 or greater is required for this test", ) def test_virtual_memory(): + with patch("salt.modules.ps.psutil.version_info", (0, 5, 9)): + with pytest.raises(CommandExecutionError) as exc: + ps.virtual_memory() + assert ( + exc.value.error + == "virtual_memory is only available in psutil 0.6.0 or greater" + ) + with patch( "salt.utils.psutil_compat.virtual_memory", MagicMock(return_value=STUB_VIRT_MEM), @@ -332,6 +549,15 @@ def test_virtual_memory(): reason="psutil 0.6.0 or greater is required for this test", ) def test_swap_memory(): + + with patch("salt.modules.ps.psutil.version_info", (0, 5, 9)): + with pytest.raises(CommandExecutionError) as exc: + ps.swap_memory() + assert ( + exc.value.error + == "swap_memory is only available in psutil 0.6.0 or greater" + ) + with patch( "salt.utils.psutil_compat.swap_memory", MagicMock(return_value=STUB_SWAP_MEM), @@ -377,12 +603,21 @@ def test_disk_partition_usage(): "salt.utils.psutil_compat.disk_partitions", MagicMock(return_value=[STUB_DISK_PARTITION]), ): - assert { - "device": "/dev/disk0s2", - "mountpoint": "/", - "opts": "rw,local,rootfs,dovolfs,journaled,multilabel", - "fstype": "hfs", - } == ps.disk_partitions()[0] + with patch( + "salt.utils.psutil_compat.disk_usage", + MagicMock(return_value=STUB_DISK_USAGE), + ): + result = ps.disk_partition_usage()[0] + assert { + "device": "/dev/disk0s2", + "mountpoint": "/", + "fstype": "hfs", + "opts": "rw,local,rootfs,dovolfs,journaled,multilabel", + "total": 1000, + "used": 500, + "free": 500, + "percent": 50, + } == result def test_network_io_counters(): @@ -401,6 +636,23 @@ def test_network_io_counters(): "dropin": 3, } == ps.network_io_counters() + with patch( + 
"salt.utils.psutil_compat.net_io_counters", + MagicMock(return_value=STUB_NETWORK_IO_PERNIC), + ): + assert { + "packets_sent": 500, + "packets_recv": 600, + "bytes_recv": 2000, + "dropout": 4, + "bytes_sent": 1000, + "errout": 2, + "errin": 1, + "dropin": 3, + } == ps.network_io_counters(interface="eth0") + + assert not ps.network_io_counters(interface="eth2") + def test_disk_io_counters(): with patch( @@ -416,6 +668,21 @@ def test_disk_io_counters(): "write_count": 2000, } == ps.disk_io_counters() + with patch( + "salt.utils.psutil_compat.disk_io_counters", + MagicMock(return_value=STUB_DISK_IO_PERDISK), + ): + assert { + "read_time": 2000, + "write_bytes": 600, + "read_bytes": 500, + "write_time": 3000, + "read_count": 1000, + "write_count": 2000, + } == ps.disk_io_counters(device="nvme0n1p1") + + assert not ps.disk_io_counters(device="nvme0n1p4") + def test_get_users(stub_user): with patch("salt.utils.psutil_compat.users", MagicMock(return_value=[stub_user])): @@ -438,6 +705,134 @@ def test_top(): result = ps.top(num_processes=1, interval=0) assert len(result) == 1 + cmdline = ["echo", "питон"] + top_proc = DummyProcess(cmdline=cmdline) + + with patch("salt.utils.psutil_compat.pids", return_value=[1]): + with patch("salt.utils.psutil_compat.Process") as mock_process: + mock_process.side_effect = psutil.NoSuchProcess(top_proc) + ret = ps.top(num_processes=1, interval=0) + assert ret == [] + + if salt.utils.platform.is_windows(): + scputimes = namedtuple( + "scputimes", ["user", "system", "children_user", "children_system"] + ) + zombie_cpu_times = scputimes(0, 0, 0, 0) + + smem_info = namedtuple( + "pmem", + [ + "rss", + "vms", + "num_page_faults", + "peak_wset", + "wset", + "peak_paged_pool", + "paged_pool", + "peak_nonpaged_pool", + "nonpaged_pool28144", + "pagefile", + "peak_pagefile", + "private", + ], + ) + zombie_mem_info = smem_info(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + else: + scputimes = namedtuple( + "scputimes", + ["user", "system", "children_user", 
"children_system", "iowait"], + ) + zombie_cpu_times = scputimes(0, 0, 0, 0, 0) + + smem_info = namedtuple( + "pmem", ["rss", "vms", "shared", "text", "lib", "data", "dirty"] + ) + zombie_mem_info = smem_info(0, 0, 0, 0, 0, 0, 0) + + with patch("salt.utils.psutil_compat.pids", return_value=[1]): + with patch("salt.utils.psutil_compat.Process", return_value=top_proc): + with patch.object(top_proc, "cpu_times") as mock_cpu_times: + with patch.object( + top_proc, "memory_info", return_value=zombie_mem_info, create=True + ): + mock_cpu_times.side_effect = [ + psutil.ZombieProcess(top_proc), + zombie_cpu_times, + zombie_cpu_times, + ] + ret = ps.top(num_processes=1, interval=0) + + if salt.utils.platform.is_windows(): + expected_mem = { + "rss": 0, + "vms": 0, + "num_page_faults": 0, + "peak_wset": 0, + "wset": 0, + "peak_paged_pool": 0, + "paged_pool": 0, + "peak_nonpaged_pool": 0, + "nonpaged_pool28144": 0, + "pagefile": 0, + "peak_pagefile": 0, + "private": 0, + } + + expected_cpu = { + "user": 0, + "system": 0, + "children_user": 0, + "children_system": 0, + } + + else: + expected_mem = { + "rss": 0, + "vms": 0, + "shared": 0, + "text": 0, + "lib": 0, + "data": 0, + "dirty": 0, + } + + expected_cpu = { + "user": 0, + "system": 0, + "children_user": 0, + "children_system": 0, + "iowait": 0, + } + + assert ret[0]["mem"] == expected_mem + assert ret[0]["cpu"] == expected_cpu + + with patch("salt.utils.psutil_compat.pids", return_value=[1]): + with patch("salt.utils.psutil_compat.Process", return_value=top_proc): + with patch.object(top_proc, "cpu_times") as mock_cpu_times: + mock_cpu_times.side_effect = [ + top_proc._cpu_times, + psutil.NoSuchProcess(top_proc), + ] + ret = ps.top(num_processes=1, interval=0) + assert ret == [] + + with patch("salt.utils.psutil_compat.pids", return_value=[1]): + with patch("salt.utils.psutil_compat.Process", return_value=top_proc): + with patch.object(top_proc, "cpu_times") as mock_cpu_times: + with patch.object( + top_proc, 
"memory_info", create=True + ) as mock_memory_info: + mock_memory_info.side_effect = psutil.NoSuchProcess(top_proc) + mock_cpu_times.side_effect = [ + psutil.ZombieProcess(top_proc), + zombie_cpu_times, + zombie_cpu_times, + ] + ret = ps.top(num_processes=1, interval=0) + assert ret == [] + def test_top_zombie_process(): # Get 3 pids that are currently running on the system @@ -506,3 +901,388 @@ def test_status_when_access_denied_from_psutil_then_raise_exception(): # @patch('salt.utils.psutil_compat.get_users', new=MagicMock(return_value=None)) # This will force the function to use utmp # def test_get_users_utmp(): # pass + + +def test_psaux(): + """ + Testing psaux function in the ps module + """ + + cmd_run_mock = """ +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.0 0.0 171584 15740 ? Ss Aug09 4:18 /usr/lib/systemd/systemd --system --deserialize=83 +root 2 0.0 0.0 0 0 ? S Aug09 0:02 [kthreadd] +root 2710129 0.0 0.0 18000 7428 pts/4 S+ Aug21 0:33 sudo -E salt-master -l debug +root 2710131 0.0 0.0 18000 1196 pts/6 Ss Aug21 0:00 sudo -E salt-master -l debug +""" + + with patch.dict(ps.__salt__, {"cmd.run": MagicMock(return_value=cmd_run_mock)}): + expected = [ + "salt-master", + [ + "root 2710129 0.0 0.0 18000 7428 pts/4 S+ Aug21 0:33 sudo -E salt-master -l debug", + "root 2710131 0.0 0.0 18000 1196 pts/6 Ss Aug21 0:00 sudo -E salt-master -l debug", + ], + "2 occurrence(s).", + ] + ret = ps.psaux("salt-master") + assert ret == expected + + expected = ["salt-minion", [], "0 occurrence(s)."] + ret = ps.psaux("salt-minion") + assert ret == expected + + +@pytest.mark.skip_on_windows(reason="ss not available in Windows") +def test_ss(): + """ + Testing ss function in the ps module + """ + + cmd_run_mock = """ +tcp LISTEN 0 128 0.0.0.0:22 0.0.0.0:* ino:31907 sk:364b cgroup:/system.slice/sshd.service <-> + +tcp LISTEN 0 128 [::]:22 [::]:* ino:31916 sk:36c4 cgroup:/system.slice/sshd.service v6only:1 <-> +""" + + with patch( + "salt.utils.path.which", 
MagicMock(return_value="/usr/sbin/ss") + ), patch.dict(ps.__salt__, {"cmd.run": MagicMock(return_value=cmd_run_mock)}): + expected = [ + "sshd", + [ + "tcp LISTEN 0 128 0.0.0.0:22 0.0.0.0:* ino:31907 sk:364b cgroup:/system.slice/sshd.service <->", + "tcp LISTEN 0 128 [::]:22 [::]:* ino:31916 sk:36c4 cgroup:/system.slice/sshd.service v6only:1 <->", + ], + ] + ret = ps.ss("sshd") + assert ret == expected + + expected = ["apache2", []] + ret = ps.ss("apache2") + assert ret == expected + + +def test_netstat(): + """ + Testing netstat function in the ps module + """ + + cmd_run_mock = """ +Active Internet connections (servers and established) +Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name +tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 668/sshd: /usr/sbin +tcp6 0 0 :::22 :::* LISTEN 668/sshd: /usr/sbin +""" + + with patch("salt.utils.path.which", MagicMock(return_value="/usr/bin/netstat")): + with patch.dict(ps.__salt__, {"cmd.run": MagicMock(return_value=cmd_run_mock)}): + expected = [ + "sshd", + [ + "tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 668/sshd: /usr/sbin", + "tcp6 0 0 :::22 :::* LISTEN 668/sshd: /usr/sbin", + ], + ] + ret = ps.netstat("sshd") + assert ret == expected + + expected = ["apache2", []] + ret = ps.netstat("apache2") + assert ret == expected + + +def test_lsof(): + """ + Testing lsof function in the ps module + """ + + sshd_cmd_run_mock = """ +COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME +sshd 1743 root cwd DIR 254,0 4096 2 / +sshd 1743 root rtd DIR 254,0 4096 2 / +sshd 1743 root txt REG 254,0 925000 7533685 /usr/bin/sshd (deleted) +sshd 1743 root DEL REG 254,0 7481413 /usr/lib/libc.so.6 +sshd 1743 root DEL REG 254,0 7477716 /usr/lib/libcrypto.so.3 +sshd 1743 root mem REG 254,0 26520 7482162 /usr/lib/libcap-ng.so.0.0.0 +sshd 1743 root DEL REG 254,0 7512187 /usr/lib/libresolv.so.2 +sshd 1743 root mem REG 254,0 22400 7481786 /usr/lib/libkeyutils.so.1.10 +sshd 1743 root mem REG 254,0 55352 7480841 /usr/lib/libkrb5support.so.0.1 +sshd 
1743 root mem REG 254,0 18304 7475778 /usr/lib/libcom_err.so.2.1 +sshd 1743 root mem REG 254,0 182128 7477432 /usr/lib/libk5crypto.so.3.1 +sshd 1743 root DEL REG 254,0 7485543 /usr/lib/libaudit.so.1.0.0 +sshd 1743 root DEL REG 254,0 7485432 /usr/lib/libz.so.1.2.13 +sshd 1743 root mem REG 254,0 882552 7480814 /usr/lib/libkrb5.so.3.3 +sshd 1743 root mem REG 254,0 344160 7475833 /usr/lib/libgssapi_krb5.so.2.2 +sshd 1743 root mem REG 254,0 67536 7482132 /usr/lib/libpam.so.0.85.1 +sshd 1743 root mem REG 254,0 165832 7481746 /usr/lib/libcrypt.so.2.0.0 +sshd 1743 root DEL REG 254,0 7480993 /usr/lib/ld-linux-x86-64.so.2 +sshd 1743 root 0r CHR 1,3 0t0 4 /dev/null +sshd 1743 root 1u unix 0x0000000000000000 0t0 32930 type=STREAM (CONNECTED) +sshd 1743 root 2u unix 0x0000000000000000 0t0 32930 type=STREAM (CONNECTED) +sshd 1743 root 3u IPv4 31907 0t0 TCP *:ssh (LISTEN) +sshd 1743 root 4u IPv6 31916 0t0 TCP *:ssh (LISTEN) +""" + + apache2_cmd_run_mock = "" + + with patch("salt.utils.path.which", MagicMock(return_value="/usr/bin/netstat")): + with patch.dict( + ps.__salt__, {"cmd.run": MagicMock(return_value=sshd_cmd_run_mock)} + ): + expected = [ + "sshd", + "\nCOMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME\nsshd 1743 root cwd DIR 254,0 4096 2 /\nsshd 1743 root rtd DIR 254,0 4096 2 /\nsshd 1743 root txt REG 254,0 925000 7533685 /usr/bin/sshd (deleted)\nsshd 1743 root DEL REG 254,0 7481413 /usr/lib/libc.so.6\nsshd 1743 root DEL REG 254,0 7477716 /usr/lib/libcrypto.so.3\nsshd 1743 root mem REG 254,0 26520 7482162 /usr/lib/libcap-ng.so.0.0.0\nsshd 1743 root DEL REG 254,0 7512187 /usr/lib/libresolv.so.2\nsshd 1743 root mem REG 254,0 22400 7481786 /usr/lib/libkeyutils.so.1.10\nsshd 1743 root mem REG 254,0 55352 7480841 /usr/lib/libkrb5support.so.0.1\nsshd 1743 root mem REG 254,0 18304 7475778 /usr/lib/libcom_err.so.2.1\nsshd 1743 root mem REG 254,0 182128 7477432 /usr/lib/libk5crypto.so.3.1\nsshd 1743 root DEL REG 254,0 7485543 /usr/lib/libaudit.so.1.0.0\nsshd 1743 root DEL REG 
254,0 7485432 /usr/lib/libz.so.1.2.13\nsshd 1743 root mem REG 254,0 882552 7480814 /usr/lib/libkrb5.so.3.3\nsshd 1743 root mem REG 254,0 344160 7475833 /usr/lib/libgssapi_krb5.so.2.2\nsshd 1743 root mem REG 254,0 67536 7482132 /usr/lib/libpam.so.0.85.1\nsshd 1743 root mem REG 254,0 165832 7481746 /usr/lib/libcrypt.so.2.0.0\nsshd 1743 root DEL REG 254,0 7480993 /usr/lib/ld-linux-x86-64.so.2\nsshd 1743 root 0r CHR 1,3 0t0 4 /dev/null\nsshd 1743 root 1u unix 0x0000000000000000 0t0 32930 type=STREAM (CONNECTED)\nsshd 1743 root 2u unix 0x0000000000000000 0t0 32930 type=STREAM (CONNECTED)\nsshd 1743 root 3u IPv4 31907 0t0 TCP *:ssh (LISTEN)\nsshd 1743 root 4u IPv6 31916 0t0 TCP *:ssh (LISTEN)\n", + ] + ret = ps.lsof("sshd") + assert ret == expected + + with patch.dict( + ps.__salt__, {"cmd.run": MagicMock(return_value=apache2_cmd_run_mock)} + ): + expected = ["apache2", ""] + ret = ps.lsof("apache2") + assert ret == expected + + +def test_boot_time(): + """ + Testing boot_time function in the ps module + """ + + with patch( + "salt.utils.psutil_compat.boot_time", MagicMock(return_value=1691593290.0) + ): + expected = 1691593290 + ret = ps.boot_time() + assert ret == expected + + expected = "08/09/2023" + ret = ps.boot_time(time_format="%m/%d/%Y") + assert ret == expected + + with patch("salt.utils.psutil_compat.boot_time") as mock_boot_time: + mock_boot_time.side_effect = [AttributeError(), 1691593290.0] + expected = 1691593290 + ret = ps.boot_time() + assert ret == expected + + +def test_num_cpus(): + """ + Testing num_cpus function in the ps module + """ + + with patch("salt.utils.psutil_compat.cpu_count") as mock_cpu_count: + mock_cpu_count.side_effect = AttributeError() + with patch("salt.utils.psutil_compat.NUM_CPUS", create=True, new=5): + ret = ps.num_cpus() + assert ret == 5 + + with patch("salt.utils.psutil_compat.cpu_count") as mock_cpu_count: + mock_cpu_count.return_value = 5 + ret = ps.num_cpus() + assert ret == 5 + + +def 
test_total_physical_memory(stub_memory_usage): + """ + Testing total_physical_memory function in the ps module + """ + + with patch("salt.modules.ps.psutil.version_info", (0, 5, 9)): + with pytest.raises(CommandExecutionError) as exc: + ps.total_physical_memory() + assert ( + exc.value.error + == "virtual_memory is only available in psutil 0.6.0 or greater" + ) + + with patch("salt.utils.psutil_compat.virtual_memory") as mock_total_physical_memory: + mock_total_physical_memory.side_effect = AttributeError() + with patch( + "salt.utils.psutil_compat.TOTAL_PHYMEM", + create=True, + new=stub_memory_usage.total, + ): + ret = ps.total_physical_memory() + assert ret == 15722012672 + + with patch("salt.utils.psutil_compat.virtual_memory") as mock_total_physical_memory: + mock_total_physical_memory.return_value = stub_memory_usage + ret = ps.total_physical_memory() + assert ret == 15722012672 + + +def test_proc_info(): + """ + Testing proc_info function in the ps module + """ + status = b"fnord" + extra_data = { + "utime": "42", + "stime": "42", + "children_utime": "42", + "children_stime": "42", + "ttynr": "42", + "cpu_time": "42", + "blkio_ticks": "99", + "ppid": "99", + "cpu_num": "9999999", + } + important_data = { + "name": b"blerp", + "status": status, + "create_time": "393829200", + "username": "root", + } + important_data.update(extra_data) + status_file = b"Name:\tblerp\nUmask:\t0000\nState:\tI (idle)\nTgid:\t99\nNgid:\t0\nPid:\t99\nPPid:\t2\nTracerPid:\t0\nUid:\t0\t0\t0\t0\nGid:\t0\t0\t0\t0\nFDSize:\t64\nGroups:\t \nNStgid:\t99\nNSpid:\t99\nNSpgid:\t0\nNSsid:\t0\nThreads:\t1\nSigQ:\t3/256078\nSigPnd:\t0000000000000000\nShdPnd:\t0000000000000000\nSigBlk:\t0000000000000000\nSigIgn:\tffffffffffffffff\nSigCgt:\t0000000000000000\nCapInh:\t0000000000000000\nCapPrm:\t000001ffffffffff\nCapEff:\t000001ffffffffff\nCapBnd:\t000001ffffffffff\nCapAmb:\t0000000000000000\nNoNewPrivs:\t0\nSeccomp:\t0\nSeccomp_filters:\t0\nSpeculation_Store_Bypass:\tthread 
vulnerable\nSpeculationIndirectBranch:\tconditional enabled\nCpus_allowed:\tfff\nCpus_allowed_list:\t0-11\nMems_allowed:\t00000001\nMems_allowed_list:\t0\nvoluntary_ctxt_switches:\t2\nnonvoluntary_ctxt_switches:\t0\n" + + patch_stat_file = patch( + "psutil._psplatform.Process._parse_stat_file", + return_value=important_data, + create=True, + ) + patch_exe = patch( + "psutil._psplatform.Process.exe", + return_value=important_data["name"].decode(), + create=True, + ) + patch_oneshot = patch( + "psutil._psplatform.Process.oneshot", + return_value={ + # These keys can be found in psutil/_psbsd.py + 1: important_data["status"].decode(), + # create + 9: float(important_data["create_time"]), + # user + 14: float(important_data["create_time"]), + # sys + 15: float(important_data["create_time"]), + # ch_user + 16: float(important_data["create_time"]), + # ch_sys -- we don't really care what they are, obviously + 17: float(important_data["create_time"]), + 24: important_data["name"].decode(), + }, + create=True, + ) + patch_kinfo = patch( + "psutil._psplatform.Process._get_kinfo_proc", + return_value={ + # These keys can be found in psutil/_psosx.py + 9: important_data["status"].decode(), + 8: float(important_data["create_time"]), + 10: important_data["name"].decode(), + }, + create=True, + ) + patch_status = patch( + "psutil._psplatform.Process.status", return_value=status.decode() + ) + patch_create_time = patch( + "psutil._psplatform.Process.create_time", return_value=393829200 + ) + with patch_stat_file, patch_status, patch_create_time, patch_exe, patch_oneshot, patch_kinfo: + if salt.utils.platform.is_windows(): + with patch("psutil._pswindows.cext") as mock__psutil_windows: + with patch("psutil._pswindows.Process.ppid", return_value=99): + mock__psutil_windows.proc_username.return_value = ( + "NT Authority", + "System", + ) + + expected = {"ppid": 99, "username": r"NT Authority\System"} + actual_result = salt.modules.ps.proc_info( + pid=99, attrs=["username", "ppid"] + 
) + assert actual_result == expected + + expected = {"pid": 99, "name": "blerp"} + actual_result = salt.modules.ps.proc_info( + pid=99, attrs=["pid", "name"] + ) + assert actual_result == expected + else: + patch_read_status_file = patch( + "psutil._psplatform.Process._read_status_file", return_value=status_file + ) + with patch_read_status_file: + expected = {"ppid": 99, "username": "root"} + actual_result = salt.modules.ps.proc_info( + pid=99, attrs=["username", "ppid"] + ) + assert actual_result == expected + + expected = {"pid": 99, "name": "blerp"} + actual_result = salt.modules.ps.proc_info(pid=99, attrs=["pid", "name"]) + assert actual_result == expected + + +def test_proc_info_access_denied(): + """ + Testing proc_info function in the ps module + when an AccessDenied exception occurs + """ + cmdline = ["echo", "питон"] + dummy_proc = DummyProcess(cmdline=cmdline) + with patch("salt.utils.psutil_compat.Process") as mock_process: + mock_process.side_effect = psutil.AccessDenied(dummy_proc) + with pytest.raises(CommandExecutionError): + salt.modules.ps.proc_info(pid=99, attrs=["username", "ppid"]) + + +def test_proc_info_no_such_process(): + """ + Testing proc_info function in the ps module + when an NoSuchProcess exception occurs + """ + cmdline = ["echo", "питон"] + dummy_proc = DummyProcess(cmdline=cmdline) + with patch("salt.utils.psutil_compat.Process") as mock_process: + mock_process.side_effect = psutil.NoSuchProcess(dummy_proc) + with pytest.raises(CommandExecutionError): + salt.modules.ps.proc_info(pid=99, attrs=["username", "ppid"]) + + +def test_proc_info_attribute_error(): + """ + Testing proc_info function in the ps module + when an AttributeError exception occurs + """ + cmdline = ["echo", "питон"] + with patch("salt.utils.psutil_compat.Process") as mock_process: + mock_process.side_effect = AttributeError() + with pytest.raises(CommandExecutionError): + salt.modules.ps.proc_info(pid=99, attrs=["username", "ppid"]) + + +def 
test__virtual__no_psutil(): + """ + Test __virtual__ function + """ + with patch.object(ps, "HAS_PSUTIL", False): + expected = ( + False, + "The ps module cannot be loaded: python module psutil not installed.", + ) + result = ps.__virtual__() + assert result == expected + + +def test__virtual__wrong_version(): + with patch("salt.modules.ps.psutil.version_info", (0, 2, 9)): + expected = ( + False, + "The ps execution module cannot be loaded: the psutil python module version {}" + " is less than 0.3.0".format(psutil.version_info), + ) + result = ps.__virtual__() + assert result == expected + + +def test__virtual__correct_version(): + with patch("salt.modules.ps.psutil.version_info", (0, 3, 0)): + result = ps.__virtual__() + assert result diff --git a/tests/pytests/unit/modules/test_redismod.py b/tests/pytests/unit/modules/test_redismod.py new file mode 100644 index 000000000000..81f8a3545c8a --- /dev/null +++ b/tests/pytests/unit/modules/test_redismod.py @@ -0,0 +1,483 @@ +""" + :codeauthor: Jayesh Kariya + + Test cases for salt.modules.redismod +""" + +from datetime import datetime + +import pytest + +import salt.modules.redismod as redismod +from tests.support.mock import MagicMock + + +class Mockredis: + """ + Mock redis class + """ + + class ConnectionError(Exception): + """ + Mock ConnectionError class + """ + + +class MockConnect: + """ + Mock Connect class + """ + + counter = 0 + + def __init__(self): + self.name = None + self.pattern = None + self.value = None + self.key = None + self.seconds = None + self.timestamp = None + self.field = None + self.start = None + self.stop = None + self.master_host = None + self.master_port = None + + @staticmethod + def bgrewriteaof(): + """ + Mock bgrewriteaof method + """ + return "A" + + @staticmethod + def bgsave(): + """ + Mock bgsave method + """ + return "A" + + def config_get(self, pattern): + """ + Mock config_get method + """ + self.pattern = pattern + return "A" + + def config_set(self, name, value): + """ + Mock 
config_set method + """ + self.name = name + self.value = value + return "A" + + @staticmethod + def dbsize(): + """ + Mock dbsize method + """ + return "A" + + @staticmethod + def delete(): + """ + Mock delete method + """ + return "A" + + def exists(self, key): + """ + Mock exists method + """ + self.key = key + return "A" + + def expire(self, key, seconds): + """ + Mock expire method + """ + self.key = key + self.seconds = seconds + return "A" + + def expireat(self, key, timestamp): + """ + Mock expireat method + """ + self.key = key + self.timestamp = timestamp + return "A" + + @staticmethod + def flushall(): + """ + Mock flushall method + """ + return "A" + + @staticmethod + def flushdb(): + """ + Mock flushdb method + """ + return "A" + + def get(self, key): + """ + Mock get method + """ + self.key = key + return "A" + + def hget(self, key, field): + """ + Mock hget method + """ + self.key = key + self.field = field + return "A" + + def hgetall(self, key): + """ + Mock hgetall method + """ + self.key = key + return "A" + + @staticmethod + def info(): + """ + Mock info method + """ + return "A" + + def keys(self, pattern): + """ + Mock keys method + """ + self.pattern = pattern + return "A" + + def type(self, key): + """ + Mock type method + """ + self.key = key + return "A" + + @staticmethod + def lastsave(): + """ + Mock lastsave method + """ + return datetime.now() + + def llen(self, key): + """ + Mock llen method + """ + self.key = key + return "A" + + def lrange(self, key, start, stop): + """ + Mock lrange method + """ + self.key = key + self.start = start + self.stop = stop + return "A" + + @staticmethod + def ping(): + """ + Mock ping method + """ + MockConnect.counter = MockConnect.counter + 1 + if MockConnect.counter == 1: + return "A" + elif MockConnect.counter in (2, 3, 5): + raise Mockredis.ConnectionError("foo") + + @staticmethod + def save(): + """ + Mock save method + """ + return "A" + + def set(self, key, value): + """ + Mock set method + """ 
+ self.key = key + self.value = value + return "A" + + @staticmethod + def shutdown(): + """ + Mock shutdown method + """ + return "A" + + def slaveof(self, master_host, master_port): + """ + Mock slaveof method + """ + self.master_host = master_host + self.master_port = master_port + return "A" + + def smembers(self, key): + """ + Mock smembers method + """ + self.key = key + return "A" + + @staticmethod + def time(): + """ + Mock time method + """ + return "A" + + def zcard(self, key): + """ + Mock zcard method + """ + self.key = key + return "A" + + def zrange(self, key, start, stop): + """ + Mock zrange method + """ + self.key = key + self.start = start + self.stop = stop + return "A" + + +@pytest.fixture +def configure_loader_modules(): + return { + redismod: { + "redis": Mockredis, + "_connect": MagicMock(return_value=MockConnect()), + } + } + + +def test_bgrewriteaof(): + """ + Test to asynchronously rewrite the append-only file + """ + assert redismod.bgrewriteaof() == "A" + + +def test_bgsave(): + """ + Test to asynchronously save the dataset to disk + """ + assert redismod.bgsave() == "A" + + +def test_config_get(): + """ + Test to get redis server configuration values + """ + assert redismod.config_get("*") == "A" + + +def test_config_set(): + """ + Test to set redis server configuration values + """ + assert redismod.config_set("name", "value") == "A" + + +def test_dbsize(): + """ + Test to return the number of keys in the selected database + """ + assert redismod.dbsize() == "A" + + +def test_delete(): + """ + Test to deletes the keys from redis, returns number of keys deleted + """ + assert redismod.delete() == "A" + + +def test_exists(): + """ + Test to return true if the key exists in redis + """ + assert redismod.exists("key") == "A" + + +def test_expire(): + """ + Test to set a keys time to live in seconds + """ + assert redismod.expire("key", "seconds") == "A" + + +def test_expireat(): + """ + Test to set a keys expire at given UNIX time + """ + 
assert redismod.expireat("key", "timestamp") == "A" + + +def test_flushall(): + """ + Test to remove all keys from all databases + """ + assert redismod.flushall() == "A" + + +def test_flushdb(): + """ + Test to remove all keys from the selected database + """ + assert redismod.flushdb() == "A" + + +def test_get_key(): + """ + Test to get redis key value + """ + assert redismod.get_key("key") == "A" + + +def test_hget(): + """ + Test to get specific field value from a redis hash, returns dict + """ + assert redismod.hget("key", "field") == "A" + + +def test_hgetall(): + """ + Test to get all fields and values from a redis hash, returns dict + """ + assert redismod.hgetall("key") == "A" + + +def test_info(): + """ + Test to get information and statistics about the server + """ + assert redismod.info() == "A" + + +def test_keys(): + """ + Test to get redis keys, supports glob style patterns + """ + assert redismod.keys("pattern") == "A" + + +def test_key_type(): + """ + Test to get redis key type + """ + assert redismod.key_type("key") == "A" + + +def test_lastsave(): + """ + Test to get the UNIX time in seconds of the last successful + save to disk + """ + assert redismod.lastsave() + + +def test_llen(): + """ + Test to get the length of a list in Redis + """ + assert redismod.llen("key") == "A" + + +def test_lrange(): + """ + Test to get a range of values from a list in Redis + """ + assert redismod.lrange("key", "start", "stop") == "A" + + +def test_ping(): + """ + Test to ping the server, returns False on connection errors + """ + assert redismod.ping() == "A" + + assert not redismod.ping() + + +def test_save(): + """ + Test to synchronously save the dataset to disk + """ + assert redismod.save() == "A" + + +def test_set_key(): + """ + Test to set redis key value + """ + assert redismod.set_key("key", "value") == "A" + + +def test_shutdown(): + """ + Test to synchronously save the dataset to disk and then + shut down the server + """ + assert not 
redismod.shutdown() + + assert redismod.shutdown() + + assert not redismod.shutdown() + + +def test_slaveof(): + """ + Test to make the server a slave of another instance, or + promote it as master + """ + assert redismod.slaveof("master_host", "master_port") == "A" + + +def test_smembers(): + """ + Test to get members in a Redis set + """ + assert redismod.smembers("key") == ["A"] + + +def test_time(): + """ + Test to return the current server UNIX time in seconds + """ + assert redismod.time() == "A" + + +def test_zcard(): + """ + Test to get the length of a sorted set in Redis + """ + assert redismod.zcard("key") == "A" + + +def test_zrange(): + """ + Test to get a range of values from a sorted set in Redis by index + """ + assert redismod.zrange("key", "start", "stop") == "A" diff --git a/tests/pytests/unit/modules/test_serverdensity_device.py b/tests/pytests/unit/modules/test_serverdensity_device.py new file mode 100644 index 000000000000..6d79aefa8134 --- /dev/null +++ b/tests/pytests/unit/modules/test_serverdensity_device.py @@ -0,0 +1,195 @@ +""" + :codeauthor: Jayesh Kariya + + TestCase for salt.modules.serverdensity_device +""" + + +import pytest + +import salt.modules.serverdensity_device as serverdensity_device +import salt.utils.json +from salt.exceptions import CommandExecutionError +from tests.support.mock import MagicMock, patch + + +class MockRequests: + """ + Mock smtplib class + """ + + flag = None + content = """{"message": "Invalid token", "errors": [{"type": "invalid_token", "subject": "token"}]}""" + status_code = None + + def __init__(self): + self.url = None + self.data = None + self.kwargs = None + + def return_request(self, url, data=None, **kwargs): + """ + Mock request method. 
+ """ + self.url = url + self.data = data + self.kwargs = kwargs + requests = MockRequests() + if self.flag == 1: + requests.status_code = 401 + else: + requests.status_code = 200 + return requests + + def post(self, url, data=None, **kwargs): + """ + Mock post method. + """ + return self.return_request(url, data, **kwargs) + + def delete(self, url, **kwargs): + """ + Mock delete method. + """ + return self.return_request(url, **kwargs) + + def get(self, url, **kwargs): + """ + Mock get method. + """ + return self.return_request(url, **kwargs) + + def put(self, url, data=None, **kwargs): + """ + Mock put method. + """ + return self.return_request(url, data, **kwargs) + + +@pytest.fixture +def configure_loader_modules(): + return {serverdensity_device: {"requests": MockRequests()}} + + +@pytest.fixture +def mock_json_loads(): + return MagicMock(side_effect=ValueError()) + + +def test_get_sd_auth(): + """ + Tests if it returns requested Server Density + authentication value from pillar. + """ + with patch.dict(serverdensity_device.__pillar__, {"serverdensity": False}): + pytest.raises(CommandExecutionError, serverdensity_device.get_sd_auth, "1") + + with patch.dict(serverdensity_device.__pillar__, {"serverdensity": {"1": "salt"}}): + assert serverdensity_device.get_sd_auth("1") == "salt" + + pytest.raises(CommandExecutionError, serverdensity_device.get_sd_auth, "2") + + +def test_create(mock_json_loads): + """ + Tests if it create device in Server Density. 
+ """ + with patch.dict( + serverdensity_device.__pillar__, {"serverdensity": {"api_token": "salt"}} + ): + assert serverdensity_device.create("rich_lama", group="lama_band") + + with patch.object(salt.utils.json, "loads", mock_json_loads): + pytest.raises( + CommandExecutionError, + serverdensity_device.create, + "rich_lama", + group="lama_band", + ) + + MockRequests.flag = 1 + assert serverdensity_device.create("rich_lama", group="lama_band") is None + + +def test_delete(mock_json_loads): + """ + Tests if it delete a device from Server Density. + """ + with patch.dict( + serverdensity_device.__pillar__, {"serverdensity": {"api_token": "salt"}} + ): + MockRequests.flag = 0 + assert serverdensity_device.delete("51f7eaf") + + with patch.object(salt.utils.json, "loads", mock_json_loads): + pytest.raises(CommandExecutionError, serverdensity_device.delete, "51f7eaf") + + MockRequests.flag = 1 + assert serverdensity_device.delete("51f7eaf") is None + + +def test_ls(mock_json_loads): + """ + Tests if it list devices in Server Density. + """ + with patch.dict( + serverdensity_device.__pillar__, {"serverdensity": {"api_token": "salt"}} + ): + MockRequests.flag = 0 + assert serverdensity_device.ls(name="lama") + + with patch.object(salt.utils.json, "loads", mock_json_loads): + pytest.raises(CommandExecutionError, serverdensity_device.ls, name="lama") + + MockRequests.flag = 1 + assert serverdensity_device.ls(name="lama") is None + + +def test_update(mock_json_loads): + """ + Tests if it updates device information in Server Density. 
+ """ + with patch.dict( + serverdensity_device.__pillar__, {"serverdensity": {"api_token": "salt"}} + ): + MockRequests.flag = 0 + assert serverdensity_device.update("51f7eaf", name="lama") + + with patch.object(salt.utils.json, "loads", mock_json_loads): + pytest.raises( + CommandExecutionError, + serverdensity_device.update, + "51f7eaf", + name="lama", + ) + + MockRequests.flag = 1 + assert serverdensity_device.update("51f7eaf", name="lama") is None + + +def test_install_agent(): + """ + Tests if it downloads Server Density installation agent, + and installs sd-agent with agent_key. + """ + mock = MagicMock(return_value=True) + with patch.dict( + serverdensity_device.__pillar__, {"serverdensity": {"account_url": "salt"}} + ): + with patch.dict(serverdensity_device.__salt__, {"cmd.run": mock}): + with patch.dict(serverdensity_device.__opts__, {"cachedir": "/"}): + assert serverdensity_device.install_agent("51f7e") + + +def test_install_agent_v2(): + """ + Tests if it downloads Server Density installation agent, + and installs sd-agent with agent_key. 
+ """ + mock = MagicMock(return_value=True) + with patch.dict( + serverdensity_device.__pillar__, {"serverdensity": {"account_name": "salt"}} + ): + with patch.dict(serverdensity_device.__salt__, {"cmd.run": mock}): + with patch.dict(serverdensity_device.__opts__, {"cachedir": "/"}): + assert serverdensity_device.install_agent("51f7e", agent_version=2) diff --git a/tests/pytests/unit/modules/test_servicenow.py b/tests/pytests/unit/modules/test_servicenow.py new file mode 100644 index 000000000000..1971b63f653e --- /dev/null +++ b/tests/pytests/unit/modules/test_servicenow.py @@ -0,0 +1,66 @@ +""" + :codeauthor: Anthony Shaw + + TestCase for salt.modules.servicenow +""" + + +import pytest + +import salt.modules.servicenow as servicenow +from tests.support.mock import MagicMock + + +class MockServiceNowClient: + def __init__(self, instance_name, username, password): + pass + + def get(self, query): + return [{"query_size": len(query), "query_value": query}] + + +@pytest.fixture +def configure_loader_modules(): + module_globals = { + "Client": MockServiceNowClient, + "__salt__": { + "config.option": MagicMock( + return_value={ + "instance_name": "test", + "username": "mr_test", + "password": "test123", + } + ) + }, + } + if servicenow.HAS_LIBS is False: + module_globals["sys.modules"] = {"servicenow_rest": MagicMock()} + module_globals["sys.modules"][ + "servicenow_rest" + ].api.Client = MockServiceNowClient + return {servicenow: module_globals} + + +def test_module_creation(): + client = servicenow._get_client() + assert client is not None + + +def test_non_structured_query(): + result = servicenow.non_structured_query("tests", "role=web") + assert result is not None + assert result[0]["query_size"] == 8 + assert result[0]["query_value"] == "role=web" + + +def test_non_structured_query_kwarg(): + result = servicenow.non_structured_query("tests", role="web") + assert result is not None + assert result[0]["query_size"] == 8 + assert result[0]["query_value"] == 
"role=web" + + +def test_non_structured_query_kwarg_multi(): + result = servicenow.non_structured_query("tests", role="web", type="computer") + assert result is not None + assert result[0]["query_size"] == 22 diff --git a/tests/unit/modules/test_smtp.py b/tests/pytests/unit/modules/test_smtp.py similarity index 55% rename from tests/unit/modules/test_smtp.py rename to tests/pytests/unit/modules/test_smtp.py index 32298fcdcf95..983bb209cf24 100644 --- a/tests/unit/modules/test_smtp.py +++ b/tests/pytests/unit/modules/test_smtp.py @@ -1,12 +1,14 @@ """ :codeauthor: Jayesh Kariya + + TestCase for salt.modules.smtp """ +import pytest + import salt.modules.smtp as smtp -from tests.support.mixins import LoaderModuleMockMixin from tests.support.mock import MagicMock, patch -from tests.support.unit import TestCase class SMTPRecipientsRefused(Exception): @@ -221,128 +223,106 @@ def SMTP(self, server): return MockSMTP("server") -class SmtpTestCase(TestCase, LoaderModuleMockMixin): +@pytest.fixture +def configure_loader_modules(): + return {smtp: {"socket": MockSocket(), "smtplib": MockSmtplib()}} + + +# 'send_msg' function tests: 1 + + +def test_send_msg(): """ - TestCase for salt.modules.smtp + Tests if it send a message to an SMTP recipient. 
""" + mock = MagicMock( + return_value={ + "smtp.server": "", + "smtp.tls": "True", + "smtp.sender": "", + "smtp.username": "", + "smtp.password": "", + } + ) + with patch.dict(smtp.__salt__, {"config.option": mock}): + assert smtp.send_msg( + "admin@example.com", + "This is a salt module test", + profile="my-smtp-account", + ) - def setup_loader_modules(self): - return {smtp: {"socket": MockSocket(), "smtplib": MockSmtplib()}} + MockSMTPSSL.flag = 1 + assert not smtp.send_msg( + "admin@example.com", + "This is a salt module test", + profile="my-smtp-account", + ) - # 'send_msg' function tests: 1 + MockSMTPSSL.flag = 2 + assert not smtp.send_msg( + "admin@example.com", + "This is a salt module test", + profile="my-smtp-account", + ) - def test_send_msg(self): - """ - Tests if it send a message to an SMTP recipient. - """ - mock = MagicMock( - return_value={ - "smtp.server": "", - "smtp.tls": "True", - "smtp.sender": "", - "smtp.username": "", - "smtp.password": "", - } + MockSMTPSSL.flag = 3 + assert not smtp.send_msg( + "admin@example.com", + "This is a salt module test", + profile="my-smtp-account", + ) + + MockSMTPSSL.flag = 4 + assert not smtp.send_msg( + "admin@example.com", + "This is a salt module test", + profile="my-smtp-account", + ) + + mock = MagicMock( + return_value={ + "smtp.server": "", + "smtp.tls": "", + "smtp.sender": "", + "smtp.username": "", + "smtp.password": "", + } + ) + with patch.dict(smtp.__salt__, {"config.option": mock}): + MockSMTPSSL.flag = 5 + assert not smtp.send_msg( + "admin@example.com", + "This is a salt module test", + username="myuser", + password="verybadpass", + sender="admin@example.com", + server="smtp.domain.com", + ) + + MockSMTP.flag = 1 + assert not smtp.send_msg( + "admin@example.com", + "This is a salt module test", + profile="my-smtp-account", + ) + + MockSMTP.flag = 2 + assert not smtp.send_msg( + "admin@example.com", + "This is a salt module test", + profile="my-smtp-account", ) - with patch.dict(smtp.__salt__, 
{"config.option": mock}): - self.assertTrue( - smtp.send_msg( - "admin@example.com", - "This is a salt module test", - profile="my-smtp-account", - ) - ) - - MockSMTPSSL.flag = 1 - self.assertFalse( - smtp.send_msg( - "admin@example.com", - "This is a salt module test", - profile="my-smtp-account", - ) - ) - - MockSMTPSSL.flag = 2 - self.assertFalse( - smtp.send_msg( - "admin@example.com", - "This is a salt module test", - profile="my-smtp-account", - ) - ) - - MockSMTPSSL.flag = 3 - self.assertFalse( - smtp.send_msg( - "admin@example.com", - "This is a salt module test", - profile="my-smtp-account", - ) - ) - - MockSMTPSSL.flag = 4 - self.assertFalse( - smtp.send_msg( - "admin@example.com", - "This is a salt module test", - profile="my-smtp-account", - ) - ) - - mock = MagicMock( - return_value={ - "smtp.server": "", - "smtp.tls": "", - "smtp.sender": "", - "smtp.username": "", - "smtp.password": "", - } + + MockSMTP.flag = 3 + assert not smtp.send_msg( + "admin@example.com", + "This is a salt module test", + profile="my-smtp-account", + ) + + MockSmtplib.flag = 1 + assert not smtp.send_msg( + "admin@example.com", + "This is a salt module test", + profile="my-smtp-account", ) - with patch.dict(smtp.__salt__, {"config.option": mock}): - MockSMTPSSL.flag = 5 - self.assertFalse( - smtp.send_msg( - "admin@example.com", - "This is a salt module test", - username="myuser", - password="verybadpass", - sender="admin@example.com", - server="smtp.domain.com", - ) - ) - - MockSMTP.flag = 1 - self.assertFalse( - smtp.send_msg( - "admin@example.com", - "This is a salt module test", - profile="my-smtp-account", - ) - ) - - MockSMTP.flag = 2 - self.assertFalse( - smtp.send_msg( - "admin@example.com", - "This is a salt module test", - profile="my-smtp-account", - ) - ) - - MockSMTP.flag = 3 - self.assertFalse( - smtp.send_msg( - "admin@example.com", - "This is a salt module test", - profile="my-smtp-account", - ) - ) - - MockSmtplib.flag = 1 - self.assertFalse( - smtp.send_msg( - 
"admin@example.com", - "This is a salt module test", - profile="my-smtp-account", - ) - ) diff --git a/tests/pytests/unit/modules/test_syslog_ng.py b/tests/pytests/unit/modules/test_syslog_ng.py new file mode 100644 index 000000000000..d481de33f96a --- /dev/null +++ b/tests/pytests/unit/modules/test_syslog_ng.py @@ -0,0 +1,357 @@ +""" + Test cases for salt.modules.syslog_ng +""" + + +import os +from textwrap import dedent + +import pytest + +import salt.modules.syslog_ng as syslog_ng +from tests.support.mock import MagicMock, patch + + +@pytest.fixture +def _version(): + return "3.6.0alpha0" + + +@pytest.fixture +def _modules(): + return ( + "syslogformat,json-plugin,basicfuncs,afstomp,afsocket,cryptofuncs," + "afmongodb,dbparser,system-source,affile,pseudofile,afamqp," + "afsocket-notls,csvparser,linux-kmsg-format,afuser,confgen,afprog" + ) + + +@pytest.fixture +def version_output(_version, _modules): + return """syslog-ng {0} +Installer-Version: {0} +Revision: +Compile-Date: Apr 4 2014 20:26:18 +Error opening plugin module; module='afsocket-tls', error='/home/tibi/install/syslog-ng/lib/syslog-ng/libafsocket-tls.so: undefined symbol: tls_context_setup_session' +Available-Modules: {1} +Enable-Debug: on +Enable-GProf: off +Enable-Memtrace: off +Enable-IPv6: on +Enable-Spoof-Source: off +Enable-TCP-Wrapper: off +Enable-Linux-Caps: off""".format( + _version, _modules + ) + + +@pytest.fixture +def stats_output(): + return """SourceName;SourceId;SourceInstance;State;Type;Number +center;;received;a;processed;0 +destination;#anon-destination0;;a;processed;0 +destination;#anon-destination1;;a;processed;0 +source;s_gsoc2014;;a;processed;0 +center;;queued;a;processed;0 +global;payload_reallocs;;a;processed;0 +global;sdata_updates;;a;processed;0 +global;msg_clones;;a;processed;0""" + + +@pytest.fixture +def orig_env(): + return {"PATH": "/foo:/bar"} + + +@pytest.fixture +def bin_dir(): + return "/baz" + + +@pytest.fixture +def mocked_env(): + return {"PATH": "/foo:/bar:/baz"} 
+ + +@pytest.fixture +def configure_loader_modules(): + return {syslog_ng: {}} + + +def test_statement_without_options(): + s = syslog_ng.Statement("source", "s_local", options=[]) + b = s.build() + assert b == ( + dedent( + """\ + source s_local { + }; + """ + ) + ) + + +def test_non_empty_statement(): + o1 = syslog_ng.Option("file") + o2 = syslog_ng.Option("tcp") + s = syslog_ng.Statement("source", "s_local", options=[o1, o2]) + b = s.build() + assert b == ( + dedent( + """\ + source s_local { + file( + ); + tcp( + ); + }; + """ + ) + ) + + +def test_option_with_parameters(): + o1 = syslog_ng.Option("file") + p1 = syslog_ng.SimpleParameter('"/var/log/messages"') + p2 = syslog_ng.SimpleParameter() + p3 = syslog_ng.TypedParameter() + p3.type = "tls" + p2.value = '"/var/log/syslog"' + o1.add_parameter(p1) + o1.add_parameter(p2) + o1.add_parameter(p3) + b = o1.build() + assert b == ( + dedent( + """\ + file( + "/var/log/messages", + "/var/log/syslog", + tls( + ) + ); + """ + ) + ) + + +def test_parameter_with_values(): + p = syslog_ng.TypedParameter() + p.type = "tls" + v1 = syslog_ng.TypedParameterValue() + v1.type = "key_file" + + v2 = syslog_ng.TypedParameterValue() + v2.type = "cert_file" + + p.add_value(v1) + p.add_value(v2) + + b = p.build() + assert b == ( + dedent( + """\ + tls( + key_file( + ), + cert_file( + ) + )""" + ) + ) + + +def test_value_with_arguments(): + t = syslog_ng.TypedParameterValue() + t.type = "key_file" + + a1 = syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"') + a2 = syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"') + + t.add_argument(a1) + t.add_argument(a2) + + b = t.build() + assert b == ( + dedent( + """\ + key_file( + "/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key" + "/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key" + )""" + ) + ) + + +def test_end_to_end_statement_generation(): + s = syslog_ng.Statement("source", "s_tls") + + o = syslog_ng.Option("tcp") + + ip = 
syslog_ng.TypedParameter("ip") + ip.add_value(syslog_ng.SimpleParameterValue("'192.168.42.2'")) + o.add_parameter(ip) + + port = syslog_ng.TypedParameter("port") + port.add_value(syslog_ng.SimpleParameterValue(514)) + o.add_parameter(port) + + tls = syslog_ng.TypedParameter("tls") + key_file = syslog_ng.TypedParameterValue("key_file") + key_file.add_argument( + syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"') + ) + cert_file = syslog_ng.TypedParameterValue("cert_file") + cert_file.add_argument( + syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/cert.d/syslog-ng.cert"') + ) + peer_verify = syslog_ng.TypedParameterValue("peer_verify") + peer_verify.add_argument(syslog_ng.Argument("optional-untrusted")) + tls.add_value(key_file) + tls.add_value(cert_file) + tls.add_value(peer_verify) + o.add_parameter(tls) + + s.add_child(o) + b = s.build() + assert b == ( + dedent( + """\ + source s_tls { + tcp( + ip( + '192.168.42.2' + ), + port( + 514 + ), + tls( + key_file( + "/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key" + ), + cert_file( + "/opt/syslog-ng/etc/syslog-ng/cert.d/syslog-ng.cert" + ), + peer_verify( + optional-untrusted + ) + ) + ); + }; + """ + ) + ) + + +@pytest.mark.skip_on_windows(reason="Module not available on Windows") +def test_version(_version, version_output, orig_env, bin_dir, mocked_env): + cmd_ret = {"retcode": 0, "stdout": version_output} + expected_output = {"retcode": 0, "stdout": _version} + cmd_args = ["syslog-ng", "-V"] + + cmd_mock = MagicMock(return_value=cmd_ret) + with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( + os.environ, orig_env + ): + result = syslog_ng.version() + assert result == expected_output + cmd_mock.assert_called_once_with(cmd_args, env=None, python_shell=False) + + cmd_mock = MagicMock(return_value=cmd_ret) + with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( + os.environ, orig_env + ): + result = syslog_ng.version(syslog_ng_sbin_dir=bin_dir) + 
assert result == expected_output + cmd_mock.assert_called_once_with(cmd_args, env=mocked_env, python_shell=False) + + +@pytest.mark.skip_on_windows(reason="Module not available on Windows") +def test_stats(stats_output, orig_env, bin_dir, mocked_env): + cmd_ret = {"retcode": 0, "stdout": stats_output} + cmd_args = ["syslog-ng-ctl", "stats"] + + cmd_mock = MagicMock(return_value=cmd_ret) + with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( + os.environ, orig_env + ): + result = syslog_ng.stats() + assert result == cmd_ret + cmd_mock.assert_called_once_with(cmd_args, env=None, python_shell=False) + + cmd_mock = MagicMock(return_value=cmd_ret) + with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( + os.environ, orig_env + ): + result = syslog_ng.stats(syslog_ng_sbin_dir=bin_dir) + assert result == cmd_ret + cmd_mock.assert_called_once_with(cmd_args, env=mocked_env, python_shell=False) + + +@pytest.mark.skip_on_windows(reason="Module not available on Windows") +def test_modules(_modules, version_output, orig_env, bin_dir, mocked_env): + cmd_ret = {"retcode": 0, "stdout": version_output} + expected_output = {"retcode": 0, "stdout": _modules} + cmd_args = ["syslog-ng", "-V"] + + cmd_mock = MagicMock(return_value=cmd_ret) + with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( + os.environ, orig_env + ): + result = syslog_ng.modules() + assert result == expected_output + cmd_mock.assert_called_once_with(cmd_args, env=None, python_shell=False) + + cmd_mock = MagicMock(return_value=cmd_ret) + with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( + os.environ, orig_env + ): + result = syslog_ng.modules(syslog_ng_sbin_dir=bin_dir) + assert result == expected_output + cmd_mock.assert_called_once_with(cmd_args, env=mocked_env, python_shell=False) + + +@pytest.mark.skip_on_windows(reason="Module not available on Windows") +def test_config_test(orig_env, bin_dir, mocked_env): + cmd_ret = 
{"retcode": 0, "stderr": "", "stdout": "Foo"} + cmd_args = ["syslog-ng", "--syntax-only"] + + cmd_mock = MagicMock(return_value=cmd_ret) + with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( + os.environ, orig_env + ): + result = syslog_ng.config_test() + assert result == cmd_ret + cmd_mock.assert_called_once_with(cmd_args, env=None, python_shell=False) + + cmd_mock = MagicMock(return_value=cmd_ret) + with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( + os.environ, orig_env + ): + result = syslog_ng.config_test(syslog_ng_sbin_dir=bin_dir) + assert result == cmd_ret + cmd_mock.assert_called_once_with(cmd_args, env=mocked_env, python_shell=False) + + +@pytest.mark.skip_on_windows(reason="Module not available on Windows") +def test_config_test_cfgfile(orig_env, bin_dir, mocked_env): + cfgfile = "/path/to/syslog-ng.conf" + cmd_ret = {"retcode": 1, "stderr": "Syntax error...", "stdout": ""} + cmd_args = ["syslog-ng", "--syntax-only", f"--cfgfile={cfgfile}"] + + cmd_mock = MagicMock(return_value=cmd_ret) + with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( + os.environ, orig_env + ): + assert syslog_ng.config_test(cfgfile=cfgfile) == cmd_ret + cmd_mock.assert_called_once_with(cmd_args, env=None, python_shell=False) + + cmd_mock = MagicMock(return_value=cmd_ret) + with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( + os.environ, orig_env + ): + assert ( + syslog_ng.config_test(syslog_ng_sbin_dir=bin_dir, cfgfile=cfgfile) + == cmd_ret + ) + cmd_mock.assert_called_once_with(cmd_args, env=mocked_env, python_shell=False) diff --git a/tests/pytests/unit/modules/test_uwsgi.py b/tests/pytests/unit/modules/test_uwsgi.py new file mode 100644 index 000000000000..65d3f75a86a6 --- /dev/null +++ b/tests/pytests/unit/modules/test_uwsgi.py @@ -0,0 +1,27 @@ +""" + Test cases for salt.modules.uswgi +""" + + +import pytest + +import salt.modules.uwsgi as uwsgi +from tests.support.mock import 
MagicMock, Mock, patch + + +@pytest.fixture +def configure_loader_modules(): + with patch("salt.utils.path.which", Mock(return_value="/usr/bin/uwsgi")): + return {uwsgi: {}} + + +def test_uwsgi_stats(): + socket = "127.0.0.1:5050" + mock = MagicMock(return_value='{"a": 1, "b": 2}') + with patch.dict(uwsgi.__salt__, {"cmd.run": mock}): + result = uwsgi.stats(socket) + mock.assert_called_once_with( + ["uwsgi", "--connect-and-read", f"{socket}"], + python_shell=False, + ) + assert result == {"a": 1, "b": 2} diff --git a/tests/pytests/unit/modules/test_vagrant.py b/tests/pytests/unit/modules/test_vagrant.py new file mode 100644 index 000000000000..a6bc3c32cd01 --- /dev/null +++ b/tests/pytests/unit/modules/test_vagrant.py @@ -0,0 +1,167 @@ +""" + TestCase for the salt.modules.vagrant module. +""" + + +import pytest + +import salt.exceptions +import salt.modules.vagrant as vagrant +import salt.utils.platform +from tests.support.mock import MagicMock, patch + + +@pytest.fixture +def local_opts(tmp_path): + return { + "extension_modules": "", + "vagrant_sdb_data": { + "driver": "sqlite3", + "database": str(tmp_path / "test_vagrant.sqlite"), + "table": "sdb", + "create_table": True, + }, + } + + +@pytest.fixture +def configure_loader_modules(local_opts): + return {vagrant: {"__opts__": local_opts}} + + +def test_vagrant_get_vm_info_not_found(): + mock_sdb = MagicMock(return_value=None) + with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb}): + with pytest.raises(salt.exceptions.SaltInvocationError): + vagrant.get_vm_info("thisNameDoesNotExist") + + +def test_vagrant_init_positional(local_opts, tmp_path): + path_nowhere = str(tmp_path / "tmp" / "nowhere") + mock_sdb = MagicMock(return_value=None) + with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}): + resp = vagrant.init( + "test1", + path_nowhere, + "onetest", + "nobody", + False, + "french", + {"different": "very"}, + ) + assert resp.startswith("Name test1 defined") + expected = dict( + name="test1", + 
cwd=path_nowhere, + machine="onetest", + runas="nobody", + vagrant_provider="french", + different="very", + ) + mock_sdb.assert_called_with( + f"sdb://vagrant_sdb_data/onetest?{path_nowhere}", + "test1", + local_opts, + ) + mock_sdb.assert_any_call("sdb://vagrant_sdb_data/test1", expected, local_opts) + + +def test_vagrant_get_vm_info(): + testdict = {"testone": "one", "machine": "two"} + mock_sdb = MagicMock(return_value=testdict) + with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb}): + resp = vagrant.get_vm_info("test1") + assert resp == testdict + + +def test_vagrant_init_dict(local_opts): + testdict = dict( + cwd="/tmp/anywhere", + machine="twotest", + runas="somebody", + vagrant_provider="english", + ) + expected = testdict.copy() + expected["name"] = "test2" + mock_sdb = MagicMock(return_value=None) + with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}): + vagrant.init("test2", vm=testdict) + mock_sdb.assert_any_call("sdb://vagrant_sdb_data/test2", expected, local_opts) + + +def test_vagrant_init_arg_override(local_opts): + testdict = dict( + cwd="/tmp/there", + machine="treetest", + runas="anybody", + vagrant_provider="spansh", + ) + mock_sdb = MagicMock(return_value=None) + with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}): + vagrant.init( + "test3", + cwd="/tmp", + machine="threetest", + runas="him", + vagrant_provider="polish", + vm=testdict, + ) + expected = dict( + name="test3", + cwd="/tmp", + machine="threetest", + runas="him", + vagrant_provider="polish", + ) + mock_sdb.assert_any_call("sdb://vagrant_sdb_data/test3", expected, local_opts) + + +def test_vagrant_get_ssh_config_fails(): + mock_sdb = MagicMock(return_value=None) + with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}): + mock_sdb = MagicMock(return_value={}) + with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb}): + vagrant.init("test3", cwd="/tmp") + with pytest.raises(salt.exceptions.SaltInvocationError): + vagrant.get_ssh_config("test3") # 
has not been started + + +def test_vagrant_destroy(local_opts, tmp_path): + path_mydir = str(tmp_path / "my" / "dir") + mock_cmd = MagicMock(return_value={"retcode": 0}) + with patch.dict(vagrant.__salt__, {"cmd.run_all": mock_cmd}): + mock_sdb = MagicMock(return_value=None) + with patch.dict(vagrant.__utils__, {"sdb.sdb_delete": mock_sdb}): + mock_sdb_get = MagicMock( + return_value={"machine": "macfour", "cwd": path_mydir} + ) + with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb_get}): + assert vagrant.destroy("test4") + mock_sdb.assert_any_call( + f"sdb://vagrant_sdb_data/macfour?{path_mydir}", + local_opts, + ) + mock_sdb.assert_any_call("sdb://vagrant_sdb_data/test4", local_opts) + cmd = "vagrant destroy -f macfour" + mock_cmd.assert_called_with( + cmd, runas=None, cwd=path_mydir, output_loglevel="info" + ) + + +def test_vagrant_start(): + mock_cmd = MagicMock(return_value={"retcode": 0}) + with patch.dict(vagrant.__salt__, {"cmd.run_all": mock_cmd}): + mock_sdb_get = MagicMock( + return_value={ + "machine": "five", + "cwd": "/the/dir", + "runas": "me", + "vagrant_provider": "him", + } + ) + with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb_get}): + assert vagrant.start("test5") + cmd = "vagrant up five --provider=him" + mock_cmd.assert_called_with( + cmd, runas="me", cwd="/the/dir", output_loglevel="info" + ) diff --git a/tests/pytests/unit/modules/test_xfs.py b/tests/pytests/unit/modules/test_xfs.py new file mode 100644 index 000000000000..9d1ce9dff34f --- /dev/null +++ b/tests/pytests/unit/modules/test_xfs.py @@ -0,0 +1,113 @@ +""" + Test cases for salt.modules.xfs +""" + + +import textwrap + +import pytest + +import salt.modules.xfs as xfs + +pytestmark = [ + pytest.mark.skip_on_windows(reason="xfs not available on windows"), + pytest.mark.skip_on_darwin(reason="xfs not available on darwin."), +] + + +@pytest.fixture +def configure_loader_modules(): + return {xfs: {}} + + +def test_blkid_output(): + """ + Test xfs._blkid_output when 
there is data + """ + blkid_export = textwrap.dedent( + """ + DEVNAME=/dev/sda1 + UUID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + TYPE=xfs + PARTUUID=YYYYYYYY-YY + + DEVNAME=/dev/sdb1 + PARTUUID=ZZZZZZZZ-ZZZZ-ZZZZ-ZZZZ-ZZZZZZZZZZZZ + """ + ) + # We expect to find only data from /dev/sda1, nothig from + # /dev/sdb1 + assert xfs._blkid_output(blkid_export) == { + "/dev/sda1": { + "label": None, + "partuuid": "YYYYYYYY-YY", + "uuid": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", + } + } + + +def test_parse_xfs_info(): + """ + Test parsing output from mkfs.xfs. + """ + data = textwrap.dedent( + """ + meta-data=/dev/vg00/testvol isize=512 agcount=4, agsize=1310720 blks + = sectsz=4096 attr=2, projid32bit=1 + = crc=1 finobt=1, sparse=1, rmapbt=0 + = reflink=1 + data = bsize=4096 blocks=5242880, imaxpct=25 + = sunit=0 swidth=0 blks + naming =version 2 bsize=4096 ascii-ci=0, ftype=1 + log =internal log bsize=4096 blocks=2560, version=2 + = sectsz=4096 sunit=1 blks, lazy-count=1 + realtime =none extsz=4096 blocks=0, rtextents=0 + Discarding blocks...Done. 
+ """ + ) + + assert xfs._parse_xfs_info(data) == { + "meta-data": { + "section": "/dev/vg00/testvol", + "isize": "512", + "agcount": "4", + "agsize": "1310720 blks", + "sectsz": "4096", + "attr": "2", + "projid32bit": "1", + "crc": "1", + "finobt": "1", + "sparse": "1", + "rmapbt": "0", + "reflink": "1", + }, + "data": { + "section": "data", + "bsize": "4096", + "blocks": "5242880", + "imaxpct": "25", + "sunit": "0", + "swidth": "0 blks", + }, + "naming": { + "section": "version 2", + "bsize": "4096", + "ascii-ci": "0", + "ftype": "1", + }, + "log": { + "section": "internal log", + "bsize": "4096", + "blocks": "2560", + "version": "2", + "sectsz": "4096", + "sunit": "1 blks", + "lazy-count": "1", + }, + "realtime": { + "section": "none", + "extsz": "4096", + "blocks": "0", + "rtextents": "0", + }, + } diff --git a/tests/pytests/unit/states/file/test_rename.py b/tests/pytests/unit/states/file/test_rename.py index bccb59b8c03a..379c9c77822a 100644 --- a/tests/pytests/unit/states/file/test_rename.py +++ b/tests/pytests/unit/states/file/test_rename.py @@ -10,6 +10,7 @@ import salt.serializers.python as pythonserializer import salt.serializers.yaml as yamlserializer import salt.states.file as filestate +import salt.utils.platform from tests.support.mock import MagicMock, patch log = logging.getLogger(__name__) @@ -57,14 +58,14 @@ def test_rename(tmp_path): mock_lex = MagicMock(side_effect=[False, True, True]) with patch.object(os.path, "isabs", mock_f): - comt = "Specified file {} is not an absolute path".format(name) + comt = f"Specified file {name} is not an absolute path" ret.update({"comment": comt, "name": name}) assert filestate.rename(name, source) == ret mock_lex = MagicMock(return_value=False) with patch.object(os.path, "isabs", mock_t): with patch.object(os.path, "lexists", mock_lex): - comt = 'Source file "{}" has already been moved out of place'.format(source) + comt = f'Source file "{source}" has already been moved out of place' ret.update({"comment": 
comt, "result": True}) assert filestate.rename(name, source) == ret @@ -93,7 +94,7 @@ def test_rename(tmp_path): with patch.object(os.path, "isabs", mock_t): with patch.object(os.path, "lexists", mock_lex): with patch.dict(filestate.__opts__, {"test": True}): - comt = 'File "{}" is set to be moved to "{}"'.format(source, name) + comt = f'File "{source}" is set to be moved to "{name}"' ret.update({"name": name, "comment": comt, "result": None}) assert filestate.rename(name, source) == ret @@ -102,7 +103,7 @@ def test_rename(tmp_path): with patch.object(os.path, "lexists", mock_lex): with patch.object(os.path, "isdir", mock_f): with patch.dict(filestate.__opts__, {"test": False}): - comt = "The target directory {} is not present".format(tmp_path) + comt = f"The target directory {tmp_path} is not present" ret.update({"name": name, "comment": comt, "result": False}) assert filestate.rename(name, source) == ret @@ -115,7 +116,7 @@ def test_rename(tmp_path): with patch.object( shutil, "move", MagicMock(side_effect=IOError) ): - comt = 'Failed to move "{}" to "{}"'.format(source, name) + comt = f'Failed to move "{source}" to "{name}"' ret.update({"name": name, "comment": comt, "result": False}) assert filestate.rename(name, source) == ret @@ -126,7 +127,7 @@ def test_rename(tmp_path): with patch.object(os.path, "islink", mock_f): with patch.dict(filestate.__opts__, {"test": False}): with patch.object(shutil, "move", MagicMock()): - comt = 'Moved "{}" to "{}"'.format(source, name) + comt = f'Moved "{source}" to "{name}"' ret.update( { "name": name, @@ -136,3 +137,56 @@ def test_rename(tmp_path): } ) assert filestate.rename(name, source) == ret + + mock_lex = MagicMock(side_effect=[True, False, False]) + with patch.object(os.path, "isabs", mock_t), patch.object( + os.path, "lexists", mock_lex + ), patch.object(os.path, "isdir", mock_f), patch.dict( + filestate.__salt__, + {"file.makedirs": MagicMock(side_effect=filestate.CommandExecutionError())}, + ), patch.object( + 
os.path, "islink", mock_f + ), patch.dict( + filestate.__opts__, {"test": False} + ), patch.object( + shutil, "move", MagicMock() + ): + if salt.utils.platform.is_windows(): + comt = "Drive C: is not mapped" + else: + comt = "Drive is not mapped" + ret.update( + { + "name": name, + "comment": comt, + "result": False, + "changes": {}, + } + ) + assert filestate.rename(name, source, makedirs=True) == ret + + mock_lex = MagicMock(side_effect=[True, False, False]) + mock_link = str(tmp_path / "salt" / "link") + with patch.object(os.path, "isabs", mock_t), patch.object( + os.path, "lexists", mock_lex + ), patch.object(os.path, "isdir", mock_t), patch.object( + os.path, "islink", mock_t + ), patch( + "salt.utils.path.readlink", MagicMock(return_value=mock_link) + ), patch.dict( + filestate.__opts__, {"test": False} + ), patch.object( + os, "symlink", MagicMock() + ), patch.object( + os, "unlink", MagicMock() + ): + comt = f'Moved "{source}" to "{name}"' + ret.update( + { + "name": name, + "comment": comt, + "result": True, + "changes": {name: source}, + } + ) + assert filestate.rename(name, source) == ret diff --git a/tests/pytests/unit/states/test_gem.py b/tests/pytests/unit/states/test_gem.py new file mode 100644 index 000000000000..6d3a7ac001d8 --- /dev/null +++ b/tests/pytests/unit/states/test_gem.py @@ -0,0 +1,137 @@ +""" + Tests of salt.states.gem +""" + + +import pytest + +import salt.states.gem as gem +from tests.support.mock import MagicMock, patch + + +@pytest.fixture +def configure_loader_modules(): + return {gem: {"__opts__": {"test": False}}} + + +def test_installed(): + gems = {"foo": ["1.0"], "bar": ["2.0"]} + gem_list = MagicMock(return_value=gems) + gem_install_succeeds = MagicMock(return_value=True) + gem_install_fails = MagicMock(return_value=False) + + with patch.dict(gem.__salt__, {"gem.list": gem_list}): + with patch.dict(gem.__salt__, {"gem.install": gem_install_succeeds}): + ret = gem.installed("foo") + assert ret["result"] is True + ret = 
gem.installed("quux") + assert ret["result"] is True + gem_install_succeeds.assert_called_once_with( + "quux", + pre_releases=False, + ruby=None, + runas=None, + version=None, + proxy=None, + rdoc=False, + source=None, + ri=False, + gem_bin=None, + ) + + with patch.dict(gem.__salt__, {"gem.install": gem_install_fails}): + ret = gem.installed("quux") + assert ret["result"] is False + gem_install_fails.assert_called_once_with( + "quux", + pre_releases=False, + ruby=None, + runas=None, + version=None, + proxy=None, + rdoc=False, + source=None, + ri=False, + gem_bin=None, + ) + + +def test_installed_version(): + gems = {"foo": ["1.0"], "bar": ["2.0"]} + gem_list = MagicMock(return_value=gems) + gem_install_succeeds = MagicMock(return_value=True) + + with patch.dict(gem.__salt__, {"gem.list": gem_list}): + with patch.dict(gem.__salt__, {"gem.install": gem_install_succeeds}): + ret = gem.installed("foo", version=">= 1.0") + assert ret["result"] is True + assert ret["comment"] == "Installed Gem meets version requirements." 
+ + +def test_removed(): + gems = ["foo", "bar"] + gem_list = MagicMock(return_value=gems) + gem_uninstall_succeeds = MagicMock(return_value=True) + gem_uninstall_fails = MagicMock(return_value=False) + with patch.dict(gem.__salt__, {"gem.list": gem_list}): + with patch.dict(gem.__salt__, {"gem.uninstall": gem_uninstall_succeeds}): + ret = gem.removed("quux") + assert ret["result"] is True + ret = gem.removed("foo") + assert ret["result"] is True + gem_uninstall_succeeds.assert_called_once_with( + "foo", None, runas=None, gem_bin=None + ) + + with patch.dict(gem.__salt__, {"gem.uninstall": gem_uninstall_fails}): + ret = gem.removed("bar") + assert ret["result"] is False + gem_uninstall_fails.assert_called_once_with( + "bar", None, runas=None, gem_bin=None + ) + + +def test_sources_add(): + gem_sources = ["http://foo", "http://bar"] + gem_sources_list = MagicMock(return_value=gem_sources) + gem_sources_add_succeeds = MagicMock(return_value=True) + gem_sources_add_fails = MagicMock(return_value=False) + with patch.dict(gem.__salt__, {"gem.sources_list": gem_sources_list}): + with patch.dict(gem.__salt__, {"gem.sources_add": gem_sources_add_succeeds}): + ret = gem.sources_add("http://foo") + assert ret["result"] is True + ret = gem.sources_add("http://fui") + assert ret["result"] is True + gem_sources_add_succeeds.assert_called_once_with( + source_uri="http://fui", ruby=None, runas=None + ) + with patch.dict(gem.__salt__, {"gem.sources_add": gem_sources_add_fails}): + ret = gem.sources_add("http://fui") + assert ret["result"] is False + gem_sources_add_fails.assert_called_once_with( + source_uri="http://fui", ruby=None, runas=None + ) + + +def test_sources_remove(): + gem_sources = ["http://foo", "http://bar"] + gem_sources_list = MagicMock(return_value=gem_sources) + gem_sources_remove_succeeds = MagicMock(return_value=True) + gem_sources_remove_fails = MagicMock(return_value=False) + with patch.dict(gem.__salt__, {"gem.sources_list": gem_sources_list}): + with 
patch.dict( + gem.__salt__, {"gem.sources_remove": gem_sources_remove_succeeds} + ): + ret = gem.sources_remove("http://fui") + assert ret["result"] is True + ret = gem.sources_remove("http://foo") + assert ret["result"] is True + gem_sources_remove_succeeds.assert_called_once_with( + source_uri="http://foo", ruby=None, runas=None + ) + with patch.dict(gem.__salt__, {"gem.sources_remove": gem_sources_remove_fails}): + ret = gem.sources_remove("http://bar") + assert ret["result"] is False + gem_sources_remove_fails.assert_called_once_with( + source_uri="http://bar", ruby=None, runas=None + ) diff --git a/tests/pytests/unit/states/test_glusterfs.py b/tests/pytests/unit/states/test_glusterfs.py new file mode 100644 index 000000000000..b662c8e8df8a --- /dev/null +++ b/tests/pytests/unit/states/test_glusterfs.py @@ -0,0 +1,418 @@ +""" + :codeauthor: Jayesh Kariya + + Test cases for salt.states.glusterfs +""" + +import pytest + +import salt.states.glusterfs as glusterfs +import salt.utils.cloud +import salt.utils.network +from tests.support.mock import MagicMock, patch + + +@pytest.fixture +def configure_loader_modules(): + return {glusterfs: {}} + + +def test_peered(): + """ + Test to verify if node is peered. 
+ """ + name = "server1" + + ret = {"name": name, "result": True, "comment": "", "changes": {}} + + mock_ip = MagicMock(return_value=["1.2.3.4", "1.2.3.5"]) + mock_ip6 = MagicMock(return_value=["2001:db8::1"]) + mock_host_ips = MagicMock(return_value=["1.2.3.5"]) + mock_peer = MagicMock(return_value=True) + mock_status = MagicMock(return_value={"uuid1": {"hostnames": [name]}}) + + with patch.dict( + glusterfs.__salt__, + {"glusterfs.peer_status": mock_status, "glusterfs.peer": mock_peer}, + ): + with patch.object(salt.utils.network, "ip_addrs", mock_ip), patch.object( + salt.utils.network, "ip_addrs6", mock_ip6 + ), patch.object(salt.utils.network, "host_to_ips", mock_host_ips): + comt = "Peering with localhost is not needed" + ret.update({"comment": comt}) + assert glusterfs.peered(name) == ret + + mock_host_ips.return_value = ["127.0.1.1"] + comt = "Peering with localhost is not needed" + ret.update({"comment": comt}) + assert glusterfs.peered(name) == ret + + mock_host_ips.return_value = ["2001:db8::1"] + assert glusterfs.peered(name) == ret + + mock_host_ips.return_value = ["1.2.3.42"] + comt = f"Host {name} already peered" + ret.update({"comment": comt}) + assert glusterfs.peered(name) == ret + + with patch.dict(glusterfs.__opts__, {"test": False}): + old = {"uuid1": {"hostnames": ["other1"]}} + new = { + "uuid1": {"hostnames": ["other1"]}, + "uuid2": {"hostnames": ["someAlias", name]}, + } + mock_status.side_effect = [old, new] + comt = f"Host {name} successfully peered" + ret.update({"comment": comt, "changes": {"old": old, "new": new}}) + assert glusterfs.peered(name) == ret + mock_status.side_effect = None + + mock_status.return_value = {"uuid1": {"hostnames": ["other"]}} + mock_peer.return_value = False + + ret.update({"result": False}) + + comt = "Failed to peer with {}, please check logs for errors".format( + name + ) + ret.update({"comment": comt, "changes": {}}) + assert glusterfs.peered(name) == ret + + comt = "Invalid characters in peer name." 
+ ret.update({"comment": comt, "name": ":/"}) + assert glusterfs.peered(":/") == ret + ret.update({"name": name}) + + with patch.dict(glusterfs.__opts__, {"test": True}): + comt = f"Peer {name} will be added." + ret.update({"comment": comt, "result": None}) + assert glusterfs.peered(name) == ret + + +def test_volume_present(): + """ + Test to ensure that a volume exists + """ + name = "salt" + bricks = ["host1:/brick1"] + ret = {"name": name, "result": True, "comment": "", "changes": {}} + + started_info = {name: {"status": "1"}} + stopped_info = {name: {"status": "0"}} + + mock_info = MagicMock() + mock_list = MagicMock() + mock_create = MagicMock() + mock_start = MagicMock(return_value=True) + + with patch.dict( + glusterfs.__salt__, + { + "glusterfs.info": mock_info, + "glusterfs.list_volumes": mock_list, + "glusterfs.create_volume": mock_create, + "glusterfs.start_volume": mock_start, + }, + ): + with patch.dict(glusterfs.__opts__, {"test": False}): + mock_list.return_value = [name] + mock_info.return_value = started_info + comt = f"Volume {name} already exists and is started" + ret.update({"comment": comt}) + assert glusterfs.volume_present(name, bricks, start=True) == ret + + mock_info.return_value = stopped_info + comt = f"Volume {name} already exists and is now started" + ret.update( + {"comment": comt, "changes": {"old": "stopped", "new": "started"}} + ) + assert glusterfs.volume_present(name, bricks, start=True) == ret + + comt = f"Volume {name} already exists" + ret.update({"comment": comt, "changes": {}}) + assert glusterfs.volume_present(name, bricks, start=False) == ret + with patch.dict(glusterfs.__opts__, {"test": True}): + comt = f"Volume {name} already exists" + ret.update({"comment": comt, "result": None}) + assert glusterfs.volume_present(name, bricks, start=False) == ret + + comt = f"Volume {name} already exists and will be started" + ret.update({"comment": comt, "result": None}) + assert glusterfs.volume_present(name, bricks, start=True) == 
ret + + mock_list.return_value = [] + comt = f"Volume {name} will be created" + ret.update({"comment": comt, "result": None}) + assert glusterfs.volume_present(name, bricks, start=False) == ret + + comt = f"Volume {name} will be created and started" + ret.update({"comment": comt, "result": None}) + assert glusterfs.volume_present(name, bricks, start=True) == ret + + with patch.dict(glusterfs.__opts__, {"test": False}): + mock_list.side_effect = [[], [name]] + comt = f"Volume {name} is created" + ret.update( + { + "comment": comt, + "result": True, + "changes": {"old": [], "new": [name]}, + } + ) + assert glusterfs.volume_present(name, bricks, start=False) == ret + + mock_list.side_effect = [[], [name]] + comt = f"Volume {name} is created and is now started" + ret.update({"comment": comt, "result": True}) + assert glusterfs.volume_present(name, bricks, start=True) == ret + + mock_list.side_effect = None + mock_list.return_value = [] + mock_create.return_value = False + comt = f"Creation of volume {name} failed" + ret.update({"comment": comt, "result": False, "changes": {}}) + assert glusterfs.volume_present(name, bricks) == ret + + with patch.object(salt.utils.cloud, "check_name", MagicMock(return_value=True)): + comt = "Invalid characters in volume name." 
+ ret.update({"comment": comt, "result": False}) + assert glusterfs.volume_present(name, bricks) == ret + + +def test_started(): + """ + Test to check if volume has been started + """ + name = "salt" + + ret = {"name": name, "result": False, "comment": "", "changes": {}} + + started_info = {name: {"status": "1"}} + stopped_info = {name: {"status": "0"}} + mock_info = MagicMock(return_value={}) + mock_start = MagicMock(return_value=True) + + with patch.dict( + glusterfs.__salt__, + {"glusterfs.info": mock_info, "glusterfs.start_volume": mock_start}, + ): + comt = f"Volume {name} does not exist" + ret.update({"comment": comt}) + assert glusterfs.started(name) == ret + + mock_info.return_value = started_info + comt = f"Volume {name} is already started" + ret.update({"comment": comt, "result": True}) + assert glusterfs.started(name) == ret + + with patch.dict(glusterfs.__opts__, {"test": True}): + mock_info.return_value = stopped_info + comt = f"Volume {name} will be started" + ret.update({"comment": comt, "result": None}) + assert glusterfs.started(name) == ret + + with patch.dict(glusterfs.__opts__, {"test": False}): + comt = f"Volume {name} is started" + ret.update( + { + "comment": comt, + "result": True, + "change": {"new": "started", "old": "stopped"}, + } + ) + assert glusterfs.started(name) == ret + + +def test_add_volume_bricks(): + """ + Test to add brick(s) to an existing volume + """ + name = "salt" + bricks = ["host1:/drive1"] + old_bricks = ["host1:/drive2"] + + ret = {"name": name, "result": False, "comment": "", "changes": {}} + + stopped_volinfo = {"salt": {"status": "0"}} + volinfo = {"salt": {"status": "1", "bricks": {"brick1": {"path": old_bricks[0]}}}} + new_volinfo = { + "salt": { + "status": "1", + "bricks": { + "brick1": {"path": old_bricks[0]}, + "brick2": {"path": bricks[0]}, + }, + } + } + + mock_info = MagicMock(return_value={}) + mock_add = MagicMock(side_effect=[False, True]) + + with patch.dict( + glusterfs.__salt__, + {"glusterfs.info": 
mock_info, "glusterfs.add_volume_bricks": mock_add}, + ): + ret.update({"comment": "Volume salt does not exist"}) + assert glusterfs.add_volume_bricks(name, bricks) == ret + + mock_info.return_value = stopped_volinfo + ret.update({"comment": "Volume salt is not started"}) + assert glusterfs.add_volume_bricks(name, bricks) == ret + + mock_info.return_value = volinfo + ret.update({"comment": "Adding bricks to volume salt failed"}) + assert glusterfs.add_volume_bricks(name, bricks) == ret + + ret.update({"result": True}) + ret.update({"comment": "Bricks already added in volume salt"}) + assert glusterfs.add_volume_bricks(name, old_bricks) == ret + + mock_info.side_effect = [volinfo, new_volinfo] + ret.update( + { + "comment": "Bricks successfully added to volume salt", + "changes": {"new": bricks + old_bricks, "old": old_bricks}, + } + ) + # Let's sort ourselves because the test under python 3 sometimes fails + # just because of the new changes list order + result = glusterfs.add_volume_bricks(name, bricks) + ret["changes"]["new"] = sorted(ret["changes"]["new"]) + result["changes"]["new"] = sorted(result["changes"]["new"]) + assert result == ret + + +def test_op_version(): + """ + Test setting the Glusterfs op-version + """ + name = "salt" + current = 30707 + new = 31200 + + ret = {"name": name, "result": False, "comment": "", "changes": {}} + + mock_get_version = MagicMock(return_value={}) + mock_set_version = MagicMock(return_value={}) + + with patch.dict( + glusterfs.__salt__, + { + "glusterfs.get_op_version": mock_get_version, + "glusterfs.set_op_version": mock_set_version, + }, + ): + mock_get_version.return_value = [False, "some error message"] + ret.update({"result": False}) + ret.update({"comment": "some error message"}) + assert glusterfs.op_version(name, current) == ret + + mock_get_version.return_value = current + ret.update({"result": True}) + ret.update( + { + "comment": ( + "Glusterfs cluster.op-version for {} already set to {}".format( + name, current + 
) + ) + } + ) + assert glusterfs.op_version(name, current) == ret + + with patch.dict(glusterfs.__opts__, {"test": True}): + mock_set_version.return_value = [False, "Failed to set version"] + ret.update({"result": None}) + ret.update( + { + "comment": ( + "An attempt would be made to set the cluster.op-version for" + " {} to {}.".format(name, new) + ) + } + ) + assert glusterfs.op_version(name, new) == ret + + with patch.dict(glusterfs.__opts__, {"test": False}): + mock_set_version.return_value = [False, "Failed to set version"] + ret.update({"result": False}) + ret.update({"comment": "Failed to set version"}) + assert glusterfs.op_version(name, new) == ret + + mock_set_version.return_value = "some success message" + ret.update({"comment": "some success message"}) + ret.update({"changes": {"old": current, "new": new}}) + ret.update({"result": True}) + assert glusterfs.op_version(name, new) == ret + + +def test_max_op_version(): + """ + Test setting the Glusterfs to its self reported max-op-version + """ + name = "salt" + current = 30707 + new = 31200 + + ret = {"name": name, "result": False, "comment": "", "changes": {}} + + mock_get_version = MagicMock(return_value={}) + mock_get_max_op_version = MagicMock(return_value={}) + mock_set_version = MagicMock(return_value={}) + + with patch.dict( + glusterfs.__salt__, + { + "glusterfs.get_op_version": mock_get_version, + "glusterfs.set_op_version": mock_set_version, + "glusterfs.get_max_op_version": mock_get_max_op_version, + }, + ): + mock_get_version.return_value = [False, "some error message"] + ret.update({"result": False}) + ret.update({"comment": "some error message"}) + assert glusterfs.max_op_version(name) == ret + + mock_get_version.return_value = current + mock_get_max_op_version.return_value = [False, "some error message"] + ret.update({"result": False}) + ret.update({"comment": "some error message"}) + assert glusterfs.max_op_version(name) == ret + + mock_get_version.return_value = current + 
mock_get_max_op_version.return_value = current + ret.update({"result": True}) + ret.update( + { + "comment": ( + "The cluster.op-version is already set to the" + " cluster.max-op-version of {}".format(current) + ) + } + ) + assert glusterfs.max_op_version(name) == ret + + with patch.dict(glusterfs.__opts__, {"test": True}): + mock_get_max_op_version.return_value = new + ret.update({"result": None}) + ret.update( + { + "comment": ( + "An attempt would be made to set the cluster.op-version" + " to {}.".format(new) + ) + } + ) + assert glusterfs.max_op_version(name) == ret + + with patch.dict(glusterfs.__opts__, {"test": False}): + mock_set_version.return_value = [False, "Failed to set version"] + ret.update({"result": False}) + ret.update({"comment": "Failed to set version"}) + assert glusterfs.max_op_version(name) == ret + + mock_set_version.return_value = "some success message" + ret.update({"comment": "some success message"}) + ret.update({"changes": {"old": current, "new": new}}) + ret.update({"result": True}) + assert glusterfs.max_op_version(name) == ret diff --git a/pkg/tests/support/helpers.py b/tests/support/pkg.py similarity index 86% rename from pkg/tests/support/helpers.py rename to tests/support/pkg.py index 130a82987a56..eaf28eb4a3b3 100644 --- a/pkg/tests/support/helpers.py +++ b/tests/support/pkg.py @@ -16,6 +16,7 @@ import psutil import pytest import requests +import saltfactories.cli from pytestshellutils.shell import DaemonImpl, Subprocess from pytestshellutils.utils.processes import ( ProcessResult, @@ -24,26 +25,15 @@ ) from pytestskipmarkers.utils import platform from saltfactories.bases import SystemdSaltDaemonImpl -from saltfactories.cli import call, key, salt +from saltfactories.cli import call, key from saltfactories.daemons import api, master, minion from saltfactories.utils import cli_scripts -try: - import crypt +import salt.utils.files +from tests.conftest import CODE_DIR +from tests.support.pytest.helpers import TestAccount, 
download_file - HAS_CRYPT = True -except ImportError: - HAS_CRYPT = False -try: - import pwd - - HAS_PWD = True -except ImportError: - HAS_PWD = False - -TESTS_DIR = pathlib.Path(__file__).resolve().parent.parent -CODE_DIR = TESTS_DIR.parent -ARTIFACTS_DIR = CODE_DIR / "artifacts" +ARTIFACTS_DIR = CODE_DIR / "artifacts" / "pkg" log = logging.getLogger(__name__) @@ -207,7 +197,8 @@ def _default_artifact_version(self): The version of the local salt artifacts being tested, based on regex matching """ version = "" - for artifact in ARTIFACTS_DIR.glob("**/*.*"): + artifacts = list(ARTIFACTS_DIR.glob("**/*.*")) + for artifact in artifacts: version = re.search( r"([0-9].*)(\-[0-9].fc|\-[0-9].el|\+ds|\_all|\_any|\_amd64|\_arm64|\-[0-9].am|(\-[0-9]-[a-z]*-[a-z]*[0-9_]*.|\-[0-9]*.*)(exe|msi|pkg|rpm|deb))", artifact.name, @@ -216,6 +207,11 @@ def _default_artifact_version(self): version = version.groups()[0].replace("_", "-").replace("~", "") version = version.split("-")[0] break + if not version: + pytest.fail( + f"Failed to package artifacts in '{ARTIFACTS_DIR}'. 
" + f"Directory Contents:\n{pprint.pformat(artifacts)}" + ) return version def update_process_path(self): @@ -413,7 +409,7 @@ def _install_pkgs(self, upgrade=False, downgrade=False): # ret = self.proc.run("start", "/wait", f"\"{str(pkg)} /start-minion=0 /S\"") batch_file = pathlib.Path(pkg).parent / "install_nsis.cmd" batch_content = f"start /wait {str(pkg)} /start-minion=0 /S" - with open(batch_file, "w") as fp: + with salt.utils.files.fopen(batch_file, "w") as fp: fp.write(batch_content) # Now run the batch file ret = self.proc.run("cmd.exe", "/c", str(batch_file)) @@ -426,7 +422,7 @@ def _install_pkgs(self, upgrade=False, downgrade=False): # expects unless we do it via a batch file batch_file = pathlib.Path(pkg).parent / "install_msi.cmd" batch_content = f'msiexec /qn /i "{str(pkg)}" START_MINION=""\n' - with open(batch_file, "w") as fp: + with salt.utils.files.fopen(batch_file, "w") as fp: fp.write(batch_content) # Now run the batch file ret = self.proc.run("cmd.exe", "/c", str(batch_file)) @@ -624,7 +620,7 @@ def install_previous(self, downgrade=False): f"https://repo.saltproject.io/{root_url}{distro_name}/{self.distro_version}/{arch}/{major_ver}/{gpg_key}", f"/etc/apt/keyrings/{gpg_dest}", ) - with open( + with salt.utils.files.fopen( pathlib.Path("/etc", "apt", "sources.list.d", "salt.list"), "w" ) as fp: fp.write( @@ -671,8 +667,8 @@ def install_previous(self, downgrade=False): # Let's not check the returncode if this is the case if not ( downgrade - and not packaging.version.parse(self.prev_version) - >= packaging.version.parse("3006.0") + and packaging.version.parse(self.prev_version) + < packaging.version.parse("3006.0") ): self._check_retcode(ret) if downgrade: @@ -716,7 +712,7 @@ def install_previous(self, downgrade=False): # expects unless we do it via a batch file batch_file = pkg_path.parent / "install_msi.cmd" batch_content = f'msiexec /qn /i {str(pkg_path)} START_MINION=""' - with open(batch_file, "w") as fp: + with 
salt.utils.files.fopen(batch_file, "w") as fp: fp.write(batch_content) # Now run the batch file ret = self.proc.run("cmd.exe", "/c", str(batch_file)) @@ -725,7 +721,7 @@ def install_previous(self, downgrade=False): # ret = self.proc.run("start", "/wait", f"\"{pkg_path} /start-minion=0 /S\"") batch_file = pkg_path.parent / "install_nsis.cmd" batch_content = f"start /wait {str(pkg_path)} /start-minion=0 /S" - with open(batch_file, "w") as fp: + with salt.utils.files.fopen(batch_file, "w") as fp: fp.write(batch_content) # Now run the batch file ret = self.proc.run("cmd.exe", "/c", str(batch_file)) @@ -826,40 +822,6 @@ def uninstall(self): ret = self.proc.run(self.pkg_mngr, self.rm_pkg, "-y", *self.salt_pkgs) self._check_retcode(ret) - def assert_uninstalled(self): - """ - Assert that the paths in /opt/saltstack/ were correctly - removed or not removed - """ - return - if platform.is_windows(): - # I'm not sure where the /opt/saltstack path is coming from - # This is the path we're using to test windows - opt_path = pathlib.Path(os.getenv("LocalAppData"), "salt", "pypath") - else: - opt_path = pathlib.Path(os.sep, "opt", "saltstack", "salt", "pypath") - if not opt_path.exists(): - if platform.is_windows(): - assert not opt_path.parent.exists() - else: - assert not opt_path.parent.parent.exists() - else: - opt_path_contents = list(opt_path.rglob("*")) - if not opt_path_contents: - pytest.fail( - f"The path '{opt_path}' exists but there are no files in it." 
- ) - else: - for path in list(opt_path_contents): - if path.name in (".installs.json", "__pycache__"): - opt_path_contents.remove(path) - if opt_path_contents: - pytest.fail( - "The test left some files behind: {}".format( - ", ".join([str(p) for p in opt_path_contents]) - ) - ) - def write_launchd_conf(self, service): service_name = f"com.saltstack.salt.{service}" ret = self.proc.run("launchctl", "list", service_name) @@ -873,8 +835,7 @@ def write_launchd_conf(self, service): plist_file.unlink() log.debug("Creating plist file for service: %s", service) - contents = textwrap.dedent( - f"""\ + contents = f"""\ @@ -886,9 +847,12 @@ def write_launchd_conf(self, service): KeepAlive ProgramArguments - - {self.run_root} - {service} + """ + for part in self.binary_paths[service]: + contents += ( + f"""\n {part}\n""" + ) + contents += f"""\ -c {self.conf_dir} @@ -905,8 +869,7 @@ def write_launchd_conf(self, service): """ - ) - plist_file.write_text(contents, encoding="utf-8") + plist_file.write_text(textwrap.dedent(contents), encoding="utf-8") contents = plist_file.read_text() log.debug("Created '%s'. Contents:\n%s", plist_file, contents) @@ -917,7 +880,7 @@ def write_systemd_conf(self, service, binary): ret = self.proc.run("systemctl", "daemon-reload") self._check_retcode(ret) ret = self.proc.run("systemctl", "status", service) - if ret.returncode in (3, 4): + if ret.returncode == 4: log.warning( "No systemd unit file was found for service %s. 
Creating one.", service ) @@ -941,7 +904,7 @@ def write_systemd_conf(self, service, binary): binary = shutil.which(binary[0]) or binary[0] elif isinstance(binary, list): binary = " ".join(binary) - unit_path = pathlib.Path("/etc", "systemd", "system", f"{service}.service") + unit_path = pathlib.Path(f"/etc/systemd/system/{service}.service") contents = contents.format( service=service, tgt=binary, conf_dir=self.conf_dir ) @@ -965,15 +928,17 @@ def __enter__(self): def __exit__(self, *_): if not self.no_uninstall: self.uninstall() - self.assert_uninstalled() class PkgSystemdSaltDaemonImpl(SystemdSaltDaemonImpl): + # pylint: disable=access-member-before-definition def get_service_name(self): if self._service_name is None: self._service_name = self.factory.script_name return self._service_name + # pylint: enable=access-member-before-definition + @attr.s(kw_only=True) class PkgLaunchdSaltDaemonImpl(PkgSystemdSaltDaemonImpl): @@ -1052,12 +1017,12 @@ def _terminate(self): # We completely override the parent class method because we're not using # the self._terminal property, it's a launchd service if self._process is None: # pragma: no cover + # pylint: disable=access-member-before-definition if TYPE_CHECKING: # Make mypy happy assert self._terminal_result - return ( - self._terminal_result - ) # pylint: disable=access-member-before-definition + return self._terminal_result + # pylint: enable=access-member-before-definition atexit.unregister(self.terminate) log.info("Stopping %s", self.factory) @@ -1065,12 +1030,10 @@ def _terminate(self): # Collect any child processes information before terminating the process with contextlib.suppress(psutil.NoSuchProcess): for child in psutil.Process(pid).children(recursive=True): - if ( - child not in self._children - ): # pylint: disable=access-member-before-definition - self._children.append( - child - ) # pylint: disable=access-member-before-definition + # pylint: disable=access-member-before-definition + if child not in 
self._children: + self._children.append(child) + # pylint: enable=access-member-before-definition if self._process.is_running(): # pragma: no cover cmdline = _get_cmdline(self._process) @@ -1109,10 +1072,12 @@ def _terminate(self): slow_stop=self.factory.slow_stop, ) + # pylint: disable=access-member-before-definition if self._terminal_stdout is not None: - self._terminal_stdout.close() # pylint: disable=access-member-before-definition + self._terminal_stdout.close() if self._terminal_stderr is not None: - self._terminal_stderr.close() # pylint: disable=access-member-before-definition + self._terminal_stderr.close() + # pylint: enable=access-member-before-definition stdout = stderr = "" try: self._terminal_result = ProcessResult( @@ -1187,12 +1152,12 @@ def _terminate(self): # We completely override the parent class method because we're not using the # self._terminal property, it's a systemd service if self._process is None: # pragma: no cover + # pylint: disable=access-member-before-definition if TYPE_CHECKING: # Make mypy happy assert self._terminal_result - return ( - self._terminal_result - ) # pylint: disable=access-member-before-definition + return self._terminal_result + # pylint: enable=access-member-before-definition atexit.unregister(self.terminate) log.info("Stopping %s", self.factory) @@ -1200,12 +1165,10 @@ def _terminate(self): # Collect any child processes information before terminating the process with contextlib.suppress(psutil.NoSuchProcess): for child in psutil.Process(pid).children(recursive=True): - if ( - child not in self._children - ): # pylint: disable=access-member-before-definition - self._children.append( - child - ) # pylint: disable=access-member-before-definition + # pylint: disable=access-member-before-definition + if child not in self._children: + self._children.append(child) + # pylint: enable=access-member-before-definition if self._process.is_running(): # pragma: no cover cmdline = _get_cmdline(self._process) @@ -1245,10 +1208,12 
@@ def _terminate(self): slow_stop=self.factory.slow_stop, ) + # pylint: disable=access-member-before-definition if self._terminal_stdout is not None: - self._terminal_stdout.close() # pylint: disable=access-member-before-definition + self._terminal_stdout.close() if self._terminal_stderr is not None: - self._terminal_stderr.close() # pylint: disable=access-member-before-definition + self._terminal_stderr.close() + # pylint: enable=access-member-before-definition stdout = stderr = "" try: self._terminal_result = ProcessResult( @@ -1347,17 +1312,21 @@ def salt_api_daemon(self, **kwargs): factory_class=SaltApi, salt_pkg_install=self.salt_pkg_install, **kwargs ) - def salt_key_cli(self, **factory_class_kwargs): + def salt_key_cli(self, factory_class=None, **factory_class_kwargs): + if not factory_class: + factory_class = SaltKey + factory_class_kwargs["salt_pkg_install"] = self.salt_pkg_install return super().salt_key_cli( - factory_class=SaltKey, - salt_pkg_install=self.salt_pkg_install, + factory_class=factory_class, **factory_class_kwargs, ) - def salt_cli(self, **factory_class_kwargs): + def salt_cli(self, factory_class=None, **factory_class_kwargs): + if not factory_class: + factory_class = SaltCli + factory_class_kwargs["salt_pkg_install"] = self.salt_pkg_install return super().salt_cli( - factory_class=SaltCli, - salt_pkg_install=self.salt_pkg_install, + factory_class=factory_class, **factory_class_kwargs, ) @@ -1415,10 +1384,12 @@ def write_systemd_conf(self): "salt-minion", self.salt_pkg_install.binary_paths["minion"] ) - def salt_call_cli(self, **factory_class_kwargs): + def salt_call_cli(self, factory_class=None, **factory_class_kwargs): + if not factory_class: + factory_class = SaltCall + factory_class_kwargs["salt_pkg_install"] = self.salt_pkg_install return super().salt_call_cli( - factory_class=SaltCall, - salt_pkg_install=self.salt_pkg_install, + factory_class=factory_class, **factory_class_kwargs, ) @@ -1465,14 +1436,14 @@ def 
__attrs_post_init__(self): @attr.s(kw_only=True, slots=True) -class SaltCli(PkgMixin, salt.SaltCli): +class SaltCli(PkgMixin, saltfactories.cli.salt.SaltCli): """ Subclassed just to tweak the binary paths if needed. """ def __attrs_post_init__(self): self.script_name = "salt" - salt.SaltCli.__attrs_post_init__(self) + saltfactories.cli.salt.SaltCli.__attrs_post_init__(self) @attr.s(kw_only=True, slots=True) @@ -1486,82 +1457,10 @@ def __attrs_post_init__(self): key.SaltKey.__attrs_post_init__(self) -@attr.s(kw_only=True, slots=True) -class TestUser: - """ - Add a test user - """ - - salt_call_cli = attr.ib() - - username = attr.ib(default="saltdev") - # Must follow Windows Password Complexity requirements - password = attr.ib(default="P@ssW0rd") - _pw_record = attr.ib(init=False, repr=False, default=None) - - def salt_call_local(self, *args): - ret = self.salt_call_cli.run("--local", *args) - if ret.returncode != 0: - log.error(ret) - assert ret.returncode == 0 - return ret.data - - def add_user(self): - log.debug("Adding system account %r", self.username) - if platform.is_windows(): - self.salt_call_local("user.add", self.username, self.password) - else: - self.salt_call_local("user.add", self.username) - hash_passwd = crypt.crypt(self.password, crypt.mksalt(crypt.METHOD_SHA512)) - self.salt_call_local("shadow.set_password", self.username, hash_passwd) - assert self.username in self.salt_call_local("user.list_users") - - def remove_user(self): - log.debug("Removing system account %r", self.username) - if platform.is_windows(): - self.salt_call_local( - "user.delete", self.username, "purge=True", "force=True" - ) - else: - self.salt_call_local("user.delete", self.username, "remove=True") - - @property - def pw_record(self): - if self._pw_record is None and HAS_PWD: - self._pw_record = pwd.getpwnam(self.username) - return self._pw_record - - @property - def uid(self): - if HAS_PWD: - return self.pw_record.pw_uid - return None - - @property - def gid(self): - if 
HAS_PWD: - return self.pw_record.pw_gid - return None - - @property - def env(self): - environ = os.environ.copy() - environ["LOGNAME"] = environ["USER"] = self.username - environ["HOME"] = self.pw_record.pw_dir - return environ - - def __enter__(self): - self.add_user() - return self - - def __exit__(self, *_): - self.remove_user() - - @attr.s(kw_only=True, slots=True) class ApiRequest: - salt_api: SaltApi = attr.ib(repr=False) - test_account: TestUser = attr.ib(repr=False) + port: int = attr.ib(repr=False) + account: TestAccount = attr.ib(repr=False) session: requests.Session = attr.ib(init=False, repr=False) api_uri: str = attr.ib(init=False) auth_data: Dict[str, str] = attr.ib(init=False) @@ -1572,13 +1471,13 @@ def _default_session(self): @api_uri.default def _default_api_uri(self): - return f"http://localhost:{self.salt_api.config['rest_cherrypy']['port']}" + return f"http://localhost:{self.port}" @auth_data.default def _default_auth_data(self): return { - "username": self.test_account.username, - "password": self.test_account.password, + "username": self.account.username, + "password": self.account.password, "eauth": "auto", "out": "json", } @@ -1595,49 +1494,3 @@ def __enter__(self): def __exit__(self, *args): self.session.__exit__(*args) - - -@pytest.helpers.register -def remove_stale_minion_key(master, minion_id): - key_path = os.path.join(master.config["pki_dir"], "minions", minion_id) - if os.path.exists(key_path): - os.unlink(key_path) - else: - log.debug("The minion(id=%r) key was not found at %s", minion_id, key_path) - - -@pytest.helpers.register -def remove_stale_master_key(master): - keys_path = os.path.join(master.config["pki_dir"], "master") - for key_name in ("master.pem", "master.pub"): - key_path = os.path.join(keys_path, key_name) - if os.path.exists(key_path): - os.unlink(key_path) - else: - log.debug( - "The master(id=%r) %s key was not found at %s", - master.id, - key_name, - key_path, - ) - key_path = 
os.path.join(master.config["pki_dir"], "minion", "minion_master.pub") - if os.path.exists(key_path): - os.unlink(key_path) - else: - log.debug( - "The master(id=%r) minion_master.pub key was not found at %s", - master.id, - key_path, - ) - - -@pytest.helpers.register -def download_file(url, dest, auth=None): - # NOTE the stream=True parameter below - with requests.get(url, stream=True, auth=auth) as r: - r.raise_for_status() - with open(dest, "wb") as f: - for chunk in r.iter_content(chunk_size=8192): - if chunk: - f.write(chunk) - return dest diff --git a/tests/support/pytest/helpers.py b/tests/support/pytest/helpers.py index 1c886daf4e78..77d46e1dcf20 100644 --- a/tests/support/pytest/helpers.py +++ b/tests/support/pytest/helpers.py @@ -171,6 +171,31 @@ def remove_stale_minion_key(master, minion_id): log.debug("The minion(id=%r) key was not found at %s", minion_id, key_path) +@pytest.helpers.register +def remove_stale_master_key(master): + keys_path = os.path.join(master.config["pki_dir"], "master") + for key_name in ("master.pem", "master.pub"): + key_path = os.path.join(keys_path, key_name) + if os.path.exists(key_path): + os.unlink(key_path) + else: + log.debug( + "The master(id=%r) %s key was not found at %s", + master.id, + key_name, + key_path, + ) + key_path = os.path.join(master.config["pki_dir"], "minion", "minion_master.pub") + if os.path.exists(key_path): + os.unlink(key_path) + else: + log.debug( + "The master(id=%r) minion_master.pub key was not found at %s", + master.id, + key_path, + ) + + @pytest.helpers.register def remove_stale_proxy_minion_cache_file(proxy_minion, minion_id=None): cachefile = os.path.join( diff --git a/tests/unit/modules/test_debian_ip.py b/tests/unit/modules/test_debian_ip.py deleted file mode 100644 index 4934f7e52947..000000000000 --- a/tests/unit/modules/test_debian_ip.py +++ /dev/null @@ -1,1198 +0,0 @@ -""" - :codeauthor: Jayesh Kariya -""" - -import tempfile - -import jinja2.exceptions -import pytest - -import 
salt.modules.debian_ip as debian_ip -import salt.utils.files -import salt.utils.platform -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock, patch -from tests.support.unit import TestCase - -try: - from salt.utils.odict import OrderedDict as odict -except ImportError: - from collections import OrderedDict as odict - -# Big pile of interface data for unit tests -# To skip, search for 'DebianIpTestCase' -# fmt: off -test_interfaces = [ - # Structure - #{'iface_name': 'ethX', 'iface_type': 'eth', 'enabled': True, - # 'skip_test': bool(), # True to disable this test - # 'build_interface': dict(), # data read from sls - # 'get_interface(): OrderedDict(), # data read from interfaces file - # 'return': list()}, # jinja-rendered data - - # IPv4-only interface; single address - {'iface_name': 'eth1', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'proto': 'static', - 'ipaddr': '192.168.4.9', - 'netmask': '255.255.255.0', - 'gateway': '192.168.4.1', - 'enable_ipv6': False, - 'noifupdown': True, - }, - 'get_interface': odict([('eth1', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'static'), - ('filename', None), - ('address', '192.168.4.9'), - ('netmask', '255.255.255.0'), - ('gateway', '192.168.4.1'), - ])), - ]))]))]), - 'return': [ - 'auto eth1\n', - 'iface eth1 inet static\n', - ' address 192.168.4.9\n', - ' netmask 255.255.255.0\n', - ' gateway 192.168.4.1\n', - '\n']}, - - # IPv6-only; single address - {'iface_name': 'eth2', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'ipv6proto': 'static', - 'ipv6ipaddr': '2001:db8:dead:beef::3', - 'ipv6netmask': '64', - 'ipv6gateway': '2001:db8:dead:beef::1', - 'enable_ipv6': True, - 'noifupdown': True, - }, - 'get_interface': odict([('eth2', odict([('enabled', True), ('data', odict([ - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'static'), - ('filename', None), - ('address', 
'2001:db8:dead:beef::3'), - ('netmask', 64), - ('gateway', '2001:db8:dead:beef::1'), - ])), - ]))]))]), - 'return': [ - 'auto eth2\n', - 'iface eth2 inet6 static\n', - ' address 2001:db8:dead:beef::3\n', - ' netmask 64\n', - ' gateway 2001:db8:dead:beef::1\n', - '\n']}, - - # IPv6-only; multiple addrs; no gw; first addr from ipv6addr - {'iface_name': 'eth3', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'ipv6proto': 'static', - 'ipv6ipaddr': '2001:db8:dead:beef::5/64', - 'ipv6ipaddrs': [ - '2001:db8:dead:beef::7/64', - '2001:db8:dead:beef::8/64', - '2001:db8:dead:beef::9/64'], - 'enable_ipv6': True, - 'noifupdown': True, - }, - 'get_interface': odict([('eth3', odict([('enabled', True), ('data', odict([ - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'static'), - ('filename', None), - ('address', '2001:db8:dead:beef::5/64'), - ('addresses', [ - '2001:db8:dead:beef::7/64', - '2001:db8:dead:beef::8/64', - '2001:db8:dead:beef::9/64', - ]), - ])), - ]))]))]), - 'return': [ - 'auto eth3\n', - 'iface eth3 inet6 static\n', - ' address 2001:db8:dead:beef::5/64\n', - ' address 2001:db8:dead:beef::7/64\n', - ' address 2001:db8:dead:beef::8/64\n', - ' address 2001:db8:dead:beef::9/64\n', - '\n']}, - - # IPv6-only; multiple addresses - {'iface_name': 'eth4', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'ipv6proto': 'static', - 'ipv6ipaddrs': [ - '2001:db8:dead:beef::5/64', - '2001:db8:dead:beef::7/64', - '2001:db8:dead:beef::8/64', - '2001:db8:dead:beef::9/64'], - 'ipv6gateway': '2001:db8:dead:beef::1', - 'enable_ipv6': True, - 'noifupdown': True, - }, - 'get_interface': odict([('eth4', odict([('enabled', True), ('data', odict([ - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'static'), - ('filename', None), - ('address', '2001:db8:dead:beef::5/64'), - ('addresses', [ - '2001:db8:dead:beef::7/64', - '2001:db8:dead:beef::8/64', - '2001:db8:dead:beef::9/64', - ]), - ('gateway', '2001:db8:dead:beef::1'), - ])), - ]))]))]), - 
'return': [ - 'auto eth4\n', - 'iface eth4 inet6 static\n', - ' address 2001:db8:dead:beef::5/64\n', - ' address 2001:db8:dead:beef::7/64\n', - ' address 2001:db8:dead:beef::8/64\n', - ' address 2001:db8:dead:beef::9/64\n', - ' gateway 2001:db8:dead:beef::1\n', - '\n']}, - - # IPv4 and IPv6 settings with v4 disabled - {'iface_name': 'eth5', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'proto': 'static', - 'ipaddr': '192.168.4.9', - 'netmask': '255.255.255.0', - 'gateway': '192.168.4.1', - 'ipv6proto': 'static', - 'ipv6ipaddr': '2001:db8:dead:beef::3', - 'ipv6netmask': '64', - 'ipv6gateway': '2001:db8:dead:beef::1', - 'enable_ipv4': False, - 'noifupdown': True, - }, - 'get_interface': odict([('eth5', odict([('enabled', True), ('data', odict([ - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'static'), - ('filename', None), - ('address', '2001:db8:dead:beef::3'), - ('netmask', 64), - ('gateway', '2001:db8:dead:beef::1'), - ])), - ]))]))]), - 'return': [ - 'auto eth5\n', - 'iface eth5 inet6 static\n', - ' address 2001:db8:dead:beef::3\n', - ' netmask 64\n', - ' gateway 2001:db8:dead:beef::1\n', - '\n']}, - - # IPv4 and IPv6 settings with v6 disabled - {'iface_name': 'eth6', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'proto': 'static', - 'ipaddr': '192.168.4.9', - 'netmask': '255.255.255.0', - 'gateway': '192.168.4.1', - 'ipv6proto': 'static', - 'ipv6ipaddr': '2001:db8:dead:beef::3', - 'ipv6netmask': '64', - 'ipv6gateway': '2001:db8:dead:beef::1', - 'enable_ipv6': False, - 'noifupdown': True, - }, - 'get_interface': odict([('eth6', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'static'), - ('filename', None), - ('address', '192.168.4.9'), - ('netmask', '255.255.255.0'), - ('gateway', '192.168.4.1'), - ])), - ]))]))]), - 'return': [ - 'auto eth6\n', - 'iface eth6 inet static\n', - ' address 192.168.4.9\n', - ' netmask 255.255.255.0\n', - ' gateway 192.168.4.1\n', - '\n']}, - 
- # IPv4 and IPv6; shared/overridden settings - {'iface_name': 'eth7', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'proto': 'static', - 'ipaddr': '192.168.4.9', - 'netmask': '255.255.255.0', - 'gateway': '192.168.4.1', - 'ipv6proto': 'static', - 'ipv6ipaddr': '2001:db8:dead:beef::3', - 'ipv6netmask': '64', - 'ipv6gateway': '2001:db8:dead:beef::1', - 'ttl': '18', # shared - 'ipv6ttl': '15', # overridden for v6 - 'mtu': '1480', # shared - 'enable_ipv6': True, - 'noifupdown': True, - }, - 'get_interface': odict([('eth7', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'static'), - ('filename', None), - ('address', '192.168.4.9'), - ('netmask', '255.255.255.0'), - ('gateway', '192.168.4.1'), - ('ttl', 18), - ('mtu', 1480), - ])), - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'static'), - ('filename', None), - ('address', '2001:db8:dead:beef::3'), - ('netmask', 64), - ('gateway', '2001:db8:dead:beef::1'), - ('ttl', 15), - ('mtu', 1480), - ])), - ]))]))]), - 'return': [ - 'auto eth7\n', - 'iface eth7 inet static\n', - ' address 192.168.4.9\n', - ' netmask 255.255.255.0\n', - ' gateway 192.168.4.1\n', - ' ttl 18\n', - ' mtu 1480\n', - 'iface eth7 inet6 static\n', - ' address 2001:db8:dead:beef::3\n', - ' netmask 64\n', - ' gateway 2001:db8:dead:beef::1\n', - ' ttl 15\n', - ' mtu 1480\n', - '\n']}, - - # Slave iface - {'iface_name': 'eth8', 'iface_type': 'slave', 'enabled': True, - 'build_interface': { - 'master': 'bond0', - 'noifupdown': True, - }, - 'get_interface': odict([('eth8', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'manual'), - ('filename', None), - ('bonding', odict([ - ('master', 'bond0'), - ])), - ('bonding_keys', ['master']), - ])), - ]))]))]), - 'return': [ - 'auto eth8\n', - 'iface eth8 inet manual\n', - ' bond-master bond0\n', - '\n']}, - - # Bond; with address IPv4 and IPv6 address; slaves as string - {'iface_name': 'bond9', 
'iface_type': 'bond', 'enabled': True, - 'build_interface': { - 'proto': 'static', - 'ipaddr': '10.1.0.14', - 'netmask': '255.255.255.0', - 'gateway': '10.1.0.1', - 'ipv6proto': 'static', - 'ipv6ipaddr': '2001:db8:dead:c0::3', - 'ipv6netmask': '64', - 'ipv6gateway': '2001:db8:dead:c0::1', - 'mode': '802.3ad', - 'slaves': 'eth4 eth5', - 'enable_ipv6': True, - 'noifupdown': True, - }, - 'get_interface': odict([('bond9', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'static'), - ('filename', None), - ('address', '10.1.0.14'), - ('netmask', '255.255.255.0'), - ('gateway', '10.1.0.1'), - ('bonding', odict([ - ('ad_select', '0'), - ('downdelay', '200'), - ('lacp_rate', '0'), - ('miimon', '100'), - ('mode', '4'), - ('slaves', 'eth4 eth5'), - ('updelay', '0'), - ('use_carrier', 'on'), - ])), - ('bonding_keys', [ - 'ad_select', - 'downdelay', - 'lacp_rate', - 'miimon', - 'mode', - 'slaves', - 'updelay', - 'use_carrier', - ]), - ])), - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'static'), - ('filename', None), - ('address', '2001:db8:dead:c0::3'), - ('netmask', 64), - ('gateway', '2001:db8:dead:c0::1'), - ('bonding', odict([ - ('ad_select', '0'), - ('downdelay', '200'), - ('lacp_rate', '0'), - ('miimon', '100'), - ('mode', '4'), - ('slaves', 'eth4 eth5'), - ('updelay', '0'), - ('use_carrier', 'on'), - ])), - ('bonding_keys', [ - 'ad_select', - 'downdelay', - 'lacp_rate', - 'miimon', - 'mode', - 'slaves', - 'updelay', - 'use_carrier', - ]), - ])), - ]))]))]), - 'return': [ - 'auto bond9\n', - 'iface bond9 inet static\n', - ' address 10.1.0.14\n', - ' netmask 255.255.255.0\n', - ' gateway 10.1.0.1\n', - ' bond-ad_select 0\n', - ' bond-downdelay 200\n', - ' bond-lacp_rate 0\n', - ' bond-miimon 100\n', - ' bond-mode 4\n', - ' bond-slaves eth4 eth5\n', - ' bond-updelay 0\n', - ' bond-use_carrier on\n', - 'iface bond9 inet6 static\n', - ' address 2001:db8:dead:c0::3\n', - ' netmask 64\n', - ' gateway 
2001:db8:dead:c0::1\n', - ' bond-ad_select 0\n', - ' bond-downdelay 200\n', - ' bond-lacp_rate 0\n', - ' bond-miimon 100\n', - ' bond-mode 4\n', - ' bond-slaves eth4 eth5\n', - ' bond-updelay 0\n', - ' bond-use_carrier on\n', - '\n']}, - - # Bond; with address IPv4 and IPv6 address; slaves as list - {'iface_name': 'bond10', 'iface_type': 'bond', 'enabled': True, - 'build_interface': { - 'proto': 'static', - 'ipaddr': '10.1.0.14', - 'netmask': '255.255.255.0', - 'gateway': '10.1.0.1', - 'ipv6proto': 'static', - 'ipv6ipaddr': '2001:db8:dead:c0::3', - 'ipv6netmask': '64', - 'ipv6gateway': '2001:db8:dead:c0::1', - 'mode': '802.3ad', - 'slaves': ['eth4', 'eth5'], - 'enable_ipv6': True, - 'noifupdown': True, - }, - 'get_interface': odict([('bond10', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'static'), - ('filename', None), - ('address', '10.1.0.14'), - ('netmask', '255.255.255.0'), - ('gateway', '10.1.0.1'), - ('bonding', odict([ - ('ad_select', '0'), - ('downdelay', '200'), - ('lacp_rate', '0'), - ('miimon', '100'), - ('mode', '4'), - ('slaves', 'eth4 eth5'), - ('updelay', '0'), - ('use_carrier', 'on'), - ])), - ('bonding_keys', [ - 'ad_select', - 'downdelay', - 'lacp_rate', - 'miimon', - 'mode', - 'slaves', - 'updelay', - 'use_carrier', - ]), - ])), - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'static'), - ('filename', None), - ('address', '2001:db8:dead:c0::3'), - ('netmask', 64), - ('gateway', '2001:db8:dead:c0::1'), - ('bonding', odict([ - ('ad_select', '0'), - ('downdelay', '200'), - ('lacp_rate', '0'), - ('miimon', '100'), - ('mode', '4'), - ('slaves', 'eth4 eth5'), - ('updelay', '0'), - ('use_carrier', 'on'), - ])), - ('bonding_keys', [ - 'ad_select', - 'downdelay', - 'lacp_rate', - 'miimon', - 'mode', - 'slaves', - 'updelay', - 'use_carrier', - ]), - ])), - ]))]))]), - 'return': [ - 'auto bond10\n', - 'iface bond10 inet static\n', - ' address 10.1.0.14\n', - ' netmask 255.255.255.0\n', - ' gateway 
10.1.0.1\n', - ' bond-ad_select 0\n', - ' bond-downdelay 200\n', - ' bond-lacp_rate 0\n', - ' bond-miimon 100\n', - ' bond-mode 4\n', - ' bond-slaves eth4 eth5\n', - ' bond-updelay 0\n', - ' bond-use_carrier on\n', - 'iface bond10 inet6 static\n', - ' address 2001:db8:dead:c0::3\n', - ' netmask 64\n', - ' gateway 2001:db8:dead:c0::1\n', - ' bond-ad_select 0\n', - ' bond-downdelay 200\n', - ' bond-lacp_rate 0\n', - ' bond-miimon 100\n', - ' bond-mode 4\n', - ' bond-slaves eth4 eth5\n', - ' bond-updelay 0\n', - ' bond-use_carrier on\n', - '\n']}, - - # Bond VLAN; with IPv4 address - {'iface_name': 'bond0.11', 'iface_type': 'vlan', 'enabled': True, - 'build_interface': { - 'proto': 'static', - 'ipaddr': '10.7.0.8', - 'netmask': '255.255.255.0', - 'gateway': '10.7.0.1', - 'slaves': 'eth6 eth7', - 'mode': '802.3ad', - 'enable_ipv6': False, - 'noifupdown': True, - }, - 'get_interface': odict([('bond0.11', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'static'), - ('filename', None), - ('vlan_raw_device', 'bond1'), - ('address', '10.7.0.8'), - ('netmask', '255.255.255.0'), - ('gateway', '10.7.0.1'), - ('mode', '802.3ad'), - ])), - ]))]))]), - 'return': [ - 'auto bond0.11\n', - 'iface bond0.11 inet static\n', - ' vlan-raw-device bond1\n', - ' address 10.7.0.8\n', - ' netmask 255.255.255.0\n', - ' gateway 10.7.0.1\n', - ' mode 802.3ad\n', - '\n']}, - - # Bond; without address - {'iface_name': 'bond0.12', 'iface_type': 'vlan', 'enabled': True, - 'build_interface': { - 'proto': 'static', - 'slaves': 'eth6 eth7', - 'mode': '802.3ad', - 'enable_ipv6': False, - 'noifupdown': True, - }, - 'get_interface': odict([('bond0.12', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'static'), - ('filename', None), - ('vlan_raw_device', 'bond1'), - ('mode', '802.3ad'), - ])), - ]))]))]), - 'return': [ - 'auto bond0.12\n', - 'iface bond0.12 inet static\n', - ' vlan-raw-device bond1\n', - ' 
mode 802.3ad\n', - '\n']}, - - # Bridged interface - {'iface_name': 'br0', 'iface_type': 'bridge', 'enabled': True, - 'build_interface': { - 'proto': 'static', - 'ipaddr': '192.168.4.10', - 'netmask': '255.255.255.0', - 'gateway': '192.168.4.1', - 'bridge_ports': 'eth1', - 'enable_ipv6': False, - 'noifupdown': True, - }, - 'get_interface': odict([('br0', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'static'), - ('filename', None), - ('address', '192.168.4.10'), - ('netmask', '255.255.255.0'), - ('gateway', '192.168.4.1'), - ('bridging', odict([ - ('ports', 'eth1'), - ])), - ('bridging_keys', ['ports']), - ])), - ]))]))]), - 'return': [ - 'auto br0\n', - 'iface br0 inet static\n', - ' address 192.168.4.10\n', - ' netmask 255.255.255.0\n', - ' gateway 192.168.4.1\n', - ' bridge_ports eth1\n', - '\n']}, - - - # DNS NS as list - {'iface_name': 'eth13', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'proto': 'static', - 'ipaddr': '192.168.4.9', - 'netmask': '255.255.255.0', - 'gateway': '192.168.4.1', - 'enable_ipv6': False, - 'noifupdown': True, - 'dns': ['8.8.8.8', '8.8.4.4'], - }, - 'get_interface': odict([('eth13', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'static'), - ('filename', None), - ('address', '192.168.4.9'), - ('netmask', '255.255.255.0'), - ('gateway', '192.168.4.1'), - ('dns_nameservers', ['8.8.8.8', '8.8.4.4']), - ])), - ]))]))]), - 'return': [ - 'auto eth13\n', - 'iface eth13 inet static\n', - ' address 192.168.4.9\n', - ' netmask 255.255.255.0\n', - ' gateway 192.168.4.1\n', - ' dns-nameservers 8.8.8.8 8.8.4.4\n', - '\n']}, - - # DNS NS as string - {'iface_name': 'eth14', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'proto': 'static', - 'ipaddr': '192.168.4.9', - 'netmask': '255.255.255.0', - 'gateway': '192.168.4.1', - 'enable_ipv6': False, - 'noifupdown': True, - 'dns': '8.8.8.8 8.8.4.4', - }, - 
'get_interface': odict([('eth14', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'static'), - ('filename', None), - ('address', '192.168.4.9'), - ('netmask', '255.255.255.0'), - ('gateway', '192.168.4.1'), - ('dns_nameservers', ['8.8.8.8', '8.8.4.4']), - ])), - ]))]))]), - 'return': [ - 'auto eth14\n', - 'iface eth14 inet static\n', - ' address 192.168.4.9\n', - ' netmask 255.255.255.0\n', - ' gateway 192.168.4.1\n', - ' dns-nameservers 8.8.8.8 8.8.4.4\n', - '\n']}, - - # Loopback; with IPv4 and IPv6 address - {'iface_name': 'lo15', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'proto': 'loopback', - 'ipaddr': '192.168.4.9', - 'netmask': '255.255.255.0', - 'gateway': '192.168.4.1', - 'enable_ipv6': True, - 'ipv6proto': 'loopback', - 'ipv6ipaddr': 'fc00::1', - 'ipv6netmask': '128', - 'ipv6_autoconf': False, - 'noifupdown': True, - }, - 'get_interface': odict([('lo15', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'loopback'), - ('filename', None), - ('address', '192.168.4.9'), - ('netmask', '255.255.255.0'), - ('gateway', '192.168.4.1'), - ])), - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'loopback'), - ('filename', None), - ('address', 'fc00::1'), - ('netmask', 128), - ])), - ]))]))]), - 'return': [ - 'auto lo15\n', - 'iface lo15 inet loopback\n', - ' address 192.168.4.9\n', - ' netmask 255.255.255.0\n', - ' gateway 192.168.4.1\n', - 'iface lo15 inet6 loopback\n', - ' address fc00::1\n', - ' netmask 128\n', - '\n']}, - - # Loopback; with only IPv6 address; enabled=False - {'iface_name': 'lo16', 'iface_type': 'eth', 'enabled': False, - 'build_interface': { - 'enable_ipv6': True, - 'ipv6proto': 'loopback', - 'ipv6ipaddr': 'fc00::1', - 'ipv6netmask': '128', - 'ipv6_autoconf': False, - 'noifupdown': True, - }, - 'get_interface': odict([('lo16', odict([('data', odict([ - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'loopback'), - 
('filename', None), - ('address', 'fc00::1'), - ('netmask', 128), - ])), - ]))]))]), - 'return': [ - 'iface lo16 inet6 loopback\n', - ' address fc00::1\n', - ' netmask 128\n', - '\n']}, - - # Loopback; without address - {'iface_name': 'lo17', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'proto': 'loopback', - 'enable_ipv6': False, - 'noifupdown': True, - }, - 'get_interface': odict([('lo17', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'loopback'), - ('filename', None), - ])), - ]))]))]), - 'return': [ - 'auto lo17\n', - 'iface lo17 inet loopback\n', - '\n']}, - - # IPv4=DHCP; IPv6=Static; with IPv6 netmask - {'iface_name': 'eth18', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'proto': 'dhcp', - 'enable_ipv6': True, - 'ipv6proto': 'static', - 'ipv6ipaddr': '2001:db8:dead:c0::3', - 'ipv6netmask': '64', - 'ipv6gateway': '2001:db8:dead:c0::1', - 'noifupdown': True, - }, - 'get_interface': odict([('eth18', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'dhcp'), - ('filename', None), - ])), - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'static'), - ('filename', None), - ('address', '2001:db8:dead:c0::3'), - ('netmask', 64), - ('gateway', '2001:db8:dead:c0::1'), - ])), - ]))]))]), - 'return': [ - 'auto eth18\n', - 'iface eth18 inet dhcp\n', - 'iface eth18 inet6 static\n', - ' address 2001:db8:dead:c0::3\n', - ' netmask 64\n', - ' gateway 2001:db8:dead:c0::1\n', - '\n']}, - - # IPv4=DHCP; IPv6=Static; without IPv6 netmask - {'iface_name': 'eth19', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'proto': 'dhcp', - 'enable_ipv6': True, - 'ipv6proto': 'static', - 'ipv6ipaddr': '2001:db8:dead:c0::3/64', - 'ipv6gateway': '2001:db8:dead:c0::1', - 'noifupdown': True, - }, - 'get_interface': odict([('eth19', odict([('enabled', True), ('data', odict([ - ('inet', odict([ - ('addrfam', 'inet'), - ('proto', 'dhcp'), - 
('filename', None), - ])), - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'static'), - ('filename', None), - ('address', '2001:db8:dead:c0::3/64'), - ('gateway', '2001:db8:dead:c0::1'), - ])), - ]))]))]), - 'return': [ - 'auto eth19\n', - 'iface eth19 inet dhcp\n', - 'iface eth19 inet6 static\n', - ' address 2001:db8:dead:c0::3/64\n', - ' gateway 2001:db8:dead:c0::1\n', - '\n']}, - - # IPv6-only; static with autoconf and accept_ra forced - {'iface_name': 'eth20', 'iface_type': 'eth', 'enabled': True, - 'build_interface': { - 'ipv6proto': 'static', - 'ipv6ipaddr': '2001:db8:dead:beef::3/64', - 'ipv6gateway': '2001:db8:dead:beef::1', - 'enable_ipv6': True, - 'autoconf': 1, - 'accept_ra': 2, - 'noifupdown': True, - }, - 'get_interface': odict([('eth20', odict([('enabled', True), ('data', odict([ - ('inet6', odict([ - ('addrfam', 'inet6'), - ('proto', 'static'), - ('filename', None), - ('autoconf', 1), - ('address', '2001:db8:dead:beef::3/64'), - ('gateway', '2001:db8:dead:beef::1'), - ('accept_ra', 2), - ])), - ]))]))]), - 'return': [ - 'auto eth20\n', - 'iface eth20 inet6 static\n', - ' autoconf 1\n', - ' address 2001:db8:dead:beef::3/64\n', - ' gateway 2001:db8:dead:beef::1\n', - ' accept_ra 2\n', - '\n']}, - ] -# fmt: on - - -@pytest.mark.skip_on_windows(reason="Do not run these tests on Windows") -class DebianIpTestCase(TestCase, LoaderModuleMockMixin): - """ - Test cases for salt.modules.debian_ip - """ - - def setup_loader_modules(self): - return {debian_ip: {}} - - # 'build_bond' function tests: 3 - - def test_build_bond(self): - """ - Test if it create a bond script in /etc/modprobe.d with the passed - settings and load the bonding kernel module. 
- """ - with patch( - "salt.modules.debian_ip._parse_settings_bond", MagicMock(return_value={}) - ), patch("salt.modules.debian_ip._write_file", MagicMock(return_value=True)): - mock = MagicMock(return_value=1) - with patch.dict(debian_ip.__grains__, {"osrelease": mock}): - mock = MagicMock(return_value=True) - with patch.dict( - debian_ip.__salt__, {"kmod.load": mock, "pkg.install": mock} - ): - self.assertEqual(debian_ip.build_bond("bond0"), "") - - def test_error_message_iface_should_process_non_str_expected(self): - values = [1, True, False, "no-kaboom"] - iface = "ethtest" - option = "test" - msg = debian_ip._error_msg_iface(iface, option, values) - self.assertTrue(msg.endswith("[1|True|False|no-kaboom]"), msg) - - def test_error_message_network_should_process_non_str_expected(self): - values = [1, True, False, "no-kaboom"] - msg = debian_ip._error_msg_network("fnord", values) - self.assertTrue(msg.endswith("[1|True|False|no-kaboom]"), msg) - - def test_build_bond_exception(self): - """ - Test if it create a bond script in /etc/modprobe.d with the passed - settings and load the bonding kernel module. - """ - with patch( - "salt.modules.debian_ip._parse_settings_bond", MagicMock(return_value={}) - ): - mock = MagicMock(return_value=1) - with patch.dict(debian_ip.__grains__, {"osrelease": mock}): - mock = MagicMock( - side_effect=jinja2.exceptions.TemplateNotFound("error") - ) - with patch.object(jinja2.Environment, "get_template", mock): - self.assertEqual(debian_ip.build_bond("bond0"), "") - - def test_build_bond_data(self): - """ - Test if it create a bond script in /etc/modprobe.d with the passed - settings and load the bonding kernel module. 
- """ - with patch( - "salt.modules.debian_ip._parse_settings_bond", MagicMock(return_value={}) - ), patch("salt.modules.debian_ip._read_temp", MagicMock(return_value=True)): - mock = MagicMock(return_value=1) - with patch.dict(debian_ip.__grains__, {"osrelease": mock}): - self.assertTrue(debian_ip.build_bond("bond0", test="True")) - - # 'build_routes' function tests: 2 - - def test_build_routes(self): - """ - Test if it add route scripts for a network interface using up commands. - """ - with patch( - "salt.modules.debian_ip._parse_routes", - MagicMock(return_value={"routes": []}), - ), patch( - "salt.modules.debian_ip._write_file_routes", MagicMock(return_value=True) - ), patch( - "salt.modules.debian_ip._read_file", MagicMock(return_value="salt") - ): - self.assertEqual(debian_ip.build_routes("eth0"), "saltsalt") - - def test_build_routes_exception(self): - """ - Test if it add route scripts for a network interface using up commands. - """ - with patch( - "salt.modules.debian_ip._parse_routes", - MagicMock(return_value={"routes": []}), - ): - self.assertTrue(debian_ip.build_routes("eth0", test="True")) - - mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound("err")) - with patch.object(jinja2.Environment, "get_template", mock): - self.assertEqual(debian_ip.build_routes("eth0"), "") - - # 'down' function tests: 1 - - def test_down(self): - """ - Test if it shutdown a network interface - """ - self.assertEqual(debian_ip.down("eth0", "slave"), None) - - mock = MagicMock(return_value="Salt") - with patch.dict(debian_ip.__salt__, {"cmd.run": mock}): - self.assertEqual(debian_ip.down("eth0", "eth"), "Salt") - - # 'get_bond' function tests: 1 - - def test_get_bond(self): - """ - Test if it return the content of a bond script - """ - self.assertEqual(debian_ip.get_bond("bond0"), "") - - # '_parse_interfaces' function tests: 1 - - def test_parse_interfaces(self): - """ - Test if it returns the correct data for parsed configuration file - """ - with 
tempfile.NamedTemporaryFile(mode="r", delete=True) as tfile: - for iface in test_interfaces: - iname = iface["iface_name"] - if iface.get("skip_test", False): - continue - with salt.utils.files.fopen(str(tfile.name), "w") as fh: - fh.writelines(iface["return"]) - for inet in ["inet", "inet6"]: - if inet in iface["get_interface"][iname]["data"]: - iface["get_interface"][iname]["data"][inet]["filename"] = str( - tfile.name - ) - self.assertDictEqual( - debian_ip._parse_interfaces([str(tfile.name)]), - iface["get_interface"], - ) - - # 'get_interface' function tests: 1 - - def test_get_interface(self): - """ - Test if it return the contents of an interface script - """ - for iface in test_interfaces: - if iface.get("skip_test", False): - continue - with patch.object( - debian_ip, - "_parse_interfaces", - MagicMock(return_value=iface["get_interface"]), - ): - self.assertListEqual( - debian_ip.get_interface(iface["iface_name"]), iface["return"] - ) - - # 'build_interface' function tests: 1 - - def test_build_interface(self): - """ - Test if it builds an interface script for a network interface. 
- """ - with patch( - "salt.modules.debian_ip._write_file_ifaces", MagicMock(return_value="salt") - ): - self.assertEqual( - debian_ip.build_interface("eth0", "eth", "enabled"), - ["s\n", "a\n", "l\n", "t\n"], - ) - - self.assertTrue( - debian_ip.build_interface("eth0", "eth", "enabled", test="True") - ) - - with patch.object( - debian_ip, "_parse_settings_eth", MagicMock(return_value={"routes": []}) - ): - for eth_t in ["bridge", "slave", "bond"]: - self.assertRaises( - AttributeError, - debian_ip.build_interface, - "eth0", - eth_t, - "enabled", - ) - - self.assertTrue( - debian_ip.build_interface("eth0", "eth", "enabled", test="True") - ) - - with tempfile.NamedTemporaryFile(mode="r", delete=True) as tfile: - with patch("salt.modules.debian_ip._DEB_NETWORK_FILE", str(tfile.name)): - for iface in test_interfaces: - if iface.get("skip_test", False): - continue - # Skip tests that require __salt__['pkg.install']() - if iface["iface_type"] in ["bridge", "pppoe", "vlan"]: - continue - self.assertListEqual( - debian_ip.build_interface( - iface=iface["iface_name"], - iface_type=iface["iface_type"], - enabled=iface["enabled"], - interface_file=tfile.name, - **iface["build_interface"] - ), - iface["return"], - ) - - # 'up' function tests: 1 - - def test_up(self): - """ - Test if it start up a network interface - """ - self.assertEqual(debian_ip.down("eth0", "slave"), None) - - mock = MagicMock(return_value="Salt") - with patch.dict(debian_ip.__salt__, {"cmd.run": mock}): - self.assertEqual(debian_ip.up("eth0", "eth"), "Salt") - - # 'get_network_settings' function tests: 1 - - def test_get_network_settings(self): - """ - Test if it return the contents of the global network script. 
- """ - with patch.dict( - debian_ip.__grains__, {"osfullname": "Ubuntu", "osrelease": "14"} - ), patch( - "salt.modules.debian_ip._parse_hostname", - MagicMock(return_value="SaltStack"), - ), patch( - "salt.modules.debian_ip._parse_domainname", - MagicMock(return_value="saltstack.com"), - ): - mock_avai = MagicMock(return_value=True) - with patch.dict( - debian_ip.__salt__, - {"service.available": mock_avai, "service.status": mock_avai}, - ): - self.assertEqual( - debian_ip.get_network_settings(), - [ - "NETWORKING=yes\n", - "HOSTNAME=SaltStack\n", - "DOMAIN=saltstack.com\n", - ], - ) - - mock = MagicMock( - side_effect=jinja2.exceptions.TemplateNotFound("error") - ) - with patch.object(jinja2.Environment, "get_template", mock): - self.assertEqual(debian_ip.get_network_settings(), "") - - # 'get_routes' function tests: 1 - - def test_get_routes(self): - """ - Test if it return the routes for the interface - """ - with patch("salt.modules.debian_ip._read_file", MagicMock(return_value="salt")): - self.assertEqual(debian_ip.get_routes("eth0"), "saltsalt") - - # 'apply_network_settings' function tests: 1 - - @pytest.mark.slow_test - def test_apply_network_settings(self): - """ - Test if it apply global network configuration. - """ - mock = MagicMock(return_value=True) - with patch.dict( - debian_ip.__salt__, - {"network.mod_hostname": mock, "service.stop": mock, "service.start": mock}, - ): - self.assertEqual(debian_ip.apply_network_settings(), True) - - # 'build_network_settings' function tests: 1 - - def test_build_network_settings(self): - """ - Test if it build the global network script. 
- """ - with patch( - "salt.modules.debian_ip._parse_network_settings", - MagicMock( - return_value={ - "networking": "yes", - "hostname": "Salt.saltstack.com", - "domainname": "saltstack.com", - "search": "test.saltstack.com", - } - ), - ), patch( - "salt.modules.debian_ip._write_file_network", MagicMock(return_value=True) - ): - with patch.dict( - debian_ip.__grains__, {"osfullname": "Ubuntu", "osrelease": "14"} - ): - mock = MagicMock(return_value=True) - with patch.dict( - debian_ip.__salt__, - { - "service.available": mock, - "service.disable": mock, - "service.enable": mock, - }, - ): - self.assertEqual( - debian_ip.build_network_settings(), - [ - "NETWORKING=yes\n", - "HOSTNAME=Salt\n", - "DOMAIN=saltstack.com\n", - "SEARCH=test.saltstack.com\n", - ], - ) - - mock = MagicMock( - side_effect=jinja2.exceptions.TemplateNotFound("error") - ) - with patch.object(jinja2.Environment, "get_template", mock): - self.assertEqual(debian_ip.build_network_settings(), "") - - with patch.dict( - debian_ip.__grains__, {"osfullname": "Ubuntu", "osrelease": "10"} - ): - mock = MagicMock(return_value=True) - with patch.dict( - debian_ip.__salt__, - { - "service.available": mock, - "service.disable": mock, - "service.enable": mock, - }, - ): - mock = MagicMock( - side_effect=jinja2.exceptions.TemplateNotFound("error") - ) - with patch.object(jinja2.Environment, "get_template", mock): - self.assertEqual(debian_ip.build_network_settings(), "") - - with patch.object( - debian_ip, "_read_temp", MagicMock(return_value=True) - ): - self.assertTrue(debian_ip.build_network_settings(test="True")) diff --git a/tests/unit/modules/test_dig.py b/tests/unit/modules/test_dig.py deleted file mode 100644 index ca2ccefd9312..000000000000 --- a/tests/unit/modules/test_dig.py +++ /dev/null @@ -1,193 +0,0 @@ -""" - :codeauthor: Nicole Thomas -""" -import pytest - -import salt.modules.dig as dig -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock, patch 
-from tests.support.unit import TestCase - -_SPF_VALUES = { - "dig +short xmission.com TXT": { - "pid": 27282, - "retcode": 0, - "stderr": "", - "stdout": '"v=spf1 a mx include:_spf.xmission.com ?all"', - }, - "dig +short _spf.xmission.com TXT": { - "pid": 27282, - "retcode": 0, - "stderr": "", - "stdout": '"v=spf1 a mx ip4:198.60.22.0/24 ip4:166.70.13.0/24 ~all"', - }, - "dig +short xmission-redirect.com TXT": { - "pid": 27282, - "retcode": 0, - "stderr": "", - "stdout": "v=spf1 redirect=_spf.xmission.com", - }, - "dig +short foo.com TXT": { - "pid": 27282, - "retcode": 0, - "stderr": "", - "stdout": "v=spf1 ip4:216.73.93.70/31 ip4:216.73.93.72/31 ~all", - }, -} - - -def _spf_side_effect(key, python_shell=False): - return _SPF_VALUES.get( - " ".join(key), {"pid": 27310, "retcode": 0, "stderr": "", "stdout": ""} - ) - - -@pytest.mark.skipif(dig.__virtual__() is False, reason="Dig must be installed") -class DigTestCase(TestCase, LoaderModuleMockMixin): - def setup_loader_modules(self): - return {dig: {}} - - def test_check_ip(self): - self.assertTrue(dig.check_ip("127.0.0.1"), msg="Not a valid ip address") - - def test_check_ip_ipv6(self): - self.assertTrue( - dig.check_ip("1111:2222:3333:4444:5555:6666:7777:8888"), - msg="Not a valid ip address", - ) - - def test_check_ip_ipv6_valid(self): - self.assertTrue(dig.check_ip("2607:fa18:0:3::4")) - - def test_check_ip_neg(self): - self.assertFalse( - dig.check_ip("-127.0.0.1"), msg="Did not detect negative value as invalid" - ) - - def test_check_ip_empty(self): - self.assertFalse(dig.check_ip(""), msg="Did not detect empty value as invalid") - - def test_a(self): - dig_mock = MagicMock( - return_value={ - "pid": 3656, - "retcode": 0, - "stderr": "", - "stdout": ( - "74.125.193.104\n" - "74.125.193.105\n" - "74.125.193.99\n" - "74.125.193.106\n" - "74.125.193.103\n" - "74.125.193.147" - ), - } - ) - with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): - self.assertEqual( - dig.A("www.google.com"), - [ - 
"74.125.193.104", - "74.125.193.105", - "74.125.193.99", - "74.125.193.106", - "74.125.193.103", - "74.125.193.147", - ], - ) - - def test_ptr(self): - dig_mock = MagicMock( - return_value={ - "pid": 3657, - "retcode": 0, - "stderr": "", - "stdout": ("dns.google."), - } - ) - with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): - self.assertEqual( - dig.ptr("8.8.8.8"), - [ - "dns.google.", - ], - ) - - def test_aaaa(self): - dig_mock = MagicMock( - return_value={ - "pid": 25451, - "retcode": 0, - "stderr": "", - "stdout": "2607:f8b0:400f:801::1014", - } - ) - with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): - self.assertEqual(dig.AAAA("www.google.com"), ["2607:f8b0:400f:801::1014"]) - - def test_ns(self): - with patch("salt.modules.dig.A", MagicMock(return_value=["ns4.google.com."])): - dig_mock = MagicMock( - return_value={ - "pid": 26136, - "retcode": 0, - "stderr": "", - "stdout": "ns4.google.com.", - } - ) - with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): - self.assertEqual(dig.NS("google.com"), ["ns4.google.com."]) - - def test_spf(self): - dig_mock = MagicMock(side_effect=_spf_side_effect) - with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): - self.assertEqual(dig.SPF("foo.com"), ["216.73.93.70/31", "216.73.93.72/31"]) - - def test_spf_redir(self): - """ - Test for SPF records which use the 'redirect' SPF mechanism - https://en.wikipedia.org/wiki/Sender_Policy_Framework#Mechanisms - """ - dig_mock = MagicMock(side_effect=_spf_side_effect) - with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): - self.assertEqual( - dig.SPF("xmission-redirect.com"), ["198.60.22.0/24", "166.70.13.0/24"] - ) - - def test_spf_include(self): - """ - Test for SPF records which use the 'include' SPF mechanism - https://en.wikipedia.org/wiki/Sender_Policy_Framework#Mechanisms - """ - dig_mock = MagicMock(side_effect=_spf_side_effect) - with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): - self.assertEqual( - dig.SPF("xmission.com"), 
["198.60.22.0/24", "166.70.13.0/24"] - ) - - def test_mx(self): - dig_mock = MagicMock( - return_value={ - "pid": 27780, - "retcode": 0, - "stderr": "", - "stdout": ( - "10 aspmx.l.google.com.\n" - "20 alt1.aspmx.l.google.com.\n" - "40 alt3.aspmx.l.google.com.\n" - "50 alt4.aspmx.l.google.com.\n" - "30 alt2.aspmx.l.google.com." - ), - } - ) - with patch.dict(dig.__salt__, {"cmd.run_all": dig_mock}): - self.assertEqual( - dig.MX("google.com"), - [ - ["10", "aspmx.l.google.com."], - ["20", "alt1.aspmx.l.google.com."], - ["40", "alt3.aspmx.l.google.com."], - ["50", "alt4.aspmx.l.google.com."], - ["30", "alt2.aspmx.l.google.com."], - ], - ) diff --git a/tests/unit/modules/test_dnsutil.py b/tests/unit/modules/test_dnsutil.py deleted file mode 100644 index 7d2a2f61b3ce..000000000000 --- a/tests/unit/modules/test_dnsutil.py +++ /dev/null @@ -1,145 +0,0 @@ -""" - :codeauthor: Nicole Thomas -""" - -import logging - -import pytest - -import salt.modules.dnsutil as dnsutil -import salt.utils.stringutils -from tests.support.mock import MagicMock, mock_open, patch -from tests.support.unit import TestCase - -log = logging.getLogger(__name__) - -mock_hosts_file = salt.utils.stringutils.to_str( - "##\n" - "# Host Database\n" - "#\n" - "# localhost is used to configure the loopback interface\n" - "# when the system is booting. Do not change this entry.\n" - "##\n" - "127.0.0.1 localhost\n" - "255.255.255.255 broadcasthost\n" - "::1 localhost\n" - "fe80::1%lo0 localhost" -) - -mock_hosts_file_rtn = { - "::1": ["localhost"], - "255.255.255.255": ["broadcasthost"], - "127.0.0.1": ["localhost"], - "fe80::1%lo0": ["localhost"], -} - -mock_soa_zone = salt.utils.stringutils.to_str( - "$TTL 3D\n" - "@ IN SOA land-5.com. root.land-5.com. (\n" - "199609203 ; Serial\n" - "28800 ; Refresh\n" - "7200 ; Retry\n" - "604800 ; Expire\n" - "86400) ; Minimum TTL\n" - "NS land-5.com.\n\n" - "1 PTR localhost." 
-) - -mock_writes_list = salt.utils.data.decode( - [ - "##\n", - "# Host Database\n", - "#\n", - "# localhost is used to configure the loopback interface\n", - "# when the system is booting. Do not change this entry.\n", - "##\n", - "127.0.0.1 localhost", - "\n", - "255.255.255.255 broadcasthost", - "\n", - "::1 localhost", - "\n", - "fe80::1%lo0 localhost", - "\n", - ], - to_str=True, -) - - -class DNSUtilTestCase(TestCase): - def test_parse_hosts(self): - with patch("salt.utils.files.fopen", mock_open(read_data=mock_hosts_file)): - self.assertEqual( - dnsutil.parse_hosts(), - { - "::1": ["localhost"], - "255.255.255.255": ["broadcasthost"], - "127.0.0.1": ["localhost"], - "fe80::1%lo0": ["localhost"], - }, - ) - - def test_hosts_append(self): - with patch( - "salt.utils.files.fopen", mock_open(read_data=mock_hosts_file) - ) as m_open, patch( - "salt.modules.dnsutil.parse_hosts", - MagicMock(return_value=mock_hosts_file_rtn), - ): - dnsutil.hosts_append("/etc/hosts", "127.0.0.1", "ad1.yuk.co,ad2.yuk.co") - writes = m_open.write_calls() - # We should have called .write() only once, with the expected - # content - num_writes = len(writes) - assert num_writes == 1, num_writes - expected = salt.utils.stringutils.to_str( - "\n127.0.0.1 ad1.yuk.co ad2.yuk.co" - ) - assert writes[0] == expected, writes[0] - - def test_hosts_remove(self): - to_remove = "ad1.yuk.co" - new_mock_file = mock_hosts_file + "\n127.0.0.1 " + to_remove + "\n" - with patch( - "salt.utils.files.fopen", mock_open(read_data=new_mock_file) - ) as m_open: - dnsutil.hosts_remove("/etc/hosts", to_remove) - writes = m_open.write_calls() - assert writes == mock_writes_list, writes - - @pytest.mark.skip(reason="Waiting on bug report fixes") - def test_parse_zone(self): - with patch("salt.utils.files.fopen", mock_open(read_data=mock_soa_zone)): - log.debug(mock_soa_zone) - log.debug(dnsutil.parse_zone("/var/lib/named/example.com.zone")) - - def test_to_seconds_hour(self): - self.assertEqual( - 
dnsutil._to_seconds("4H"), - 14400, - msg="Did not detect valid hours as invalid", - ) - - def test_to_seconds_day(self): - self.assertEqual( - dnsutil._to_seconds("1D"), 86400, msg="Did not detect valid day as invalid" - ) - - def test_to_seconds_week(self): - self.assertEqual( - dnsutil._to_seconds("2W"), - 604800, - msg="Did not set time greater than one week to one week", - ) - - def test_to_seconds_empty(self): - self.assertEqual( - dnsutil._to_seconds(""), 604800, msg="Did not set empty time to one week" - ) - - def test_to_seconds_large(self): - self.assertEqual( - dnsutil._to_seconds("604801"), - 604800, - msg="Did not set time greater than one week to one week", - ) diff --git a/tests/unit/modules/test_dpkg_lowpkg.py b/tests/unit/modules/test_dpkg_lowpkg.py deleted file mode 100644 index a97519f4891f..000000000000 --- a/tests/unit/modules/test_dpkg_lowpkg.py +++ /dev/null @@ -1,359 +0,0 @@ -""" - :codeauthor: Jayesh Kariya -""" - - -import logging -import os - -import salt.modules.dpkg_lowpkg as dpkg -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock, patch -from tests.support.unit import TestCase - -DPKG_ERROR_MSG = """dpkg-query: package 'httpd' is not installed -Use dpkg --contents (= dpkg-deb --contents) to list archive files contents. -""" - -DPKG_L_OUTPUT = { - "hostname": """\ -/. 
-/bin -/bin/hostname -/usr -/usr/share -/usr/share/doc -/usr/share/doc/hostname -/usr/share/doc/hostname/changelog.gz -/usr/share/doc/hostname/copyright -/usr/share/man -/usr/share/man/man1 -/usr/share/man/man1/hostname.1.gz -/bin/dnsdomainname -/bin/domainname -/bin/nisdomainname -/bin/ypdomainname -/usr/share/man/man1/dnsdomainname.1.gz -/usr/share/man/man1/domainname.1.gz -/usr/share/man/man1/nisdomainname.1.gz -/usr/share/man/man1/ypdomainname.1.gz -""" -} - - -class DpkgTestCase(TestCase, LoaderModuleMockMixin): - """ - Test cases for salt.modules.dpkg - """ - - def setUp(self): - dpkg_lowpkg_logger = logging.getLogger("salt.modules.dpkg_lowpkg") - self.level = dpkg_lowpkg_logger.level - dpkg_lowpkg_logger.setLevel(logging.FATAL) - - def tearDown(self): - logging.getLogger("salt.modules.dpkg_lowpkg").setLevel(self.level) - - def dpkg_L_side_effect(self, cmd, **kwargs): - self.assertEqual(cmd[:2], ["dpkg", "-L"]) - package = cmd[2] - return DPKG_L_OUTPUT[package] - - def setup_loader_modules(self): - return {dpkg: {}} - - # 'unpurge' function tests: 2 - - def test_unpurge(self): - """ - Test if it change package selection for each package - specified to 'install' - """ - mock = MagicMock(return_value=[]) - with patch.dict(dpkg.__salt__, {"pkg.list_pkgs": mock, "cmd.run": mock}): - self.assertDictEqual(dpkg.unpurge("curl"), {}) - - def test_unpurge_empty_package(self): - """ - Test if it change package selection for each package - specified to 'install' - """ - self.assertDictEqual(dpkg.unpurge(), {}) - - # 'list_pkgs' function tests: 1 - - def test_list_pkgs(self): - """ - Test if it lists the packages currently installed - """ - mock = MagicMock( - return_value={ - "retcode": 0, - "stderr": "", - "stdout": "installed\thostname\t3.21", - } - ) - with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}): - self.assertDictEqual(dpkg.list_pkgs("hostname"), {"hostname": "3.21"}) - - mock = MagicMock( - return_value={ - "retcode": 1, - "stderr": "dpkg-query: no packages 
found matching httpd", - "stdout": "", - } - ) - with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}): - self.assertEqual( - dpkg.list_pkgs("httpd"), - "Error: dpkg-query: no packages found matching httpd", - ) - - # 'file_list' function tests: 1 - - def test_file_list(self): - """ - Test if it lists the files that belong to a package. - """ - dpkg_query_mock = MagicMock( - return_value={"retcode": 0, "stderr": "", "stdout": "installed\thostname"} - ) - dpkg_L_mock = MagicMock(side_effect=self.dpkg_L_side_effect) - with patch.dict( - dpkg.__salt__, {"cmd.run_all": dpkg_query_mock, "cmd.run": dpkg_L_mock} - ): - self.assertDictEqual( - dpkg.file_list("hostname"), - { - "errors": [], - "files": [ - "/.", - "/bin", - "/bin/dnsdomainname", - "/bin/domainname", - "/bin/hostname", - "/bin/nisdomainname", - "/bin/ypdomainname", - "/usr", - "/usr/share", - "/usr/share/doc", - "/usr/share/doc/hostname", - "/usr/share/doc/hostname/changelog.gz", - "/usr/share/doc/hostname/copyright", - "/usr/share/man", - "/usr/share/man/man1", - "/usr/share/man/man1/dnsdomainname.1.gz", - "/usr/share/man/man1/domainname.1.gz", - "/usr/share/man/man1/hostname.1.gz", - "/usr/share/man/man1/nisdomainname.1.gz", - "/usr/share/man/man1/ypdomainname.1.gz", - ], - }, - ) - - mock = MagicMock( - return_value={"retcode": 1, "stderr": DPKG_ERROR_MSG, "stdout": ""} - ) - with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}): - self.assertEqual(dpkg.file_list("httpd"), "Error: " + DPKG_ERROR_MSG) - - # 'file_dict' function tests: 1 - - def test_file_dict(self): - """ - Test if it lists the files that belong to a package, grouped by package - """ - dpkg_query_mock = MagicMock( - return_value={"retcode": 0, "stderr": "", "stdout": "installed\thostname"} - ) - dpkg_L_mock = MagicMock(side_effect=self.dpkg_L_side_effect) - with patch.dict( - dpkg.__salt__, {"cmd.run_all": dpkg_query_mock, "cmd.run": dpkg_L_mock} - ): - expected = { - "errors": [], - "packages": { - "hostname": [ - "/.", - "/bin", - 
"/bin/hostname", - "/usr", - "/usr/share", - "/usr/share/doc", - "/usr/share/doc/hostname", - "/usr/share/doc/hostname/changelog.gz", - "/usr/share/doc/hostname/copyright", - "/usr/share/man", - "/usr/share/man/man1", - "/usr/share/man/man1/hostname.1.gz", - "/bin/dnsdomainname", - "/bin/domainname", - "/bin/nisdomainname", - "/bin/ypdomainname", - "/usr/share/man/man1/dnsdomainname.1.gz", - "/usr/share/man/man1/domainname.1.gz", - "/usr/share/man/man1/nisdomainname.1.gz", - "/usr/share/man/man1/ypdomainname.1.gz", - ] - }, - } - self.assertDictEqual(dpkg.file_dict("hostname"), expected) - - mock = MagicMock( - return_value={"retcode": 1, "stderr": DPKG_ERROR_MSG, "stdout": ""} - ) - with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}): - self.assertEqual(dpkg.file_dict("httpd"), "Error: " + DPKG_ERROR_MSG) - - def test_bin_pkg_info_spaces(self): - """ - Test the bin_pkg_info function - """ - file_proto_mock = MagicMock(return_value=True) - with patch.dict(dpkg.__salt__, {"config.valid_fileproto": file_proto_mock}): - cache_mock = MagicMock(return_value="/path/to/some/package.deb") - with patch.dict(dpkg.__salt__, {"cp.cache_file": cache_mock}): - dpkg_info_mock = MagicMock( - return_value={ - "retcode": 0, - "stderr": "", - "stdout": ( - " new Debian package, version 2.0\n" - " size 123456 bytes: control archive: 4029 bytes.\n" - " Package : package_name\n" - " Version : 1.0\n" - " Section : section_name\n" - " Priority : priority\n" - " Architecture : all\n" - " Description : some package\n" - ), - } - ) - with patch.dict(dpkg.__salt__, {"cmd.run_all": dpkg_info_mock}): - self.assertEqual( - dpkg.bin_pkg_info("package.deb")["name"], "package_name" - ) - - def test_bin_pkg_info_no_spaces(self): - """ - Test the bin_pkg_info function - """ - file_proto_mock = MagicMock(return_value=True) - with patch.dict(dpkg.__salt__, {"config.valid_fileproto": file_proto_mock}): - cache_mock = MagicMock(return_value="/path/to/some/package.deb") - with patch.dict(dpkg.__salt__, 
{"cp.cache_file": cache_mock}): - dpkg_info_mock = MagicMock( - return_value={ - "retcode": 0, - "stderr": "", - "stdout": ( - " new Debian package, version 2.0\n" - " size 123456 bytes: control archive: 4029 bytes.\n" - " Package: package_name\n" - " Version: 1.0\n" - " Section: section_name\n" - " Priority: priority\n" - " Architecture: all\n" - " Description: some package\n" - ), - } - ) - with patch.dict(dpkg.__salt__, {"cmd.run_all": dpkg_info_mock}): - self.assertEqual( - dpkg.bin_pkg_info("package.deb")["name"], "package_name" - ) - - def test_info(self): - """ - Test package info - """ - mock = MagicMock( - return_value={ - "retcode": 0, - "stderr": "", - "stdout": os.linesep.join( - [ - "package:bash", - "revision:", - "architecture:amd64", - "maintainer:Ubuntu Developers" - " ", - "summary:", - "source:bash", - "version:4.4.18-2ubuntu1", - "section:shells", - "installed_size:1588", - "size:", - "MD5:", - "SHA1:", - "SHA256:", - "origin:", - "homepage:http://tiswww.case.edu/php/chet/bash/bashtop.html", - "status:ii ", - "description:GNU Bourne Again SHell", - " Bash is an sh-compatible command language interpreter that" - " executes", - " commands read from the standard input or from a file. 
Bash" - " also", - " incorporates useful features from the Korn and C shells (ksh" - " and csh).", - " .", - " Bash is ultimately intended to be a conformant implementation" - " of the", - " IEEE POSIX Shell and Tools specification (IEEE Working Group" - " 1003.2).", - " .", - " The Programmable Completion Code, by Ian Macdonald, is now" - " found in", - " the bash-completion package.", - "", - "*/~^\\*", # pylint: disable=W1401 - ] - ), - } - ) - - with patch.dict(dpkg.__salt__, {"cmd.run_all": mock}), patch.dict( - dpkg.__grains__, {"os": "Ubuntu", "osrelease_info": (18, 4)} - ), patch("salt.utils.path.which", MagicMock(return_value=False)), patch( - "os.path.exists", MagicMock(return_value=False) - ), patch( - "os.path.getmtime", MagicMock(return_value=1560199259.0) - ): - self.assertDictEqual( - dpkg.info("bash"), - { - "bash": { - "architecture": "amd64", - "description": os.linesep.join( - [ - "GNU Bourne Again SHell", - " Bash is an sh-compatible command language interpreter" - " that executes", - " commands read from the standard input or from a file." - " Bash also", - " incorporates useful features from the Korn and C" - " shells (ksh and csh).", - " .", - " Bash is ultimately intended to be a conformant" - " implementation of the", - " IEEE POSIX Shell and Tools specification (IEEE" - " Working Group 1003.2).", - " .", - " The Programmable Completion Code, by Ian Macdonald," - " is now found in", - " the bash-completion package." 
+ os.linesep, - ] - ), - "homepage": "http://tiswww.case.edu/php/chet/bash/bashtop.html", - "maintainer": ( - "Ubuntu Developers " - ), - "package": "bash", - "section": "shells", - "source": "bash", - "status": "ii", - "version": "4.4.18-2ubuntu1", - } - }, - ) diff --git a/tests/unit/modules/test_redismod.py b/tests/unit/modules/test_redismod.py deleted file mode 100644 index a40f7155a331..000000000000 --- a/tests/unit/modules/test_redismod.py +++ /dev/null @@ -1,456 +0,0 @@ -""" - :codeauthor: Jayesh Kariya -""" - -from datetime import datetime - -import salt.modules.redismod as redismod -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock -from tests.support.unit import TestCase - - -class Mockredis: - """ - Mock redis class - """ - - class ConnectionError(Exception): - """ - Mock ConnectionError class - """ - - -class MockConnect: - """ - Mock Connect class - """ - - counter = 0 - - def __init__(self): - self.name = None - self.pattern = None - self.value = None - self.key = None - self.seconds = None - self.timestamp = None - self.field = None - self.start = None - self.stop = None - self.master_host = None - self.master_port = None - - @staticmethod - def bgrewriteaof(): - """ - Mock bgrewriteaof method - """ - return "A" - - @staticmethod - def bgsave(): - """ - Mock bgsave method - """ - return "A" - - def config_get(self, pattern): - """ - Mock config_get method - """ - self.pattern = pattern - return "A" - - def config_set(self, name, value): - """ - Mock config_set method - """ - self.name = name - self.value = value - return "A" - - @staticmethod - def dbsize(): - """ - Mock dbsize method - """ - return "A" - - @staticmethod - def delete(): - """ - Mock delete method - """ - return "A" - - def exists(self, key): - """ - Mock exists method - """ - self.key = key - return "A" - - def expire(self, key, seconds): - """ - Mock expire method - """ - self.key = key - self.seconds = seconds - return "A" - - def 
expireat(self, key, timestamp): - """ - Mock expireat method - """ - self.key = key - self.timestamp = timestamp - return "A" - - @staticmethod - def flushall(): - """ - Mock flushall method - """ - return "A" - - @staticmethod - def flushdb(): - """ - Mock flushdb method - """ - return "A" - - def get(self, key): - """ - Mock get method - """ - self.key = key - return "A" - - def hget(self, key, field): - """ - Mock hget method - """ - self.key = key - self.field = field - return "A" - - def hgetall(self, key): - """ - Mock hgetall method - """ - self.key = key - return "A" - - @staticmethod - def info(): - """ - Mock info method - """ - return "A" - - def keys(self, pattern): - """ - Mock keys method - """ - self.pattern = pattern - return "A" - - def type(self, key): - """ - Mock type method - """ - self.key = key - return "A" - - @staticmethod - def lastsave(): - """ - Mock lastsave method - """ - return datetime.now() - - def llen(self, key): - """ - Mock llen method - """ - self.key = key - return "A" - - def lrange(self, key, start, stop): - """ - Mock lrange method - """ - self.key = key - self.start = start - self.stop = stop - return "A" - - @staticmethod - def ping(): - """ - Mock ping method - """ - MockConnect.counter = MockConnect.counter + 1 - if MockConnect.counter == 1: - return "A" - elif MockConnect.counter in (2, 3, 5): - raise Mockredis.ConnectionError("foo") - - @staticmethod - def save(): - """ - Mock save method - """ - return "A" - - def set(self, key, value): - """ - Mock set method - """ - self.key = key - self.value = value - return "A" - - @staticmethod - def shutdown(): - """ - Mock shutdown method - """ - return "A" - - def slaveof(self, master_host, master_port): - """ - Mock slaveof method - """ - self.master_host = master_host - self.master_port = master_port - return "A" - - def smembers(self, key): - """ - Mock smembers method - """ - self.key = key - return "A" - - @staticmethod - def time(): - """ - Mock time method - """ - 
return "A" - - def zcard(self, key): - """ - Mock zcard method - """ - self.key = key - return "A" - - def zrange(self, key, start, stop): - """ - Mock zrange method - """ - self.key = key - self.start = start - self.stop = stop - return "A" - - -class RedismodTestCase(TestCase, LoaderModuleMockMixin): - """ - Test cases for salt.modules.redismod - """ - - def setup_loader_modules(self): - return { - redismod: { - "redis": Mockredis, - "_connect": MagicMock(return_value=MockConnect()), - } - } - - def test_bgrewriteaof(self): - """ - Test to asynchronously rewrite the append-only file - """ - self.assertEqual(redismod.bgrewriteaof(), "A") - - def test_bgsave(self): - """ - Test to asynchronously save the dataset to disk - """ - self.assertEqual(redismod.bgsave(), "A") - - def test_config_get(self): - """ - Test to get redis server configuration values - """ - self.assertEqual(redismod.config_get("*"), "A") - - def test_config_set(self): - """ - Test to set redis server configuration values - """ - self.assertEqual(redismod.config_set("name", "value"), "A") - - def test_dbsize(self): - """ - Test to return the number of keys in the selected database - """ - self.assertEqual(redismod.dbsize(), "A") - - def test_delete(self): - """ - Test to deletes the keys from redis, returns number of keys deleted - """ - self.assertEqual(redismod.delete(), "A") - - def test_exists(self): - """ - Test to return true if the key exists in redis - """ - self.assertEqual(redismod.exists("key"), "A") - - def test_expire(self): - """ - Test to set a keys time to live in seconds - """ - self.assertEqual(redismod.expire("key", "seconds"), "A") - - def test_expireat(self): - """ - Test to set a keys expire at given UNIX time - """ - self.assertEqual(redismod.expireat("key", "timestamp"), "A") - - def test_flushall(self): - """ - Test to remove all keys from all databases - """ - self.assertEqual(redismod.flushall(), "A") - - def test_flushdb(self): - """ - Test to remove all keys from the 
selected database - """ - self.assertEqual(redismod.flushdb(), "A") - - def test_get_key(self): - """ - Test to get redis key value - """ - self.assertEqual(redismod.get_key("key"), "A") - - def test_hget(self): - """ - Test to get specific field value from a redis hash, returns dict - """ - self.assertEqual(redismod.hget("key", "field"), "A") - - def test_hgetall(self): - """ - Test to get all fields and values from a redis hash, returns dict - """ - self.assertEqual(redismod.hgetall("key"), "A") - - def test_info(self): - """ - Test to get information and statistics about the server - """ - self.assertEqual(redismod.info(), "A") - - def test_keys(self): - """ - Test to get redis keys, supports glob style patterns - """ - self.assertEqual(redismod.keys("pattern"), "A") - - def test_key_type(self): - """ - Test to get redis key type - """ - self.assertEqual(redismod.key_type("key"), "A") - - def test_lastsave(self): - """ - Test to get the UNIX time in seconds of the last successful - save to disk - """ - self.assertTrue(redismod.lastsave()) - - def test_llen(self): - """ - Test to get the length of a list in Redis - """ - self.assertEqual(redismod.llen("key"), "A") - - def test_lrange(self): - """ - Test to get a range of values from a list in Redis - """ - self.assertEqual(redismod.lrange("key", "start", "stop"), "A") - - def test_ping(self): - """ - Test to ping the server, returns False on connection errors - """ - self.assertEqual(redismod.ping(), "A") - - self.assertFalse(redismod.ping()) - - def test_save(self): - """ - Test to synchronously save the dataset to disk - """ - self.assertEqual(redismod.save(), "A") - - def test_set_key(self): - """ - Test to set redis key value - """ - self.assertEqual(redismod.set_key("key", "value"), "A") - - def test_shutdown(self): - """ - Test to synchronously save the dataset to disk and then - shut down the server - """ - self.assertFalse(redismod.shutdown()) - - self.assertTrue(redismod.shutdown()) - - 
self.assertFalse(redismod.shutdown()) - - def test_slaveof(self): - """ - Test to make the server a slave of another instance, or - promote it as master - """ - self.assertEqual(redismod.slaveof("master_host", "master_port"), "A") - - def test_smembers(self): - """ - Test to get members in a Redis set - """ - self.assertListEqual(redismod.smembers("key"), ["A"]) - - def test_time(self): - """ - Test to return the current server UNIX time in seconds - """ - self.assertEqual(redismod.time(), "A") - - def test_zcard(self): - """ - Test to get the length of a sorted set in Redis - """ - self.assertEqual(redismod.zcard("key"), "A") - - def test_zrange(self): - """ - Test to get a range of values from a sorted set in Redis by index - """ - self.assertEqual(redismod.zrange("key", "start", "stop"), "A") diff --git a/tests/unit/modules/test_serverdensity_device.py b/tests/unit/modules/test_serverdensity_device.py deleted file mode 100644 index eeec3f1242ce..000000000000 --- a/tests/unit/modules/test_serverdensity_device.py +++ /dev/null @@ -1,216 +0,0 @@ -""" - :codeauthor: Jayesh Kariya -""" - - -import salt.modules.serverdensity_device as serverdensity_device -import salt.utils.json -from salt.exceptions import CommandExecutionError -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock, patch -from tests.support.unit import TestCase - - -class MockRequests: - """ - Mock smtplib class - """ - - flag = None - content = """{"message": "Invalid token", "errors": [{"type": "invalid_token", "subject": "token"}]}""" - status_code = None - - def __init__(self): - self.url = None - self.data = None - self.kwargs = None - - def return_request(self, url, data=None, **kwargs): - """ - Mock request method. 
- """ - self.url = url - self.data = data - self.kwargs = kwargs - requests = MockRequests() - if self.flag == 1: - requests.status_code = 401 - else: - requests.status_code = 200 - return requests - - def post(self, url, data=None, **kwargs): - """ - Mock post method. - """ - return self.return_request(url, data, **kwargs) - - def delete(self, url, **kwargs): - """ - Mock delete method. - """ - return self.return_request(url, **kwargs) - - def get(self, url, **kwargs): - """ - Mock get method. - """ - return self.return_request(url, **kwargs) - - def put(self, url, data=None, **kwargs): - """ - Mock put method. - """ - return self.return_request(url, data, **kwargs) - - -class ServerdensityDeviceTestCase(TestCase, LoaderModuleMockMixin): - """ - TestCase for salt.modules.serverdensity_device - """ - - def setup_loader_modules(self): - return {serverdensity_device: {"requests": MockRequests()}} - - def setUp(self): - self.mock_json_loads = MagicMock(side_effect=ValueError()) - - # 'get_sd_auth' function tests: 1 - - def test_get_sd_auth(self): - """ - Tests if it returns requested Server Density - authentication value from pillar. - """ - with patch.dict(serverdensity_device.__pillar__, {"serverdensity": False}): - self.assertRaises( - CommandExecutionError, serverdensity_device.get_sd_auth, "1" - ) - - with patch.dict( - serverdensity_device.__pillar__, {"serverdensity": {"1": "salt"}} - ): - self.assertEqual(serverdensity_device.get_sd_auth("1"), "salt") - - self.assertRaises( - CommandExecutionError, serverdensity_device.get_sd_auth, "2" - ) - - # 'create' function tests: 1 - - def test_create(self): - """ - Tests if it create device in Server Density. 
- """ - with patch.dict( - serverdensity_device.__pillar__, {"serverdensity": {"api_token": "salt"}} - ): - self.assertTrue(serverdensity_device.create("rich_lama", group="lama_band")) - - with patch.object(salt.utils.json, "loads", self.mock_json_loads): - self.assertRaises( - CommandExecutionError, - serverdensity_device.create, - "rich_lama", - group="lama_band", - ) - - MockRequests.flag = 1 - self.assertIsNone( - serverdensity_device.create("rich_lama", group="lama_band") - ) - - # 'delete' function tests: 1 - - def test_delete(self): - """ - Tests if it delete a device from Server Density. - """ - with patch.dict( - serverdensity_device.__pillar__, {"serverdensity": {"api_token": "salt"}} - ): - MockRequests.flag = 0 - self.assertTrue(serverdensity_device.delete("51f7eaf")) - - with patch.object(salt.utils.json, "loads", self.mock_json_loads): - self.assertRaises( - CommandExecutionError, serverdensity_device.delete, "51f7eaf" - ) - - MockRequests.flag = 1 - self.assertIsNone(serverdensity_device.delete("51f7eaf")) - - # 'ls' function tests: 1 - - def test_ls(self): - """ - Tests if it list devices in Server Density. - """ - with patch.dict( - serverdensity_device.__pillar__, {"serverdensity": {"api_token": "salt"}} - ): - MockRequests.flag = 0 - self.assertTrue(serverdensity_device.ls(name="lama")) - - with patch.object(salt.utils.json, "loads", self.mock_json_loads): - self.assertRaises( - CommandExecutionError, serverdensity_device.ls, name="lama" - ) - - MockRequests.flag = 1 - self.assertIsNone(serverdensity_device.ls(name="lama")) - - # 'update' function tests: 1 - - def test_update(self): - """ - Tests if it updates device information in Server Density. 
- """ - with patch.dict( - serverdensity_device.__pillar__, {"serverdensity": {"api_token": "salt"}} - ): - MockRequests.flag = 0 - self.assertTrue(serverdensity_device.update("51f7eaf", name="lama")) - - with patch.object(salt.utils.json, "loads", self.mock_json_loads): - self.assertRaises( - CommandExecutionError, - serverdensity_device.update, - "51f7eaf", - name="lama", - ) - - MockRequests.flag = 1 - self.assertIsNone(serverdensity_device.update("51f7eaf", name="lama")) - - # 'install_agent' function tests: 1 - - def test_install_agent(self): - """ - Tests if it downloads Server Density installation agent, - and installs sd-agent with agent_key. - """ - mock = MagicMock(return_value=True) - with patch.dict( - serverdensity_device.__pillar__, {"serverdensity": {"account_url": "salt"}} - ): - with patch.dict(serverdensity_device.__salt__, {"cmd.run": mock}): - with patch.dict(serverdensity_device.__opts__, {"cachedir": "/"}): - self.assertTrue(serverdensity_device.install_agent("51f7e")) - - # 'install_agent_v2' function tests: 1 - - def test_install_agent_v2(self): - """ - Tests if it downloads Server Density installation agent, - and installs sd-agent with agent_key. 
- """ - mock = MagicMock(return_value=True) - with patch.dict( - serverdensity_device.__pillar__, {"serverdensity": {"account_name": "salt"}} - ): - with patch.dict(serverdensity_device.__salt__, {"cmd.run": mock}): - with patch.dict(serverdensity_device.__opts__, {"cachedir": "/"}): - self.assertTrue( - serverdensity_device.install_agent("51f7e", agent_version=2) - ) diff --git a/tests/unit/modules/test_servicenow.py b/tests/unit/modules/test_servicenow.py deleted file mode 100644 index 4cb004dcc295..000000000000 --- a/tests/unit/modules/test_servicenow.py +++ /dev/null @@ -1,60 +0,0 @@ -""" - :codeauthor: Anthony Shaw -""" - - -import salt.modules.servicenow as servicenow -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock -from tests.support.unit import TestCase - - -class MockServiceNowClient: - def __init__(self, instance_name, username, password): - pass - - def get(self, query): - return [{"query_size": len(query), "query_value": query}] - - -class ServiceNowModuleTestCase(TestCase, LoaderModuleMockMixin): - def setup_loader_modules(self): - module_globals = { - "Client": MockServiceNowClient, - "__salt__": { - "config.option": MagicMock( - return_value={ - "instance_name": "test", - "username": "mr_test", - "password": "test123", - } - ) - }, - } - if servicenow.HAS_LIBS is False: - module_globals["sys.modules"] = {"servicenow_rest": MagicMock()} - module_globals["sys.modules"][ - "servicenow_rest" - ].api.Client = MockServiceNowClient - return {servicenow: module_globals} - - def test_module_creation(self): - client = servicenow._get_client() - self.assertFalse(client is None) - - def test_non_structured_query(self): - result = servicenow.non_structured_query("tests", "role=web") - self.assertFalse(result is None) - self.assertEqual(result[0]["query_size"], 8) - self.assertEqual(result[0]["query_value"], "role=web") - - def test_non_structured_query_kwarg(self): - result = 
servicenow.non_structured_query("tests", role="web") - self.assertFalse(result is None) - self.assertEqual(result[0]["query_size"], 8) - self.assertEqual(result[0]["query_value"], "role=web") - - def test_non_structured_query_kwarg_multi(self): - result = servicenow.non_structured_query("tests", role="web", type="computer") - self.assertFalse(result is None) - self.assertEqual(result[0]["query_size"], 22) diff --git a/tests/unit/modules/test_syslog_ng.py b/tests/unit/modules/test_syslog_ng.py deleted file mode 100644 index 0a45fc401c2f..000000000000 --- a/tests/unit/modules/test_syslog_ng.py +++ /dev/null @@ -1,347 +0,0 @@ -import os -from textwrap import dedent - -import pytest - -import salt.modules.syslog_ng as syslog_ng -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock, patch -from tests.support.unit import TestCase - -_VERSION = "3.6.0alpha0" -_MODULES = ( - "syslogformat,json-plugin,basicfuncs,afstomp,afsocket,cryptofuncs," - "afmongodb,dbparser,system-source,affile,pseudofile,afamqp," - "afsocket-notls,csvparser,linux-kmsg-format,afuser,confgen,afprog" -) - -VERSION_OUTPUT = """syslog-ng {0} -Installer-Version: {0} -Revision: -Compile-Date: Apr 4 2014 20:26:18 -Error opening plugin module; module='afsocket-tls', error='/home/tibi/install/syslog-ng/lib/syslog-ng/libafsocket-tls.so: undefined symbol: tls_context_setup_session' -Available-Modules: {1} -Enable-Debug: on -Enable-GProf: off -Enable-Memtrace: off -Enable-IPv6: on -Enable-Spoof-Source: off -Enable-TCP-Wrapper: off -Enable-Linux-Caps: off""".format( - _VERSION, _MODULES -) - -STATS_OUTPUT = """SourceName;SourceId;SourceInstance;State;Type;Number -center;;received;a;processed;0 -destination;#anon-destination0;;a;processed;0 -destination;#anon-destination1;;a;processed;0 -source;s_gsoc2014;;a;processed;0 -center;;queued;a;processed;0 -global;payload_reallocs;;a;processed;0 -global;sdata_updates;;a;processed;0 -global;msg_clones;;a;processed;0""" - 
-_SYSLOG_NG_NOT_INSTALLED_RETURN_VALUE = { - "retcode": -1, - "stderr": "Unable to execute the command 'syslog-ng'. It is not in the PATH.", -} -_SYSLOG_NG_CTL_NOT_INSTALLED_RETURN_VALUE = { - "retcode": -1, - "stderr": "Unable to execute the command 'syslog-ng-ctl'. It is not in the PATH.", -} - - -class SyslogNGTestCase(TestCase, LoaderModuleMockMixin): - - # pylint: disable=blacklisted-function - orig_env = {"PATH": "/foo:/bar"} - bin_dir = "/baz" - mocked_env = {"PATH": "/foo:/bar:/baz"} - # pylint: enable=blacklisted-function - - def setup_loader_modules(self): - return {syslog_ng: {}} - - def test_statement_without_options(self): - s = syslog_ng.Statement("source", "s_local", options=[]) - b = s.build() - self.assertEqual( - dedent( - """\ - source s_local { - }; - """ - ), - b, - ) - - def test_non_empty_statement(self): - o1 = syslog_ng.Option("file") - o2 = syslog_ng.Option("tcp") - s = syslog_ng.Statement("source", "s_local", options=[o1, o2]) - b = s.build() - self.assertEqual( - dedent( - """\ - source s_local { - file( - ); - tcp( - ); - }; - """ - ), - b, - ) - - def test_option_with_parameters(self): - o1 = syslog_ng.Option("file") - p1 = syslog_ng.SimpleParameter('"/var/log/messages"') - p2 = syslog_ng.SimpleParameter() - p3 = syslog_ng.TypedParameter() - p3.type = "tls" - p2.value = '"/var/log/syslog"' - o1.add_parameter(p1) - o1.add_parameter(p2) - o1.add_parameter(p3) - b = o1.build() - self.assertEqual( - dedent( - """\ - file( - "/var/log/messages", - "/var/log/syslog", - tls( - ) - ); - """ - ), - b, - ) - - def test_parameter_with_values(self): - p = syslog_ng.TypedParameter() - p.type = "tls" - v1 = syslog_ng.TypedParameterValue() - v1.type = "key_file" - - v2 = syslog_ng.TypedParameterValue() - v2.type = "cert_file" - - p.add_value(v1) - p.add_value(v2) - - b = p.build() - self.assertEqual( - dedent( - """\ - tls( - key_file( - ), - cert_file( - ) - )""" - ), - b, - ) - - def test_value_with_arguments(self): - t = 
syslog_ng.TypedParameterValue() - t.type = "key_file" - - a1 = syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"') - a2 = syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"') - - t.add_argument(a1) - t.add_argument(a2) - - b = t.build() - self.assertEqual( - dedent( - """\ - key_file( - "/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key" - "/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key" - )""" - ), - b, - ) - - def test_end_to_end_statement_generation(self): - s = syslog_ng.Statement("source", "s_tls") - - o = syslog_ng.Option("tcp") - - ip = syslog_ng.TypedParameter("ip") - ip.add_value(syslog_ng.SimpleParameterValue("'192.168.42.2'")) - o.add_parameter(ip) - - port = syslog_ng.TypedParameter("port") - port.add_value(syslog_ng.SimpleParameterValue(514)) - o.add_parameter(port) - - tls = syslog_ng.TypedParameter("tls") - key_file = syslog_ng.TypedParameterValue("key_file") - key_file.add_argument( - syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key"') - ) - cert_file = syslog_ng.TypedParameterValue("cert_file") - cert_file.add_argument( - syslog_ng.Argument('"/opt/syslog-ng/etc/syslog-ng/cert.d/syslog-ng.cert"') - ) - peer_verify = syslog_ng.TypedParameterValue("peer_verify") - peer_verify.add_argument(syslog_ng.Argument("optional-untrusted")) - tls.add_value(key_file) - tls.add_value(cert_file) - tls.add_value(peer_verify) - o.add_parameter(tls) - - s.add_child(o) - b = s.build() - self.assertEqual( - dedent( - """\ - source s_tls { - tcp( - ip( - '192.168.42.2' - ), - port( - 514 - ), - tls( - key_file( - "/opt/syslog-ng/etc/syslog-ng/key.d/syslog-ng.key" - ), - cert_file( - "/opt/syslog-ng/etc/syslog-ng/cert.d/syslog-ng.cert" - ), - peer_verify( - optional-untrusted - ) - ) - ); - }; - """ - ), - b, - ) - - @pytest.mark.skip_on_windows(reason="Module not available on Windows") - def test_version(self): - cmd_ret = {"retcode": 0, "stdout": VERSION_OUTPUT} - expected_output = {"retcode": 0, "stdout": 
_VERSION} - cmd_args = ["syslog-ng", "-V"] - - cmd_mock = MagicMock(return_value=cmd_ret) - with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( - os.environ, self.orig_env - ): - result = syslog_ng.version() - self.assertEqual(result, expected_output) - cmd_mock.assert_called_once_with(cmd_args, env=None, python_shell=False) - - cmd_mock = MagicMock(return_value=cmd_ret) - with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( - os.environ, self.orig_env - ): - result = syslog_ng.version(syslog_ng_sbin_dir=self.bin_dir) - self.assertEqual(result, expected_output) - cmd_mock.assert_called_once_with( - cmd_args, env=self.mocked_env, python_shell=False - ) - - @pytest.mark.skip_on_windows(reason="Module not available on Windows") - def test_stats(self): - cmd_ret = {"retcode": 0, "stdout": STATS_OUTPUT} - cmd_args = ["syslog-ng-ctl", "stats"] - - cmd_mock = MagicMock(return_value=cmd_ret) - with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( - os.environ, self.orig_env - ): - result = syslog_ng.stats() - self.assertEqual(result, cmd_ret) - cmd_mock.assert_called_once_with(cmd_args, env=None, python_shell=False) - - cmd_mock = MagicMock(return_value=cmd_ret) - with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( - os.environ, self.orig_env - ): - result = syslog_ng.stats(syslog_ng_sbin_dir=self.bin_dir) - self.assertEqual(result, cmd_ret) - cmd_mock.assert_called_once_with( - cmd_args, env=self.mocked_env, python_shell=False - ) - - @pytest.mark.skip_on_windows(reason="Module not available on Windows") - def test_modules(self): - cmd_ret = {"retcode": 0, "stdout": VERSION_OUTPUT} - expected_output = {"retcode": 0, "stdout": _MODULES} - cmd_args = ["syslog-ng", "-V"] - - cmd_mock = MagicMock(return_value=cmd_ret) - with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( - os.environ, self.orig_env - ): - result = syslog_ng.modules() - self.assertEqual(result, 
expected_output) - cmd_mock.assert_called_once_with(cmd_args, env=None, python_shell=False) - - cmd_mock = MagicMock(return_value=cmd_ret) - with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( - os.environ, self.orig_env - ): - result = syslog_ng.modules(syslog_ng_sbin_dir=self.bin_dir) - self.assertEqual(result, expected_output) - cmd_mock.assert_called_once_with( - cmd_args, env=self.mocked_env, python_shell=False - ) - - @pytest.mark.skip_on_windows(reason="Module not available on Windows") - def test_config_test(self): - cmd_ret = {"retcode": 0, "stderr": "", "stdout": "Foo"} - cmd_args = ["syslog-ng", "--syntax-only"] - - cmd_mock = MagicMock(return_value=cmd_ret) - with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( - os.environ, self.orig_env - ): - result = syslog_ng.config_test() - self.assertEqual(result, cmd_ret) - cmd_mock.assert_called_once_with(cmd_args, env=None, python_shell=False) - - cmd_mock = MagicMock(return_value=cmd_ret) - with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( - os.environ, self.orig_env - ): - result = syslog_ng.config_test(syslog_ng_sbin_dir=self.bin_dir) - self.assertEqual(result, cmd_ret) - cmd_mock.assert_called_once_with( - cmd_args, env=self.mocked_env, python_shell=False - ) - - @pytest.mark.skip_on_windows(reason="Module not available on Windows") - def test_config_test_cfgfile(self): - cfgfile = "/path/to/syslog-ng.conf" - cmd_ret = {"retcode": 1, "stderr": "Syntax error...", "stdout": ""} - cmd_args = ["syslog-ng", "--syntax-only", "--cfgfile={}".format(cfgfile)] - - cmd_mock = MagicMock(return_value=cmd_ret) - with patch.dict(syslog_ng.__salt__, {"cmd.run_all": cmd_mock}), patch.dict( - os.environ, self.orig_env - ): - self.assertEqual(syslog_ng.config_test(cfgfile=cfgfile), cmd_ret) - cmd_mock.assert_called_once_with(cmd_args, env=None, python_shell=False) - - cmd_mock = MagicMock(return_value=cmd_ret) - with patch.dict(syslog_ng.__salt__, 
{"cmd.run_all": cmd_mock}), patch.dict( - os.environ, self.orig_env - ): - self.assertEqual( - syslog_ng.config_test(syslog_ng_sbin_dir=self.bin_dir, cfgfile=cfgfile), - cmd_ret, - ) - cmd_mock.assert_called_once_with( - cmd_args, env=self.mocked_env, python_shell=False - ) diff --git a/tests/unit/modules/test_uwsgi.py b/tests/unit/modules/test_uwsgi.py deleted file mode 100644 index 2f5a735f10df..000000000000 --- a/tests/unit/modules/test_uwsgi.py +++ /dev/null @@ -1,23 +0,0 @@ -import salt.modules.uwsgi as uwsgi -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock, Mock, patch -from tests.support.unit import TestCase - - -class UwsgiTestCase(TestCase, LoaderModuleMockMixin): - def setup_loader_modules(self): - patcher = patch("salt.utils.path.which", Mock(return_value="/usr/bin/uwsgi")) - patcher.start() - self.addCleanup(patcher.stop) - return {uwsgi: {}} - - def test_uwsgi_stats(self): - socket = "127.0.0.1:5050" - mock = MagicMock(return_value='{"a": 1, "b": 2}') - with patch.dict(uwsgi.__salt__, {"cmd.run": mock}): - result = uwsgi.stats(socket) - mock.assert_called_once_with( - ["uwsgi", "--connect-and-read", "{}".format(socket)], - python_shell=False, - ) - self.assertEqual(result, {"a": 1, "b": 2}) diff --git a/tests/unit/modules/test_vagrant.py b/tests/unit/modules/test_vagrant.py deleted file mode 100644 index ee7411de1b56..000000000000 --- a/tests/unit/modules/test_vagrant.py +++ /dev/null @@ -1,174 +0,0 @@ -import os - -import salt.exceptions -import salt.modules.vagrant as vagrant -import salt.utils.platform -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock, patch -from tests.support.unit import TestCase - -TEMP_DATABASE_FILE = "/tmp/salt-tests-tmpdir/test_vagrant.sqlite" - - -class VagrantTestCase(TestCase, LoaderModuleMockMixin): - """ - Unit TestCase for the salt.modules.vagrant module. 
- """ - - LOCAL_OPTS = { - "extension_modules": "", - "vagrant_sdb_data": { - "driver": "sqlite3", - "database": TEMP_DATABASE_FILE, - "table": "sdb", - "create_table": True, - }, - } - - def setup_loader_modules(self): - vagrant_globals = { - "__opts__": self.LOCAL_OPTS, - } - return {vagrant: vagrant_globals} - - def test_vagrant_get_vm_info_not_found(self): - mock_sdb = MagicMock(return_value=None) - with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb}): - with self.assertRaises(salt.exceptions.SaltInvocationError): - vagrant.get_vm_info("thisNameDoesNotExist") - - def test_vagrant_init_positional(self): - path_nowhere = os.path.join(os.sep, "tmp", "nowhere") - if salt.utils.platform.is_windows(): - path_nowhere = "c:{}".format(path_nowhere) - mock_sdb = MagicMock(return_value=None) - with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}): - resp = vagrant.init( - "test1", - path_nowhere, - "onetest", - "nobody", - False, - "french", - {"different": "very"}, - ) - self.assertTrue(resp.startswith("Name test1 defined")) - expected = dict( - name="test1", - cwd=path_nowhere, - machine="onetest", - runas="nobody", - vagrant_provider="french", - different="very", - ) - mock_sdb.assert_called_with( - "sdb://vagrant_sdb_data/onetest?{}".format(path_nowhere), - "test1", - self.LOCAL_OPTS, - ) - mock_sdb.assert_any_call( - "sdb://vagrant_sdb_data/test1", expected, self.LOCAL_OPTS - ) - - def test_vagrant_get_vm_info(self): - testdict = {"testone": "one", "machine": "two"} - mock_sdb = MagicMock(return_value=testdict) - with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb}): - resp = vagrant.get_vm_info("test1") - self.assertEqual(resp, testdict) - - def test_vagrant_init_dict(self): - testdict = dict( - cwd="/tmp/anywhere", - machine="twotest", - runas="somebody", - vagrant_provider="english", - ) - expected = testdict.copy() - expected["name"] = "test2" - mock_sdb = MagicMock(return_value=None) - with patch.dict(vagrant.__utils__, {"sdb.sdb_set": 
mock_sdb}): - vagrant.init("test2", vm=testdict) - mock_sdb.assert_any_call( - "sdb://vagrant_sdb_data/test2", expected, self.LOCAL_OPTS - ) - - def test_vagrant_init_arg_override(self): - testdict = dict( - cwd="/tmp/there", - machine="treetest", - runas="anybody", - vagrant_provider="spansh", - ) - mock_sdb = MagicMock(return_value=None) - with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}): - vagrant.init( - "test3", - cwd="/tmp", - machine="threetest", - runas="him", - vagrant_provider="polish", - vm=testdict, - ) - expected = dict( - name="test3", - cwd="/tmp", - machine="threetest", - runas="him", - vagrant_provider="polish", - ) - mock_sdb.assert_any_call( - "sdb://vagrant_sdb_data/test3", expected, self.LOCAL_OPTS - ) - - def test_vagrant_get_ssh_config_fails(self): - mock_sdb = MagicMock(return_value=None) - with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}): - mock_sdb = MagicMock(return_value={}) - with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb}): - vagrant.init("test3", cwd="/tmp") - with self.assertRaises(salt.exceptions.SaltInvocationError): - vagrant.get_ssh_config("test3") # has not been started - - def test_vagrant_destroy(self): - path_mydir = os.path.join(os.sep, "my", "dir") - if salt.utils.platform.is_windows(): - path_mydir = "c:{}".format(path_mydir) - mock_cmd = MagicMock(return_value={"retcode": 0}) - with patch.dict(vagrant.__salt__, {"cmd.run_all": mock_cmd}): - mock_sdb = MagicMock(return_value=None) - with patch.dict(vagrant.__utils__, {"sdb.sdb_delete": mock_sdb}): - mock_sdb_get = MagicMock( - return_value={"machine": "macfour", "cwd": path_mydir} - ) - with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb_get}): - self.assertTrue(vagrant.destroy("test4")) - mock_sdb.assert_any_call( - "sdb://vagrant_sdb_data/macfour?{}".format(path_mydir), - self.LOCAL_OPTS, - ) - mock_sdb.assert_any_call( - "sdb://vagrant_sdb_data/test4", self.LOCAL_OPTS - ) - cmd = "vagrant destroy -f macfour" - 
mock_cmd.assert_called_with( - cmd, runas=None, cwd=path_mydir, output_loglevel="info" - ) - - def test_vagrant_start(self): - mock_cmd = MagicMock(return_value={"retcode": 0}) - with patch.dict(vagrant.__salt__, {"cmd.run_all": mock_cmd}): - mock_sdb_get = MagicMock( - return_value={ - "machine": "five", - "cwd": "/the/dir", - "runas": "me", - "vagrant_provider": "him", - } - ) - with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb_get}): - self.assertTrue(vagrant.start("test5")) - cmd = "vagrant up five --provider=him" - mock_cmd.assert_called_with( - cmd, runas="me", cwd="/the/dir", output_loglevel="info" - ) diff --git a/tests/unit/modules/test_xfs.py b/tests/unit/modules/test_xfs.py deleted file mode 100644 index 47b75a045098..000000000000 --- a/tests/unit/modules/test_xfs.py +++ /dev/null @@ -1,113 +0,0 @@ -import textwrap - -import salt.modules.xfs as xfs -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock, patch -from tests.support.unit import TestCase - - -@patch("salt.modules.xfs._get_mounts", MagicMock(return_value={})) -class XFSTestCase(TestCase, LoaderModuleMockMixin): - """ - Test cases for salt.modules.xfs - """ - - def setup_loader_modules(self): - return {xfs: {}} - - def test__blkid_output(self): - """ - Test xfs._blkid_output when there is data - """ - blkid_export = textwrap.dedent( - """ - DEVNAME=/dev/sda1 - UUID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX - TYPE=xfs - PARTUUID=YYYYYYYY-YY - - DEVNAME=/dev/sdb1 - PARTUUID=ZZZZZZZZ-ZZZZ-ZZZZ-ZZZZ-ZZZZZZZZZZZZ - """ - ) - # We expect to find only data from /dev/sda1, nothig from - # /dev/sdb1 - self.assertEqual( - xfs._blkid_output(blkid_export), - { - "/dev/sda1": { - "label": None, - "partuuid": "YYYYYYYY-YY", - "uuid": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", - } - }, - ) - - def test__parse_xfs_info(self): - """ - Test parsing output from mkfs.xfs. 
- """ - data = textwrap.dedent( - """ - meta-data=/dev/vg00/testvol isize=512 agcount=4, agsize=1310720 blks - = sectsz=4096 attr=2, projid32bit=1 - = crc=1 finobt=1, sparse=1, rmapbt=0 - = reflink=1 - data = bsize=4096 blocks=5242880, imaxpct=25 - = sunit=0 swidth=0 blks - naming =version 2 bsize=4096 ascii-ci=0, ftype=1 - log =internal log bsize=4096 blocks=2560, version=2 - = sectsz=4096 sunit=1 blks, lazy-count=1 - realtime =none extsz=4096 blocks=0, rtextents=0 - Discarding blocks...Done. - """ - ) - - self.assertEqual( - xfs._parse_xfs_info(data), - { - "meta-data": { - "section": "/dev/vg00/testvol", - "isize": "512", - "agcount": "4", - "agsize": "1310720 blks", - "sectsz": "4096", - "attr": "2", - "projid32bit": "1", - "crc": "1", - "finobt": "1", - "sparse": "1", - "rmapbt": "0", - "reflink": "1", - }, - "data": { - "section": "data", - "bsize": "4096", - "blocks": "5242880", - "imaxpct": "25", - "sunit": "0", - "swidth": "0 blks", - }, - "naming": { - "section": "version 2", - "bsize": "4096", - "ascii-ci": "0", - "ftype": "1", - }, - "log": { - "section": "internal log", - "bsize": "4096", - "blocks": "2560", - "version": "2", - "sectsz": "4096", - "sunit": "1 blks", - "lazy-count": "1", - }, - "realtime": { - "section": "none", - "extsz": "4096", - "blocks": "0", - "rtextents": "0", - }, - }, - ) diff --git a/tests/unit/states/test_gem.py b/tests/unit/states/test_gem.py deleted file mode 100644 index 90f48a518903..000000000000 --- a/tests/unit/states/test_gem.py +++ /dev/null @@ -1,134 +0,0 @@ -# Late import so mock can do its job -import salt.states.gem as gem -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock, patch -from tests.support.unit import TestCase - - -class TestGemState(TestCase, LoaderModuleMockMixin): - def setup_loader_modules(self): - return {gem: {"__opts__": {"test": False}}} - - def test_installed(self): - gems = {"foo": ["1.0"], "bar": ["2.0"]} - gem_list = MagicMock(return_value=gems) 
- gem_install_succeeds = MagicMock(return_value=True) - gem_install_fails = MagicMock(return_value=False) - - with patch.dict(gem.__salt__, {"gem.list": gem_list}): - with patch.dict(gem.__salt__, {"gem.install": gem_install_succeeds}): - ret = gem.installed("foo") - self.assertEqual(True, ret["result"]) - ret = gem.installed("quux") - self.assertEqual(True, ret["result"]) - gem_install_succeeds.assert_called_once_with( - "quux", - pre_releases=False, - ruby=None, - runas=None, - version=None, - proxy=None, - rdoc=False, - source=None, - ri=False, - gem_bin=None, - ) - - with patch.dict(gem.__salt__, {"gem.install": gem_install_fails}): - ret = gem.installed("quux") - self.assertEqual(False, ret["result"]) - gem_install_fails.assert_called_once_with( - "quux", - pre_releases=False, - ruby=None, - runas=None, - version=None, - proxy=None, - rdoc=False, - source=None, - ri=False, - gem_bin=None, - ) - - def test_installed_version(self): - gems = {"foo": ["1.0"], "bar": ["2.0"]} - gem_list = MagicMock(return_value=gems) - gem_install_succeeds = MagicMock(return_value=True) - - with patch.dict(gem.__salt__, {"gem.list": gem_list}): - with patch.dict(gem.__salt__, {"gem.install": gem_install_succeeds}): - ret = gem.installed("foo", version=">= 1.0") - self.assertEqual(True, ret["result"]) - self.assertEqual( - "Installed Gem meets version requirements.", ret["comment"] - ) - - def test_removed(self): - gems = ["foo", "bar"] - gem_list = MagicMock(return_value=gems) - gem_uninstall_succeeds = MagicMock(return_value=True) - gem_uninstall_fails = MagicMock(return_value=False) - with patch.dict(gem.__salt__, {"gem.list": gem_list}): - with patch.dict(gem.__salt__, {"gem.uninstall": gem_uninstall_succeeds}): - ret = gem.removed("quux") - self.assertEqual(True, ret["result"]) - ret = gem.removed("foo") - self.assertEqual(True, ret["result"]) - gem_uninstall_succeeds.assert_called_once_with( - "foo", None, runas=None, gem_bin=None - ) - - with patch.dict(gem.__salt__, 
{"gem.uninstall": gem_uninstall_fails}): - ret = gem.removed("bar") - self.assertEqual(False, ret["result"]) - gem_uninstall_fails.assert_called_once_with( - "bar", None, runas=None, gem_bin=None - ) - - def test_sources_add(self): - gem_sources = ["http://foo", "http://bar"] - gem_sources_list = MagicMock(return_value=gem_sources) - gem_sources_add_succeeds = MagicMock(return_value=True) - gem_sources_add_fails = MagicMock(return_value=False) - with patch.dict(gem.__salt__, {"gem.sources_list": gem_sources_list}): - with patch.dict( - gem.__salt__, {"gem.sources_add": gem_sources_add_succeeds} - ): - ret = gem.sources_add("http://foo") - self.assertEqual(True, ret["result"]) - ret = gem.sources_add("http://fui") - self.assertEqual(True, ret["result"]) - gem_sources_add_succeeds.assert_called_once_with( - source_uri="http://fui", ruby=None, runas=None - ) - with patch.dict(gem.__salt__, {"gem.sources_add": gem_sources_add_fails}): - ret = gem.sources_add("http://fui") - self.assertEqual(False, ret["result"]) - gem_sources_add_fails.assert_called_once_with( - source_uri="http://fui", ruby=None, runas=None - ) - - def test_sources_remove(self): - gem_sources = ["http://foo", "http://bar"] - gem_sources_list = MagicMock(return_value=gem_sources) - gem_sources_remove_succeeds = MagicMock(return_value=True) - gem_sources_remove_fails = MagicMock(return_value=False) - with patch.dict(gem.__salt__, {"gem.sources_list": gem_sources_list}): - with patch.dict( - gem.__salt__, {"gem.sources_remove": gem_sources_remove_succeeds} - ): - ret = gem.sources_remove("http://fui") - self.assertEqual(True, ret["result"]) - ret = gem.sources_remove("http://foo") - self.assertEqual(True, ret["result"]) - gem_sources_remove_succeeds.assert_called_once_with( - source_uri="http://foo", ruby=None, runas=None - ) - with patch.dict( - gem.__salt__, {"gem.sources_remove": gem_sources_remove_fails} - ): - ret = gem.sources_remove("http://bar") - self.assertEqual(False, ret["result"]) - 
gem_sources_remove_fails.assert_called_once_with( - source_uri="http://bar", ruby=None, runas=None - ) diff --git a/tests/unit/states/test_glusterfs.py b/tests/unit/states/test_glusterfs.py deleted file mode 100644 index 5204d3607aa2..000000000000 --- a/tests/unit/states/test_glusterfs.py +++ /dev/null @@ -1,451 +0,0 @@ -""" - :codeauthor: Jayesh Kariya -""" - -import salt.modules.glusterfs as mod_glusterfs -import salt.states.glusterfs as glusterfs -import salt.utils.cloud -import salt.utils.network -from tests.support.mixins import LoaderModuleMockMixin -from tests.support.mock import MagicMock, patch -from tests.support.unit import TestCase - - -class GlusterfsTestCase(TestCase, LoaderModuleMockMixin): - """ - Test cases for salt.states.glusterfs - """ - - def setup_loader_modules(self): - return {glusterfs: {"__salt__": {"glusterfs.peer": mod_glusterfs.peer}}} - - # 'peered' function tests: 1 - - def test_peered(self): - """ - Test to verify if node is peered. - """ - name = "server1" - - ret = {"name": name, "result": True, "comment": "", "changes": {}} - - mock_ip = MagicMock(return_value=["1.2.3.4", "1.2.3.5"]) - mock_ip6 = MagicMock(return_value=["2001:db8::1"]) - mock_host_ips = MagicMock(return_value=["1.2.3.5"]) - mock_peer = MagicMock(return_value=True) - mock_status = MagicMock(return_value={"uuid1": {"hostnames": [name]}}) - - with patch.dict( - glusterfs.__salt__, - {"glusterfs.peer_status": mock_status, "glusterfs.peer": mock_peer}, - ): - with patch.object(salt.utils.network, "ip_addrs", mock_ip), patch.object( - salt.utils.network, "ip_addrs6", mock_ip6 - ), patch.object(salt.utils.network, "host_to_ips", mock_host_ips): - comt = "Peering with localhost is not needed" - ret.update({"comment": comt}) - self.assertDictEqual(glusterfs.peered(name), ret) - - mock_host_ips.return_value = ["127.0.1.1"] - comt = "Peering with localhost is not needed" - ret.update({"comment": comt}) - self.assertDictEqual(glusterfs.peered(name), ret) - - 
mock_host_ips.return_value = ["2001:db8::1"] - self.assertDictEqual(glusterfs.peered(name), ret) - - mock_host_ips.return_value = ["1.2.3.42"] - comt = "Host {} already peered".format(name) - ret.update({"comment": comt}) - self.assertDictEqual(glusterfs.peered(name), ret) - - with patch.dict(glusterfs.__opts__, {"test": False}): - old = {"uuid1": {"hostnames": ["other1"]}} - new = { - "uuid1": {"hostnames": ["other1"]}, - "uuid2": {"hostnames": ["someAlias", name]}, - } - mock_status.side_effect = [old, new] - comt = "Host {} successfully peered".format(name) - ret.update({"comment": comt, "changes": {"old": old, "new": new}}) - self.assertDictEqual(glusterfs.peered(name), ret) - mock_status.side_effect = None - - mock_status.return_value = {"uuid1": {"hostnames": ["other"]}} - mock_peer.return_value = False - - ret.update({"result": False}) - - comt = ( - "Failed to peer with {0}," + " please check logs for errors" - ).format(name) - ret.update({"comment": comt, "changes": {}}) - self.assertDictEqual(glusterfs.peered(name), ret) - - comt = "Invalid characters in peer name." 
- ret.update({"comment": comt, "name": ":/"}) - self.assertDictEqual(glusterfs.peered(":/"), ret) - ret.update({"name": name}) - - with patch.dict(glusterfs.__opts__, {"test": True}): - comt = "Peer {} will be added.".format(name) - ret.update({"comment": comt, "result": None}) - self.assertDictEqual(glusterfs.peered(name), ret) - - # 'volume_present' function tests: 1 - - def test_volume_present(self): - """ - Test to ensure that a volume exists - """ - name = "salt" - bricks = ["host1:/brick1"] - ret = {"name": name, "result": True, "comment": "", "changes": {}} - - started_info = {name: {"status": "1"}} - stopped_info = {name: {"status": "0"}} - - mock_info = MagicMock() - mock_list = MagicMock() - mock_create = MagicMock() - mock_start = MagicMock(return_value=True) - - with patch.dict( - glusterfs.__salt__, - { - "glusterfs.info": mock_info, - "glusterfs.list_volumes": mock_list, - "glusterfs.create_volume": mock_create, - "glusterfs.start_volume": mock_start, - }, - ): - with patch.dict(glusterfs.__opts__, {"test": False}): - mock_list.return_value = [name] - mock_info.return_value = started_info - comt = "Volume {} already exists and is started".format(name) - ret.update({"comment": comt}) - self.assertDictEqual( - glusterfs.volume_present(name, bricks, start=True), ret - ) - - mock_info.return_value = stopped_info - comt = "Volume {} already exists and is now started".format(name) - ret.update( - {"comment": comt, "changes": {"old": "stopped", "new": "started"}} - ) - self.assertDictEqual( - glusterfs.volume_present(name, bricks, start=True), ret - ) - - comt = "Volume {} already exists".format(name) - ret.update({"comment": comt, "changes": {}}) - self.assertDictEqual( - glusterfs.volume_present(name, bricks, start=False), ret - ) - with patch.dict(glusterfs.__opts__, {"test": True}): - comt = "Volume {} already exists".format(name) - ret.update({"comment": comt, "result": None}) - self.assertDictEqual( - glusterfs.volume_present(name, bricks, 
start=False), ret - ) - - comt = ("Volume {0} already exists" + " and will be started").format( - name - ) - ret.update({"comment": comt, "result": None}) - self.assertDictEqual( - glusterfs.volume_present(name, bricks, start=True), ret - ) - - mock_list.return_value = [] - comt = "Volume {} will be created".format(name) - ret.update({"comment": comt, "result": None}) - self.assertDictEqual( - glusterfs.volume_present(name, bricks, start=False), ret - ) - - comt = ("Volume {0} will be created" + " and started").format(name) - ret.update({"comment": comt, "result": None}) - self.assertDictEqual( - glusterfs.volume_present(name, bricks, start=True), ret - ) - - with patch.dict(glusterfs.__opts__, {"test": False}): - mock_list.side_effect = [[], [name]] - comt = "Volume {} is created".format(name) - ret.update( - { - "comment": comt, - "result": True, - "changes": {"old": [], "new": [name]}, - } - ) - self.assertDictEqual( - glusterfs.volume_present(name, bricks, start=False), ret - ) - - mock_list.side_effect = [[], [name]] - comt = "Volume {} is created and is now started".format(name) - ret.update({"comment": comt, "result": True}) - self.assertDictEqual( - glusterfs.volume_present(name, bricks, start=True), ret - ) - - mock_list.side_effect = None - mock_list.return_value = [] - mock_create.return_value = False - comt = "Creation of volume {} failed".format(name) - ret.update({"comment": comt, "result": False, "changes": {}}) - self.assertDictEqual(glusterfs.volume_present(name, bricks), ret) - - with patch.object( - salt.utils.cloud, "check_name", MagicMock(return_value=True) - ): - comt = "Invalid characters in volume name." 
- ret.update({"comment": comt, "result": False}) - self.assertDictEqual(glusterfs.volume_present(name, bricks), ret) - - # 'started' function tests: 1 - - def test_started(self): - """ - Test to check if volume has been started - """ - name = "salt" - - ret = {"name": name, "result": False, "comment": "", "changes": {}} - - started_info = {name: {"status": "1"}} - stopped_info = {name: {"status": "0"}} - mock_info = MagicMock(return_value={}) - mock_start = MagicMock(return_value=True) - - with patch.dict( - glusterfs.__salt__, - {"glusterfs.info": mock_info, "glusterfs.start_volume": mock_start}, - ): - comt = "Volume {} does not exist".format(name) - ret.update({"comment": comt}) - self.assertDictEqual(glusterfs.started(name), ret) - - mock_info.return_value = started_info - comt = "Volume {} is already started".format(name) - ret.update({"comment": comt, "result": True}) - self.assertDictEqual(glusterfs.started(name), ret) - - with patch.dict(glusterfs.__opts__, {"test": True}): - mock_info.return_value = stopped_info - comt = "Volume {} will be started".format(name) - ret.update({"comment": comt, "result": None}) - self.assertDictEqual(glusterfs.started(name), ret) - - with patch.dict(glusterfs.__opts__, {"test": False}): - comt = "Volume {} is started".format(name) - ret.update( - { - "comment": comt, - "result": True, - "change": {"new": "started", "old": "stopped"}, - } - ) - self.assertDictEqual(glusterfs.started(name), ret) - - # 'add_volume_bricks' function tests: 1 - - def test_add_volume_bricks(self): - """ - Test to add brick(s) to an existing volume - """ - name = "salt" - bricks = ["host1:/drive1"] - old_bricks = ["host1:/drive2"] - - ret = {"name": name, "result": False, "comment": "", "changes": {}} - - stopped_volinfo = {"salt": {"status": "0"}} - volinfo = { - "salt": {"status": "1", "bricks": {"brick1": {"path": old_bricks[0]}}} - } - new_volinfo = { - "salt": { - "status": "1", - "bricks": { - "brick1": {"path": old_bricks[0]}, - "brick2": 
{"path": bricks[0]}, - }, - } - } - - mock_info = MagicMock(return_value={}) - mock_add = MagicMock(side_effect=[False, True]) - - with patch.dict( - glusterfs.__salt__, - {"glusterfs.info": mock_info, "glusterfs.add_volume_bricks": mock_add}, - ): - ret.update({"comment": "Volume salt does not exist"}) - self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) - - mock_info.return_value = stopped_volinfo - ret.update({"comment": "Volume salt is not started"}) - self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) - - mock_info.return_value = volinfo - ret.update({"comment": "Adding bricks to volume salt failed"}) - self.assertDictEqual(glusterfs.add_volume_bricks(name, bricks), ret) - - ret.update({"result": True}) - ret.update({"comment": "Bricks already added in volume salt"}) - self.assertDictEqual(glusterfs.add_volume_bricks(name, old_bricks), ret) - - mock_info.side_effect = [volinfo, new_volinfo] - ret.update( - { - "comment": "Bricks successfully added to volume salt", - "changes": {"new": bricks + old_bricks, "old": old_bricks}, - } - ) - # Let's sort ourselves because the test under python 3 sometimes fails - # just because of the new changes list order - result = glusterfs.add_volume_bricks(name, bricks) - ret["changes"]["new"] = sorted(ret["changes"]["new"]) - result["changes"]["new"] = sorted(result["changes"]["new"]) - self.assertDictEqual(result, ret) - - # 'op_version' function tests: 1 - - def test_op_version(self): - """ - Test setting the Glusterfs op-version - """ - name = "salt" - current = 30707 - new = 31200 - - ret = {"name": name, "result": False, "comment": "", "changes": {}} - - mock_get_version = MagicMock(return_value={}) - mock_set_version = MagicMock(return_value={}) - - with patch.dict( - glusterfs.__salt__, - { - "glusterfs.get_op_version": mock_get_version, - "glusterfs.set_op_version": mock_set_version, - }, - ): - mock_get_version.return_value = [False, "some error message"] - ret.update({"result": 
False}) - ret.update({"comment": "some error message"}) - self.assertDictEqual(glusterfs.op_version(name, current), ret) - - mock_get_version.return_value = current - ret.update({"result": True}) - ret.update( - { - "comment": ( - "Glusterfs cluster.op-version for {} already set to {}".format( - name, current - ) - ) - } - ) - self.assertDictEqual(glusterfs.op_version(name, current), ret) - - with patch.dict(glusterfs.__opts__, {"test": True}): - mock_set_version.return_value = [False, "Failed to set version"] - ret.update({"result": None}) - ret.update( - { - "comment": ( - "An attempt would be made to set the cluster.op-version for" - " {} to {}.".format(name, new) - ) - } - ) - self.assertDictEqual(glusterfs.op_version(name, new), ret) - - with patch.dict(glusterfs.__opts__, {"test": False}): - mock_set_version.return_value = [False, "Failed to set version"] - ret.update({"result": False}) - ret.update({"comment": "Failed to set version"}) - self.assertDictEqual(glusterfs.op_version(name, new), ret) - - mock_set_version.return_value = "some success message" - ret.update({"comment": "some success message"}) - ret.update({"changes": {"old": current, "new": new}}) - ret.update({"result": True}) - self.assertDictEqual(glusterfs.op_version(name, new), ret) - - # 'max_op_version' function tests: 1 - - def test_max_op_version(self): - """ - Test setting the Glusterfs to its self reported max-op-version - """ - name = "salt" - current = 30707 - new = 31200 - - ret = {"name": name, "result": False, "comment": "", "changes": {}} - - mock_get_version = MagicMock(return_value={}) - mock_get_max_op_version = MagicMock(return_value={}) - mock_set_version = MagicMock(return_value={}) - - with patch.dict( - glusterfs.__salt__, - { - "glusterfs.get_op_version": mock_get_version, - "glusterfs.set_op_version": mock_set_version, - "glusterfs.get_max_op_version": mock_get_max_op_version, - }, - ): - mock_get_version.return_value = [False, "some error message"] - 
ret.update({"result": False}) - ret.update({"comment": "some error message"}) - self.assertDictEqual(glusterfs.max_op_version(name), ret) - - mock_get_version.return_value = current - mock_get_max_op_version.return_value = [False, "some error message"] - ret.update({"result": False}) - ret.update({"comment": "some error message"}) - self.assertDictEqual(glusterfs.max_op_version(name), ret) - - mock_get_version.return_value = current - mock_get_max_op_version.return_value = current - ret.update({"result": True}) - ret.update( - { - "comment": ( - "The cluster.op-version is already set to the" - " cluster.max-op-version of {}".format(current) - ) - } - ) - self.assertDictEqual(glusterfs.max_op_version(name), ret) - - with patch.dict(glusterfs.__opts__, {"test": True}): - mock_get_max_op_version.return_value = new - ret.update({"result": None}) - ret.update( - { - "comment": ( - "An attempt would be made to set the cluster.op-version" - " to {}.".format(new) - ) - } - ) - self.assertDictEqual(glusterfs.max_op_version(name), ret) - - with patch.dict(glusterfs.__opts__, {"test": False}): - mock_set_version.return_value = [False, "Failed to set version"] - ret.update({"result": False}) - ret.update({"comment": "Failed to set version"}) - self.assertDictEqual(glusterfs.max_op_version(name), ret) - - mock_set_version.return_value = "some success message" - ret.update({"comment": "some success message"}) - ret.update({"changes": {"old": current, "new": new}}) - ret.update({"result": True}) - self.assertDictEqual(glusterfs.max_op_version(name), ret) diff --git a/tools/testsuite/__init__.py b/tools/testsuite/__init__.py index 4ae4e07527cc..d09d29be9792 100644 --- a/tools/testsuite/__init__.py +++ b/tools/testsuite/__init__.py @@ -151,15 +151,6 @@ def setup_testsuite( if exitcode and exitcode != ExitCode.SOFT_FAIL: ctx.exit(exitcode) if pkg: - exitcode = tools.utils.gh.download_nox_artifact( - ctx, - run_id=run_id, - slug=slug, - nox_env=f"test-pkgs-onedir-{arch}", - 
repository=repository, - ) - if exitcode and exitcode != ExitCode.SOFT_FAIL: - ctx.exit(exitcode) exitcode = tools.utils.gh.download_pkgs_artifact( ctx, run_id=run_id, diff --git a/tools/utils/gh.py b/tools/utils/gh.py index 34008936e4a0..513e2cf2110c 100644 --- a/tools/utils/gh.py +++ b/tools/utils/gh.py @@ -195,7 +195,7 @@ def download_pkgs_artifact( ctx.error(f"We do not build packages for {slug}") return ExitCode.FAIL - artifacts_path = tools.utils.REPO_ROOT / "pkg" / "artifacts" + artifacts_path = tools.utils.REPO_ROOT / "artifacts" / "pkg" artifacts_path.mkdir(exist_ok=True) ctx.info( diff --git a/tools/vm.py b/tools/vm.py index a8fa51ea7486..11eed3f99189 100644 --- a/tools/vm.py +++ b/tools/vm.py @@ -1315,7 +1315,7 @@ def upload_checkout(self, verbose=True, download=False): "--include", "artifacts/salt", "--include", - "pkg/artifacts/*", + "artifacts/pkg", # But we also want to exclude all other entries under artifacts/ "--exclude", "artifacts/*",