diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..282ed09183
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,8 @@
+docs/*
+keep-ui/node_modules
+keep-ui/.next/*
+keep-ui/.env.local
+.venv/
+.vercel/
+.vscode/
+.github/
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000..384985b3f7
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,27 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: "[๐Ÿ› Bug]: "
+labels: ""
+assignees: ""
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md
new file mode 100644
index 0000000000..cc2c7fd0e5
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documentation.md
@@ -0,0 +1,10 @@
+---
+name: Documentation issue
+about: Any issue related to Keep's documentation
+title: "[๐Ÿ“ƒ Docs]: "
+labels: "Documentation"
+assignees: ""
+---
+
+**Describe the documentation change**
+Add any context about the documentation change you aim to make.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000..a9a650ab93
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,19 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: "[โž• Feature]: "
+labels: ""
+assignees: ""
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/ISSUE_TEMPLATE/new_provider_request.md b/.github/ISSUE_TEMPLATE/new_provider_request.md
new file mode 100644
index 0000000000..4c9b6c99d0
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/new_provider_request.md
@@ -0,0 +1,19 @@
+---
+name: New provider request
+about: Suggest a new provider for Keep
+title: "[๐Ÿ”Œ Provider]: "
+labels: "Provider"
+assignees: ""
+---
+
+**Describe the provider you want to add**
+Add any context about the tool and the kind of data you would want to pull from or push to the provider.
+
+**Describe your use case**
+How will this integration help you use Keep?
+
+**Are you already using Keep?**
+Yes/No
+
+**Additional context**
+Add any other context or screenshots about the provider request here.
diff --git a/.github/ISSUE_TEMPLATE/use_case.md b/.github/ISSUE_TEMPLATE/use_case.md
new file mode 100644
index 0000000000..64b10471b5
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/use_case.md
@@ -0,0 +1,11 @@
+---
+name: Use case
+about: Tell us how you use Keep and we will add it to the docs.
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**What do you use Keep for?**
+A clear and concise description of what you do with Keep.
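Editorial aside, not part of this diff: the issue templates above could be complemented by GitHub's optional `.github/ISSUE_TEMPLATE/config.yml`, which controls the template chooser. A minimal, hypothetical sketch follows; the contact link URL is a placeholder, not something this PR defines.

```yaml
# .github/ISSUE_TEMPLATE/config.yml -- hypothetical, not included in this PR
blank_issues_enabled: false # steer reporters toward the templates above
contact_links:
  - name: Keep Slack community
    url: https://example.com/keep-slack # placeholder; use the community Slack URL from the README
    about: Ask questions and discuss Keep before filing an issue.
```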
diff --git a/.github/workflows/developer_onboarding_notification.yml b/.github/workflows/developer_onboarding_notification.yml new file mode 100644 index 0000000000..0ed44fca48 --- /dev/null +++ b/.github/workflows/developer_onboarding_notification.yml @@ -0,0 +1,116 @@ +name: Celebrating Contributions + +on: + pull_request_target: + types: [closed] + +permissions: + pull-requests: write + +jobs: + comment_on_merged_pull_request: + if: github.event.pull_request.merged == true + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Set Environment Variables + env: + AUTHOR: ${{ github.event.pull_request.user.login }} + REPO: ${{ github.event.repository.name }} + OWNER: ${{ github.event.repository.owner.login }} + run: | + echo "AUTHOR=${AUTHOR}" >> $GITHUB_ENV + echo "REPO=${REPO}" >> $GITHUB_ENV + echo "OWNER=${OWNER}" >> $GITHUB_ENV + + - name: Count Merged Pull Requests + id: count_merged_pull_requests + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + try { + const author = process.env.AUTHOR; + const repo = process.env.REPO; + const owner = process.env.OWNER; + const { data } = await github.rest.search.issuesAndPullRequests({ + q: `repo:${owner}/${repo} type:pr state:closed author:${author}` + }); + const prCount = data.items.filter(pr => pr.pull_request.merged_at).length; + core.exportVariable('PR_COUNT', prCount); + } catch (error) { + core.setFailed(`Error counting merged pull requests: ${error.message}`); + } + + - name: Comment on the Merged Pull Request + uses: actions/github-script@v6 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + try { + const prCount = parseInt(process.env.PR_COUNT); + const author = process.env.AUTHOR; + const mention = 'talboren'; + const prNumber = context.payload.pull_request.number; + const repo = process.env.REPO; + + function getRandomEmoji() { + const emojis = ['๐ŸŽ‰', '๐Ÿš€', '๐Ÿ’ช', '๐ŸŒŸ', '๐Ÿ†', '๐ŸŽŠ', '๐Ÿ”ฅ', '๐Ÿ‘', '๐ŸŒˆ', '๐Ÿš‚']; + return emojis[Math.floor(Math.random() * emojis.length)]; + } + + function getMessage(count) { + const emoji = getRandomEmoji(); + switch(count) { + case 1: + return `${emoji} **Fantastic work @${author}!** Your very first PR to ${repo} has been merged! ๐ŸŽ‰๐Ÿฅณ\n\n` + + `You've just taken your first step into open-source, and we couldn't be happier to have you onboard. ๐Ÿ™Œ\n` + + `If you're feeling adventurous, why not dive into another issue and keep contributing? The community would love to see more from you! ๐Ÿš€\n\n` + + `For any support, feel free to reach out to the developer onboarding lead: @${mention}. Happy coding! ๐Ÿ‘ฉโ€๐Ÿ’ป๐Ÿ‘จโ€๐Ÿ’ป`; + case 2: + return `${emoji} **Well done @${author}!** Two PRs merged already! ๐ŸŽ‰๐Ÿฅณ\n\n` + + `With your second PR, you're on a roll, and your contributions are already making a difference. ๐ŸŒŸ\n` + + `Looking forward to seeing even more contributions from you. The developer onboarding lead: @${mention} is here if you need any help! Keep up the great work! ๐Ÿš€`; + case 3: + return `${emoji} **You're on fire, @${author}!** Three PRs merged and counting! ๐Ÿ”ฅ๐ŸŽ‰\n\n` + + `Your consistent contributions are truly impressive. You're becoming a valued member of our community! ๐Ÿ’–\n` + + `Have you considered taking on some more challenging issues? We'd love to see what you can do! ๐Ÿ’ช\n\n` + + `Remember, @${mention} is always here to support you. Keep blazing that trail! 
๐Ÿš€`;
+              case 5:
+                return `${emoji} **High five, @${author}!** You've hit the incredible milestone of 5 merged PRs! ๐Ÿ–๏ธโœจ\n\n` +
+                       `Your dedication to ${repo} is outstanding. You're not just contributing code; you're shaping the future of this project! ๐ŸŒ \n` +
+                       `We'd love to hear your thoughts on the project. Any ideas for new features or improvements? ๐Ÿค”\n\n` +
+                       `@${mention} and the whole team applaud your efforts. You're a superstar! ๐ŸŒŸ`;
+              case 10:
+                return `${emoji} **Double digits, @${author}!** 10 merged PRs is a massive achievement! ๐Ÿ†๐ŸŽŠ\n\n` +
+                       `Your impact on ${repo} is undeniable. You've become a pillar of our community! ๐Ÿ›๏ธ\n` +
+                       `We'd be thrilled to have you take on a mentorship role for newer contributors. Interested? ๐Ÿง‘โ€๐Ÿซ\n\n` +
+                       `@${mention} and everyone here are in awe of your contributions. You're an open source hero! ๐Ÿฆธโ€โ™€๏ธ๐Ÿฆธโ€โ™‚๏ธ`;
+              default:
+                if (count > 10) {
+                  return `${emoji} **Incredible, @${author}!** You've merged your ${count}th PR! ๐ŸŽฏ๐ŸŽŠ\n\n` +
+                         `Your ongoing commitment to ${repo} is truly remarkable. You're a driving force in our community! ๐Ÿš€\n` +
+                         `Your contributions are helping to shape the future of this project. What exciting features or improvements do you envision next? ๐Ÿ”ฎ\n\n` +
+                         `@${mention} and the entire team are grateful for your dedication. You're an inspiration to us all! ๐Ÿ’ซ`;
+                } else {
+                  return `${emoji} **Great job, @${author}!** You've merged your ${count}th PR! ๐ŸŽŠ\n\n` +
+                         `Your contributions to ${repo} are making a real difference. Keep up the fantastic work! ๐Ÿ’ช\n` +
+                         `Remember, every PR counts and helps improve the project. What will you tackle next? ๐Ÿค”\n\n` +
+                         `@${mention} is here if you need any guidance. Onward and upward! ๐Ÿš€`;
+                }
+            }
+          }
+
+          const message = getMessage(prCount);
+
+          await github.rest.issues.createComment({
+            owner: process.env.OWNER,
+            repo: process.env.REPO,
+            issue_number: prNumber,
+            body: message
+          });
+          } catch (error) {
+            core.setFailed(`Error creating comment: ${error.message}`);
+          }
\ No newline at end of file
diff --git a/.github/workflows/lint-pr.yml b/.github/workflows/lint-pr.yml
new file mode 100644
index 0000000000..4bb1693026
--- /dev/null
+++ b/.github/workflows/lint-pr.yml
@@ -0,0 +1,56 @@
+name: "Lint PR"
+
+on:
+  pull_request_target:
+    types:
+      - opened
+      - edited
+      - synchronize
+      - reopened
+
+jobs:
+  main:
+    name: Validate PR title
+    runs-on: ubuntu-latest
+    steps:
+      - name: lint_pr_title
+        id: lint_pr_title
+        uses: amannn/action-semantic-pull-request@v5.1.0
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - uses: marocchino/sticky-pull-request-comment@v2
+        # When the previous step fails, the workflow stops. Adding this
+        # condition lets execution continue with the populated error message.
+        if: always() && (steps.lint_pr_title.outputs.error_message != null)
+        with:
+          header: pr-title-lint-error
+          message: |
+            Hey there and thank you for opening this pull request! ๐Ÿ‘‹๐Ÿผ
+
+            We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your proposed title needs to be adjusted.
+ + Details: + + ``` + ${{ steps.lint_pr_title.outputs.error_message }} + ``` + # Delete a previous comment when the issue has been resolved + - if: ${{ steps.lint_pr_title.outputs.error_message == null }} + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: pr-title-lint-error + delete: true + links: + runs-on: ubuntu-latest + name: Validate PR to Issue link + permissions: + issues: read + pull-requests: write + steps: + - uses: nearform-actions/github-action-check-linked-issues@v1 + id: check-linked-issues + with: + exclude-branches: "release/**, dependabot/**" + # OPTIONAL: Use the output from the `check-linked-issues` step + - name: Get the output + run: echo "How many linked issues? ${{ steps.check-linked-issues.outputs.linked_issues_count }}" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000..d4020f083c --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,58 @@ +name: Keep Release + +on: + workflow_dispatch: + +jobs: + release: + runs-on: ubuntu-latest + concurrency: release + permissions: + id-token: write + contents: write + pull-requests: write + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + persist-credentials: false + ref: main + + - name: Release Keep + id: release-step + uses: python-semantic-release/python-semantic-release@v9.8.7 + with: + git_committer_name: Keep Release Bot + git_committer_email: no-reply@keephq.dev + github_token: ${{ secrets.GITHUB_TOKEN }} + push: false + tag: true + commit: true + + - name: Open PR for release branch + id: pr-step + uses: peter-evans/create-pull-request@v6.1.0 + with: + committer: Keep Release Bot + title: "Release - ${{ steps.release-step.outputs.version }}" + branch: release/${{ steps.release-step.outputs.version }} + body: "This PR contains the latest release changes." 
+          draft: false
+          base: main
+
+      - uses: peter-evans/enable-pull-request-automerge@v3
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          pull-request-number: ${{ steps.pr-step.outputs.pull-request-number }}
+
+      - name: Create release
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          tag: "v${{ steps.release-step.outputs.version }}"
+        run: |
+          gh release create "$tag" \
+          --repo="$GITHUB_REPOSITORY" \
+          --title="v${{ steps.release-step.outputs.version }}" \
+          --target="release/${{ steps.release-step.outputs.version }}" \
+          --generate-notes
diff --git a/.github/workflows/sync-keep-workflows.yml b/.github/workflows/sync-keep-workflows.yml
new file mode 100644
index 0000000000..1681a1391b
--- /dev/null
+++ b/.github/workflows/sync-keep-workflows.yml
@@ -0,0 +1,35 @@
+# A workflow that syncs Keep workflows from a directory
+name: "Sync Keep Workflows"
+
+on:
+  workflow_dispatch:
+    inputs:
+      keep_api_key:
+        description: 'Keep API Key'
+        required: false
+      keep_api_url:
+        description: 'Keep API URL'
+        required: false
+        default: 'https://api.keephq.dev'
+  # push:
+  #   paths:
+  #     - 'examples/workflows/**'
+
+jobs:
+  sync-workflows:
+    name: Sync workflows to Keep
+    runs-on: ubuntu-latest
+    # Use the Keep CLI image
+    container:
+      image: us-central1-docker.pkg.dev/keephq/keep/keep-cli:latest
+      env:
+        KEEP_API_KEY: ${{ secrets.KEEP_API_KEY || github.event.inputs.keep_api_key }}
+        KEEP_API_URL: ${{ secrets.KEEP_API_URL || github.event.inputs.keep_api_url }}
+
+    steps:
+      - name: Check out the repo
+        uses: actions/checkout@v2
+
+      - name: Run Keep CLI
+        run: |
+          keep workflow apply -f examples/workflows
diff --git a/.github/workflows/test-docs.yml b/.github/workflows/test-docs.yml
new file mode 100644
index 0000000000..685908bb7e
--- /dev/null
+++ b/.github/workflows/test-docs.yml
@@ -0,0 +1,64 @@
+name: Test docs
+on:
+  push:
+    paths:
+      - 'keep/providers/**'
+      - 'docs/**'
+  pull_request:
+    paths:
+      - 'keep/providers/**'
+      - 'docs/**'
+  workflow_dispatch:
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref }}-${{ github.job }}
+  cancel-in-progress: true
+env:
+  PYTHON_VERSION: 3.11
+  STORAGE_MANAGER_DIRECTORY: /tmp/storage-manager
+
+jobs:
+  tests-docs:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - uses: chartboost/ruff-action@v1
+        with:
+          src: "./keep"
+      - name: Set up Python ${{ env.PYTHON_VERSION }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+
+      - name: Install Poetry
+        uses: snok/install-poetry@v1
+        with:
+          virtualenvs-create: true
+          virtualenvs-in-project: true
+
+      - name: cache deps
+        id: cache-deps
+        uses: actions/cache@v2
+        with:
+          path: .venv
+          key: pydeps-${{ hashFiles('**/poetry.lock') }}
+
+      - name: Install dependencies using poetry
+        run: poetry install --no-interaction --no-root --with dev
+
+      - name: Validate docs for providers
+        run: |
+          cd scripts;
+          poetry run python ./docs_get_providers_list.py --validate
+
+      - name: Install deps and validate docs
+        run: |
+          npm i -g mintlify;
+
+          cd docs && mintlify broken-links;
+          cd ../scripts;
+          ./docs_validate_navigation.sh;
+
+      # TODO: validate that the OpenAPI schema matches the code
+
\ No newline at end of file
diff --git a/.github/workflows/test-pr-e2e.yml b/.github/workflows/test-pr-e2e.yml
new file mode 100644
index 0000000000..ba7cb43e5e
--- /dev/null
+++ b/.github/workflows/test-pr-e2e.yml
@@ -0,0 +1,162 @@
+name: Tests (E2E)
+
+on:
+  workflow_dispatch:
+  pull_request:
+    paths:
+      - 'keep/**'
+      - 'keep-ui/**'
+      - 'tests/**'
+
+concurrency:
+  group:
${{ github.workflow }}-${{ github.head_ref }}
+  cancel-in-progress: true
+
+env:
+  PYTHON_VERSION: 3.11
+  STORAGE_MANAGER_DIRECTORY: /tmp/storage-manager
+  # MySQL server environment variables
+  MYSQL_ROOT_PASSWORD: keep
+  MYSQL_DATABASE: keep
+  # Postgres environment variables
+  POSTGRES_USER: keepuser
+  POSTGRES_PASSWORD: keeppassword
+  POSTGRES_DB: keepdb
+  # To test if imports are working properly
+  EE_ENABLED: true
+
+jobs:
+  tests-e2e:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        db_type: [mysql, postgres]
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - uses: chartboost/ruff-action@v1
+        with:
+          src: "./keep"
+
+      - name: Set up Python ${{ env.PYTHON_VERSION }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+
+      - name: Install Poetry
+        uses: snok/install-poetry@v1
+        with:
+          virtualenvs-create: true
+          virtualenvs-in-project: true
+
+      - name: Cache dependencies
+        id: cache-deps
+        uses: actions/cache@v2
+        with:
+          path: .venv
+          key: pydeps-${{ hashFiles('**/poetry.lock') }}
+
+      - name: Install dependencies using poetry
+        run: poetry install --no-interaction --no-root --with dev
+
+      - name: Install Playwright dependencies
+        run: npx playwright install --with-deps
+
+      - name: Install playwright
+        run: poetry run playwright install
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Set up Keep environment
+        run: |
+          DOCKER_BUILDKIT=1 docker compose \
+            --project-directory . \
+            -f tests/e2e_tests/docker-compose-e2e-${{ matrix.db_type }}.yml up -d
+
+      - name: Wait for database to be ready
+        run: |
+          # Add commands to wait for the database to be ready
+          if [ "${{ matrix.db_type }}" == "mysql" ]; then
+            until docker exec $(docker ps -qf "name=keep-database") mysqladmin ping -h "localhost" --silent; do
+              echo "Waiting for MySQL to be ready..."
+              sleep 2
+            done
+          elif [ "${{ matrix.db_type }}" == "postgres" ]; then
+            until docker exec $(docker ps -qf "name=keep-database") pg_isready -h localhost -U keepuser; do
+              echo "Waiting for Postgres to be ready..."
+              sleep 2
+            done
+          fi
+
+          # wait for the Keep backend on port 8080
+          echo "Waiting for Keep backend to be ready..."
+          attempt=0
+          max_attempts=10
+
+          until $(curl --output /dev/null --silent --fail http://localhost:8080/healthcheck); do
+            if [ "$attempt" -ge "$max_attempts" ]; then
+              echo "Max attempts reached, exiting... Sometimes Keep can't start because of double-headed migrations, use: 'alembic -c keep/alembic.ini history' to investigate, or check artifacts."
+              exit 1
+            fi
+            echo "Waiting for Keep backend to be ready... (Attempt: $((attempt+1)))"
+            attempt=$((attempt+1))
+            sleep 2
+          done
+
+          echo "Keep backend is ready!"
+          # wait for the frontend on port 3000
+          echo "Waiting for Keep frontend to be ready..."
+          attempt=0
+          max_attempts=10
+
+          until $(curl --output /dev/null --silent --fail http://localhost:3000/); do
+            if [ "$attempt" -ge "$max_attempts" ]; then
+              echo "Max attempts reached, exiting..."
+              exit 1
+            fi
+            echo "Waiting for Keep frontend to be ready...
(Attempt: $((attempt+1)))" + attempt=$((attempt+1)) + sleep 2 + done + + # create the state directory + # mkdir -p ./state && chown -R root:root ./state && chmod -R 777 ./state + + - name: Run e2e tests and report coverage + run: | + poetry run coverage run --branch -m pytest -s tests/e2e_tests/ + + - name: Convert coverage results to JSON (for CodeCov support) + run: poetry run coverage json --omit="keep/providers/*" + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + with: + fail_ci_if_error: false # don't fail if we didn't manage to upload the coverage report + files: coverage.json + verbose: true + + - name: Dump backend logs + if: always() + run: | + docker compose --project-directory . -f tests/e2e_tests/docker-compose-e2e-${{ matrix.db_type }}.yml logs keep-backend > backend_logs-${{ matrix.db_type }}.txt + docker compose --project-directory . -f tests/e2e_tests/docker-compose-e2e-${{ matrix.db_type }}.yml logs keep-frontend > frontend_logs-${{ matrix.db_type }}.txt + continue-on-error: true + + - name: Upload test artifacts on failure + if: always() + uses: actions/upload-artifact@v3 + with: + name: test-artifacts + path: | + playwright_dump_*.html + playwright_dump_*.png + backend_logs-${{ matrix.db_type }}.txt + frontend_logs-${{ matrix.db_type }}.txt + continue-on-error: true + + - name: Tear down environment + run: | + docker compose --project-directory . -f tests/e2e_tests/docker-compose-e2e-${{ matrix.db_type }}.yml down diff --git a/.github/workflows/test-pr.yml b/.github/workflows/test-pr.yml new file mode 100644 index 0000000000..522a07ca4a --- /dev/null +++ b/.github/workflows/test-pr.yml @@ -0,0 +1,111 @@ +name: Tests +on: + push: + paths: + - "keep/**" + pull_request: + paths: + - "keep/**" + workflow_dispatch: +permissions: + actions: write +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref }} + cancel-in-progress: true +# MySQL server and Elasticsearch for testing +env: + PYTHON_VERSION: 3.11 + STORAGE_MANAGER_DIRECTORY: /tmp/storage-manager + MYSQL_ROOT_PASSWORD: keep + MYSQL_DATABASE: keep + ELASTIC_PASSWORD: keeptests + +jobs: + tests: + runs-on: ubuntu-latest + services: + mysql: + image: mysql:5.7 + env: + MYSQL_ROOT_PASSWORD: ${{ env.MYSQL_ROOT_PASSWORD }} + MYSQL_DATABASE: ${{ env.MYSQL_DATABASE }} + ports: + - 3306:3306 + options: >- + --health-cmd="mysqladmin ping" + --health-interval=10s + --health-timeout=5s + --health-retries=3 + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.4 + ports: + - 9200:9200 + env: + ELASTIC_PASSWORD: ${{ env.ELASTIC_PASSWORD }} + bootstrap.memory_lock: "true" + discovery.type: "single-node" + ES_JAVA_OPTS: "-Xms2g -Xmx2g" + xpack.security.enabled: "true" + keycloak: + image: us-central1-docker.pkg.dev/keephq/keep/keep-keycloak-test + env: + KC_DB: dev-mem + KC_HTTP_RELATIVE_PATH: /auth + KEYCLOAK_ADMIN: keep_kc_admin + KEYCLOAK_ADMIN_PASSWORD: keep_kc_admin + ports: + - 8787:8080 + options: >- + --health-cmd="/opt/keycloak/bin/kcadm.sh config credentials --server http://localhost:8080/auth --realm master --user keep_kc_admin --password keep_kc_admin || exit 1" + --health-interval=10s + --health-timeout=5s + --health-retries=4 + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - uses: chartboost/ruff-action@v1 + with: + src: "./keep" + + - name: Set up Python ${{ env.PYTHON_VERSION }} + uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + 
virtualenvs-create: true
+          virtualenvs-in-project: true
+
+      - name: cache deps
+        id: cache-deps
+        uses: actions/cache@v2
+        with:
+          path: .venv
+          key: pydeps-${{ hashFiles('**/poetry.lock') }}
+
+      - name: Install dependencies using poetry
+        run: poetry install --no-interaction --no-root --with dev
+
+      - name: Run unit tests and report coverage
+        run: |
+          # Add a step to wait for MySQL to be fully up and running
+          until nc -z 127.0.0.1 3306; do
+            echo "waiting for MySQL..."
+            sleep 1
+          done
+          echo "MySQL is up and running!"
+          poetry run coverage run --branch -m pytest --ignore=tests/e2e_tests/
+
+      - name: Convert coverage results to JSON (for CodeCov support)
+        run: poetry run coverage json --omit="keep/providers/*"
+
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v3
+        with:
+          fail_ci_if_error: false # don't fail if we didn't manage to upload the coverage report
+          files: coverage.json
+          verbose: true
diff --git a/.gitignore b/.gitignore
index 6f4a6ac569..4ff2fd406c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,7 @@
+# .DS_STORE
+.DS_Store
+**/.DS_Store
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -6,6 +10,9 @@ __pycache__/
 # C extensions
 *.so
 
+# .csv files
+*.csv
+
 # Distribution / packaging
 .Python
 build/
@@ -25,7 +32,6 @@ share/python-wheels/
 .installed.cfg
 *.egg
 MANIFEST
-
 # PyInstaller
 # Usually these files are written by a python script from a template
 # before PyInstaller builds the exe, so as to inject date/other infos into it.
@@ -44,6 +50,7 @@ htmlcov/
 .coverage.*
 .cache
 nosetests.xml
+coverage.lcov
 coverage.xml
 *.cover
 *.py,cover
@@ -99,7 +106,7 @@ ipython_config.py
 # This is especially recommended for binary packages to ensure reproducibility, and is more
 # commonly ignored for libraries.
 # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-poetry.lock
+#poetry.lock
 
 # pdm
 # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
@@ -161,3 +168,47 @@ cython_debug/
 
 # vscode
 .vscode/
+
+# keep configuration file
+keep.yaml
+.keep.yaml
+providers.yaml
+.vercel
+keepstate.json
+
+# keep single tenant id
+e1faa321-35df-486b-8fa8-3601ee714011*
+
+# sqlite db
+*.sqlite3
+state/*
+.terraform*
+examples/alerts/dd.yml
+keep-ui/node_modules
+keep-ui/node_modules/*
+
+cov.xml
+keep.db
+RANDOM_USER_ID
+storage
+
+# otel files
+tempo-data/
+
+# docs
+docs/node_modules/
+
+oauth2.cfg
+
+
+scripts/automatic_extraction_rules.py
+
+playwright_dump_*.html
+playwright_dump_*.png
+
+ee/experimental/ai_temp/*
+!ee/experimental/ai_temp/.gitkeep
+
+oauth2.cfg
+scripts/keep_slack_bot.py
+keepnew.db
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b0ea35a25f..2fc9e4e62c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -7,12 +7,6 @@ repos:
         language: system
         types: [python]
         require_serial: true
-      # - id: yamllint
-      #   name: yamllint
-      #   description: This hook runs yamllint.
-      #   entry: yamllint
-      #   language: python
-      #   types: [file, yaml]
       - id: end-of-file-fixer
         name: Fix End of Files
         entry: end-of-file-fixer
@@ -32,9 +26,23 @@ repos:
         language: system
         types: [text]
         stages: [commit, push, manual]
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    # Ruff version.
+    rev: v0.1.6
+    hooks:
+      # Run the linter.
+      - id: ruff
+        args: [--fix]
   - repo: https://github.com/compilerla/conventional-pre-commit
     rev: v2.1.1
     hooks:
       - id: conventional-pre-commit
        stages: [commit-msg]
        args: [] # optional: list of Conventional Commits types to allow e.g.
[feat, fix, ci, chore, test]
+  - repo: https://github.com/pre-commit/mirrors-prettier
+    rev: v3.0.3
+    hooks:
+      - id: prettier
+        types_or:
+          [javascript, jsx, ts, tsx, json, yaml, css, scss, html, markdown]
+        args: [--write]
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000..a53da963a6
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,25 @@
+# CHANGELOG
+{% if context.history.unreleased | length > 0 %}
+
+{# UNRELEASED #}
+## Unreleased
+{% for type_, commits in context.history.unreleased | dictsort %}
+### {{ type_ | capitalize }}
+{% for commit in commits %}{% if type_ != "unknown" %}
+* {{ commit.commit.message.rstrip() }} ([`{{ commit.commit.hexsha[:7] }}`]({{ commit.commit.hexsha | commit_hash_url }}))
+{% else %}
+* {{ commit.commit.message.rstrip() }} ([`{{ commit.commit.hexsha[:7] }}`]({{ commit.commit.hexsha | commit_hash_url }}))
+{% endif %}{% endfor %}{% endfor %}
+
+{% endif %}
+
+{# RELEASED #}
+{% for version, release in context.history.released.items() %}
+## {{ version.as_tag() }} ({{ release.tagged_date.strftime("%Y-%m-%d") }})
+{% for type_, commits in release["elements"] | dictsort %}
+### {{ type_ | capitalize }}
+{% for commit in commits %}{% if type_ != "unknown" %}
+* {{ commit.commit.message.rstrip() }} ([`{{ commit.commit.hexsha[:7] }}`]({{ commit.commit.hexsha | commit_hash_url }}))
+{% else %}
+* {{ commit.commit.message.rstrip() }} ([`{{ commit.commit.hexsha[:7] }}`]({{ commit.commit.hexsha | commit_hash_url }}))
+{% endif %}{% endfor %}{% endfor %}{% endfor %}
\ No newline at end of file
diff --git a/CLI.md b/CLI.md
deleted file mode 100644
index bba50551d6..0000000000
--- a/CLI.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-# paladin run
-## paladin run db_disk_space.yml --hosts /hosts
-## paladin run purchase_fails.yml --hosts /hosts --env-var .env
-##
-
-# paladin test
-# paladin import
-# paladin export
-# paladin
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..995a745fad
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,49 @@
+# Contributing to Keep
+We love your input! We want to make contributing to this project as easy and transparent as possible, whether it's:
+
+- Reporting a bug
+- Discussing the current state of the code
+- Submitting a fix
+- Proposing new features
+- Becoming a maintainer
+
+## We Develop with GitHub
+We use GitHub to host code, track issues and feature requests, and accept pull requests.
+
+## We Use [GitHub Flow](https://guides.github.com/introduction/flow/index.html), So All Code Changes Happen Through Pull Requests
+Pull requests are the best way to propose changes to the codebase (we use [GitHub Flow](https://guides.github.com/introduction/flow/index.html)). We actively welcome your pull requests:
+
+1. Fork the repo and create your branch from `main`.
+2. If you've added code that should be tested, add tests.
+3. If you've changed APIs, update the documentation.
+4. Ensure the test suite passes.
+5. Make sure your code lints.
+6. Issue that pull request!
+
+## Any contributions you make will be under the MIT Software License
+In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. Feel free to contact the maintainers if that's a concern.
+
+## Report bugs using GitHub's [issues](https://github.com/keephq/keep/issues)
+We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/keephq/keep/issues/new); it's that easy!
+ +**Great Bug Reports** tend to have: + +- A quick summary and/or background +- Steps to reproduce + - Be specific! + - Give sample code if you can. +- What you expected would happen +- What actually happens +- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work) + +People *love* thorough bug reports. I'm not even kidding. + +## Use a Consistent Coding Style + +Follow PEP8, use `black` for formatting and `isort` to sort imports. + +## License +By contributing, you agree that your contributions will be licensed under its MIT License. + +## References +This document was adapted from the open-source contribution guidelines for [Facebook's Draft](https://github.com/facebook/draft-js/blob/a9316a723f9e918afde44dea68b5f9f39b7d9b00/CONTRIBUTING.md) diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..ff7b6827b6 --- /dev/null +++ b/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2024 Keep + +Portions of this software are licensed as follows: + +* All content that resides under the "ee/" directory of this repository, if that directory exists, is licensed under the license defined in "ee/LICENSE". +* Content outside of the above mentioned directories or restrictions above is available under the "MIT" license as defined below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index 60e4cd575a..a9864afe04 100644 --- a/README.md +++ b/README.md @@ -1 +1,283 @@ -![Stronger than Guard Tower. Fires farther and is better armored.](https://static.wikia.nocookie.net/ageofempires/images/2/26/Towers_prev_aoe2.png/revision/latest?cb=20201223201548) +
+ +
+ +

The open-source alert management and AIOps platform

+ +
Single pane of glass, filtering, bi-directional integrations, alert correlation, workflows, enrichment, dashboards. +
AI correlation and AI summarization are in limited preview (Book a Demo)
+
+ +
+ + Join Slack + + + +
+

+ Why Keep? + ยท + Getting started + ยท + Supported tools and integrations + ยท + Docs + ยท + Try it out + ยท + Website + ยท + Report Bug + ยท + Slack Community +

+ + +## How does it work? +1. **Connect your tools**: Connect everything from monitoring platforms to databases and ticketing systems. +
+ +| Connect providers | Receive alerts | +|----------|----------| +| | | + +
+ +2. **Set up Workflows**: Initiate automated workflows in response to alerts or based on custom intervals. + +
+ + +| Create and upload workflows | +|----------| +| | + +
+
+3. **Operational efficiency**: Automate your alert handling to focus your team's efforts on what really matters.
+
+
+## Why Keep?
+1. **Centralized dashboard**: Manage all your alerts across different platforms in a single interface.
+2. **Noise reduction**: Deduplicate and correlate alerts to reduce alert fatigue.
+3. **Automation**: Trigger workflows for alert enrichment and response.
+4. **Developer-first**: Keep is API-first and lets you manage your workflows as code.
+5. **Works with every tool**: Plenty of [supported providers](#supported-providers) and more to come.
+
+
+## Workflows
+The easiest way to think about Workflows in Keep is GitHub Actions. At its core, a Workflow in Keep is a declarative YAML file composed of triggers, steps, and actions; it serves to manage, enrich, and automate responses to alerts:
+```yaml
+workflow:
+  id: most-basic-keep-workflow
+  description: send a slack message when a cloudwatch alarm is triggered
+  # workflow triggers - supports alerts, interval, and manual triggers
+  triggers:
+    - type: alert
+      filters:
+        - key: source
+          value: cloudwatch
+    - type: manual
+  # list of steps that can add context to your alert
+  steps:
+    - name: enrich-alert-with-more-data-from-a-database
+      provider:
+        type: bigquery
+        config: "{{ providers.bigquery-prod }}"
+        with:
+          query: "SELECT customer_id, customer_type as date FROM `customers_prod` LIMIT 1"
+  # list of actions that can automate response and do things with your alert
+  actions:
+    - name: trigger-slack
+      provider:
+        type: slack
+        config: " {{ providers.slack-prod }} "
+        with:
+          message: "Got alarm from aws cloudwatch! {{ alert.name }}"
+```
+Workflows can be triggered manually, when an alert fires, or at predefined intervals. More examples can be found [here](https://github.com/keephq/keep/tree/main/examples/workflows).
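As a quick, hedged illustration of the interval trigger mentioned above (an editorial sketch, not part of this diff): a minimal workflow that runs on a fixed schedule instead of reacting to an alert. The `value: 60` period in seconds and the `slack-prod` provider name are assumptions for the sketch, not something this PR defines.

```yaml
# Sketch only: an interval-triggered Keep workflow.
# Assumptions: the interval trigger takes `value` in seconds,
# and a provider named `slack-prod` is already configured.
workflow:
  id: interval-heartbeat-workflow
  description: post a heartbeat message to Slack every minute
  triggers:
    - type: interval
      value: 60
  actions:
    - name: heartbeat-slack
      provider:
        type: slack
        config: "{{ providers.slack-prod }}"
        with:
          message: "Keep heartbeat - the workflow engine is alive"
```

+
+## Supported Providers
+> Missing any? Just submit a [new provider issue](https://github.com/keephq/keep/issues/new?assignees=&labels=provider&projects=&template=new_provider_request.md&title=) and we will add it in the blink of an eye.
+
+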

Observability tools

+

+ +            + +            + +            + +            + +            + +            + +            + +            + +            + +

+

+ +            + +            + +            + +            + +            + +            + +            + +

+

Databases and data warehouses

+

+ +            + +            + +            + +            + +

+

Communication platforms

+

+ +            + +            + +            + +            + +            + +            + +            + +            + +            + +            + +

+

Incident Management tools

+

+ +            + +            + +            + +            + +            + +            + +            + +            + +            + +            + +            + +            + +            + +            + +            + +

+

Ticketing tools

+

+ +            + +            + +            + +            + +            + +

+

Container Orchestration platforms

+

+ +            + +

+
+## Getting Started
+### Overview
+Keep is composed of three main components:
+1. [Keep UI](https://github.com/keephq/keep/tree/main/keep-ui) - A Next.js app to connect your providers, centralize alerts, and create workflows.
+2. [Keep Backend](https://github.com/keephq/keep/tree/main/keep) - A FastAPI server that implements the business logic behind Keep, including integrating with the tools, working with alerts, and scheduling and running the workflows.
+3. [Keep CLI](https://github.com/keephq/keep/blob/main/keep/cli/cli.py) - A CLI that lets you control and manage Keep from the command line.
+
+>**Disclaimer**: we use [PostHog](https://posthog.com/faq) to collect anonymous telemetry to better learn how users use Keep (masked screen recordings for CLI commands).
+To turn PostHog off, set the `DISABLE_POSTHOG=true` environment variable and remove the `NEXT_PUBLIC_POSTHOG_KEY` environment variable.
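For docker-compose users, one way to apply this is a compose override file. The sketch below is editorial and not part of this diff: the service names match the repo's docker-compose.yml, but whether the frontend honors `DISABLE_POSTHOG` at runtime is an assumption here; the documented route for the UI is removing `NEXT_PUBLIC_POSTHOG_KEY`.

```yaml
# docker-compose.override.yml -- a sketch, not a file in this PR.
# Docker Compose merges an override file with docker-compose.yml automatically.
services:
  keep-backend:
    environment:
      - DISABLE_POSTHOG=true # documented above: disables backend telemetry
  keep-frontend:
    environment:
      - DISABLE_POSTHOG=true # assumption: mirrored on the frontend service
```

+
+### Quickstart
+#### Spinning up Keep with docker-compose
+The easiest way to start with Keep is to run it via docker-compose:
+```shell
+curl https://raw.githubusercontent.com/keephq/keep/main/start.sh | sh
+```
+The UI is now available at http://localhost:3000 and the backend is available at http://localhost:8080.
+
+#### Spinning up Keep with Helm on Kubernetes/Openshift
+To install Keep on your Kubernetes cluster hassle-free with Helm, run the following commands:
+
+```shell
+helm repo add keephq https://keephq.github.io/helm-charts
+helm pull keephq/keep
+helm install keep keephq/keep
+```
+
+More information about the Helm chart can be found [here](https://github.com/keephq/helm-charts).
+
+#### Local development
+You can also start Keep within your favorite IDE, e.g. [VSCode](https://docs.keephq.dev/development/getting-started#vscode)
+
+#### Wanna get Keep up and running in production? Go through our detailed [development guide](https://docs.keephq.dev/development)
+
+## ๐Ÿซต Keepers
+
+### Top Contributors
+A special thanks to our top contributors who help us make Keep great. You are more than awesome!
+
+- [Furkan](https://github.com/pehlicd)
+- [Asharon](https://github.com/asharonbaltazar)
+
+Want to become a top contributor? Join our Slack and DM Tal, Shahar, or Furkan.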
+ +### Contributors +Thank you for contributing and continuously making Keep better, you're awesome ๐Ÿซถ + + + + diff --git a/STRESS.md b/STRESS.md new file mode 100644 index 0000000000..dd248cc221 --- /dev/null +++ b/STRESS.md @@ -0,0 +1,58 @@ + +# UNDER CONSTRUCTION + +# First, create a Kubernetes cluster + + +# Install Keep +gcloud config set project keep-dev-429814 +gcloud container clusters get-credentials keep-stress --zone us-central1-c --project keep-dev-429814 +helm repo add keephq https://keephq.github.io/helm-charts +helm pull keephq/keep +# create the namespace +kubectl create namespace keep +# install keep +helm install keep keephq/keep --namespace keep +# from local +helm install keep ./charts/keep --namespace keep + +kubectl -n keep describe pod keep-backend-697f6b946f-v2jxp +kubectl -n keep logs keep-frontend-577fdf5497-r8ht9 +# Import alerts + +# uninstall +helm uninstall keep --namespace keep + +kubectl -n keep exec -it keep-backend-64c4d7ddb7-7p5q5 /bin/bash +# copy the db +kubectl -n keep exec -it keep-database-86dd6b6775-92sz4 /bin/bash +kubectl -n keep cp ./keep.sql keep-database-659c69689-vxhkz:/tmp/keep.sql +kubectl -n keep exec -it keep-database-659c69689-vxhkz -- bash -c "mysql -u root keep < /tmp/keep.sql" +# exec into the pod +kubectl -n keep exec -it keep-database-86dd6b6775-92sz4 -- /bin/bash +# import +kubectl -n keep exec -it keep-database-659c69689-vxhkz -- bash -c "mysql -u root keep < /tmp/keep.sql" + +# No Load +## 500k alerts - 1Gi/250m cpu: get_last_alerts 2 minutes and 30 seconds +Keep Backend Workers get a timeout after one minute (status code 500 for preset and alert endpoints) +## 500k alerts - 2Gi/500m cpu: +- default mysql: get_last_alerts 1 minutes and 30 seconds +- innodb_buffer_pool_size = 4294967296: 25 seconds, 3 seconds after cache +## 500k alerts - 4Gi/1 cpu: get_last_alerts 2 minutes and 30 seconds +- +## 500k alerts - 8Gi/1 cpu: get_last_alerts 2 minutes and 30 seconds + +# Load 10 alerts per minute + +# Load 100 alerts per minute + +# Load 1000 alerts per minute + + +## 1M alerts +# Load 10 alerts per minute + +# Load 100 alerts per minute + +# Load 1000 alerts per minute diff --git a/assets/connect_providers.gif b/assets/connect_providers.gif new file mode 100644 index 0000000000..9c1069da4c Binary files /dev/null and b/assets/connect_providers.gif differ diff --git a/assets/keep.png b/assets/keep.png new file mode 100644 index 0000000000..91a9e0987d Binary files /dev/null and b/assets/keep.png differ diff --git a/assets/upload_workflow.gif b/assets/upload_workflow.gif new file mode 100644 index 0000000000..2504e743d7 Binary files /dev/null and b/assets/upload_workflow.gif differ diff --git a/assets/view_alerts.gif b/assets/view_alerts.gif new file mode 100644 index 0000000000..21b93fe9d4 Binary files /dev/null and b/assets/view_alerts.gif differ diff --git a/docker-compose-with-arq.yml b/docker-compose-with-arq.yml new file mode 100644 index 0000000000..9c2f0f300d --- /dev/null +++ b/docker-compose-with-arq.yml @@ -0,0 +1,51 @@ +services: + keep-frontend: + extends: + file: docker-compose.common.yml + service: keep-frontend-common + image: us-central1-docker.pkg.dev/keephq/keep/keep-ui + environment: + - AUTH_TYPE=NO_AUTH + - API_URL=http://keep-backend:8080 + volumes: + - ./state:/state + depends_on: + - keep-backend + + keep-backend: + extends: + file: docker-compose.common.yml + service: keep-backend-common + image: us-central1-docker.pkg.dev/keephq/keep/keep-api + environment: + - AUTH_TYPE=NO_AUTH + - REDIS=true + - 
REDIS_HOST=keep-arq-redis + - REDIS_PORT=6379 + volumes: + - ./state:/state + depends_on: + - keep-arq-redis + + keep-websocket-server: + extends: + file: docker-compose.common.yml + service: keep-websocket-server-common + + keep-arq-redis: + image: redis/redis-stack + ports: + - "6379:6379" + - "8081:8001" + + keep-arq-dashboard: + image: us-central1-docker.pkg.dev/keephq/keep/keep-arq-dashboard + ports: + - "8082:8000" + entrypoint: + - "uvicorn" + - "--host" + - "0.0.0.0" + - "arq_dashboard:app" + environment: + - ARQ_DASHBOARD_REDIS_URL=redis://keep-arq-redis:6379 diff --git a/docker-compose-with-auth.yml b/docker-compose-with-auth.yml new file mode 100644 index 0000000000..acdf8458b7 --- /dev/null +++ b/docker-compose-with-auth.yml @@ -0,0 +1,31 @@ +services: + keep-frontend: + extends: + file: docker-compose.common.yml + service: keep-frontend-common + image: us-central1-docker.pkg.dev/keephq/keep/keep-ui + environment: + - AUTH_TYPE=SINGLE_TENANT + - API_URL=http://keep-backend:8080 + volumes: + - ./state:/state + depends_on: + - keep-backend + + keep-backend: + extends: + file: docker-compose.common.yml + service: keep-backend-common + image: us-central1-docker.pkg.dev/keephq/keep/keep-api + environment: + - AUTH_TYPE=SINGLE_TENANT + - KEEP_JWT_SECRET=verysecretkey + - KEEP_DEFAULT_USERNAME=keep + - KEEP_DEFAULT_PASSWORD=keep + volumes: + - ./state:/state + + keep-websocket-server: + extends: + file: docker-compose.common.yml + service: keep-websocket-server-common diff --git a/docker-compose-with-otel.yaml b/docker-compose-with-otel.yaml new file mode 100644 index 0000000000..84b668597b --- /dev/null +++ b/docker-compose-with-otel.yaml @@ -0,0 +1,136 @@ +services: + loki: + image: grafana/loki:latest + profiles: + - otel + + ports: + - "3100:3100" + command: ["-config.file=/etc/loki/local-config.yaml"] + + tempo: + image: grafana/tempo:latest + profiles: + - otel + command: ["-config.file=/etc/tempo.yaml"] + volumes: + - ./otel-shared/tempo.yaml:/etc/tempo.yaml + - ./tempo-data:/tmp/tempo + ports: + - "14268:14268" # jaeger ingest + - "3200:3200" # tempo + - "9095:9095" # tempo grpc + - "4317:4317" # otlp grpc + - "4318:4318" # otlp http + - "9411:9411" # zipkin + + prometheus: + image: prom/prometheus:latest + profiles: + - otel + + command: + - --config.file=/etc/prometheus.yaml + - --web.enable-remote-write-receiver + - --enable-feature=exemplar-storage + volumes: + - ./otel-shared/prometheus.yaml:/etc/prometheus.yaml + ports: + - "9090:9090" + + alertmanager: + image: prom/alertmanager + profiles: + - otel + + container_name: alertmanager + volumes: + - ./otel-shared/alertmanager.yml:/etc/alertmanager/alertmanager.yml + command: + - "--config.file=/etc/alertmanager/alertmanager.yml" + + grafana: + image: grafana/grafana:10.0.3 + profiles: + - otel + + depends_on: + - loki + - tempo + - prometheus + volumes: + - ./otel-shared/grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml + environment: + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin + - GF_AUTH_DISABLE_LOGIN_FORM=false + - GF_FEATURE_TOGGLES_ENABLE=traceqlEditor + ports: + - "3001:3000" + + # OpenTelemetry collector. 
Make sure you set USERID and GOOGLE_APPLICATION_CREDENTIALS + # environment variables for your container to authenticate correctly + otel-collector: + image: otel/opentelemetry-collector-contrib:0.81.0 + profiles: + - otel + + ports: + - "9100:9100" + depends_on: + - tempo + - loki + volumes: + - ./otel-shared/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml + + keep-frontend-dev: + extends: + file: docker-compose.common.yml + service: keep-frontend-common + environment: + - API_URL=http://keep-backend-dev:8080 + build: + dockerfile: docker/Dockerfile.dev.ui + volumes: + - ./keep-ui:/app + - /app/node_modules + - /app/.next + depends_on: + - keep-backend-dev + + keep-backend-dev: + extends: + file: docker-compose.common.yml + service: keep-backend-common + build: + dockerfile: docker/Dockerfile.dev.api + environment: + - OTEL_SERVICE_NAME=keephq + - OTLP_ENDPOINT=http://otel-collector:4317 + - METRIC_OTEL_ENABLED=true + volumes: + - .:/app + - ./state:/state + + keep-websocket-server: + extends: + file: docker-compose.common.yml + service: keep-websocket-server-common + + log_collector: + image: timberio/vector:0.32.2-debian + profiles: + - otel + volumes: + - ./otel-shared/vector.toml:/etc/vector/vector.toml + - /var/run/docker.sock:/var/run/docker.sock + +volumes: + certs: + driver: local + esdata01: + driver: local + kibanadata: + driver: local + + db_data: diff --git a/docker-compose.common.yml b/docker-compose.common.yml new file mode 100644 index 0000000000..bec91fb877 --- /dev/null +++ b/docker-compose.common.yml @@ -0,0 +1,42 @@ +services: + keep-frontend-common: + ports: + - "3000:3000" + environment: + - NEXTAUTH_SECRET=secret + - NEXTAUTH_URL=http://localhost:3000 + - NEXT_PUBLIC_API_URL=http://localhost:8080 + - POSTHOG_KEY=phc_muk9qE3TfZsX3SZ9XxX52kCGJBclrjhkP9JxAQcm1PZ + - POSTHOG_HOST=https://app.posthog.com + - PUSHER_HOST=localhost + - PUSHER_PORT=6001 + - PUSHER_APP_KEY=keepappkey + - NEXT_PUBLIC_KEEP_VERSION=0.2.9 + + keep-backend-common: + ports: + - "8080:8080" + environment: + - PORT=8080 + - SECRET_MANAGER_TYPE=FILE + - SECRET_MANAGER_DIRECTORY=/state + - DATABASE_CONNECTION_STRING=sqlite:////state/db.sqlite3?check_same_thread=False + - OPENAI_API_KEY=$OPENAI_API_KEY + - PUSHER_APP_ID=1 + - PUSHER_APP_KEY=keepappkey + - PUSHER_APP_SECRET=keepappsecret + - PUSHER_HOST=keep-websocket-server + - PUSHER_PORT=6001 + - USE_NGROK=false + + keep-websocket-server-common: + image: quay.io/soketi/soketi:1.4-16-debian + ports: + - "6001:6001" + - "9601:9601" + environment: + - SOKETI_USER_AUTHENTICATION_TIMEOUT=3000 + - SOKETI_DEBUG=1 + - SOKETI_DEFAULT_APP_ID=1 + - SOKETI_DEFAULT_APP_KEY=keepappkey + - SOKETI_DEFAULT_APP_SECRET=keepappsecret diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml new file mode 100644 index 0000000000..2d2da6fcb7 --- /dev/null +++ b/docker-compose.dev.yml @@ -0,0 +1,30 @@ +services: + keep-frontend-dev: + extends: + file: docker-compose.common.yml + service: keep-frontend-common + environment: + - API_URL=http://keep-backend-dev:8080 + build: + dockerfile: docker/Dockerfile.dev.ui + volumes: + - ./keep-ui:/app + - /app/node_modules + - /app/.next + depends_on: + - keep-backend-dev + + keep-backend-dev: + extends: + file: docker-compose.common.yml + service: keep-backend-common + build: + dockerfile: docker/Dockerfile.dev.api + volumes: + - .:/app + - ./state:/state + + keep-websocket-server: + extends: + file: docker-compose.common.yml + service: keep-websocket-server-common diff --git a/docker-compose.yml b/docker-compose.yml 
new file mode 100644 index 0000000000..68291e6b6e --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,28 @@ +services: + keep-frontend: + extends: + file: docker-compose.common.yml + service: keep-frontend-common + image: us-central1-docker.pkg.dev/keephq/keep/keep-ui + environment: + - AUTH_TYPE=NO_AUTH + - API_URL=http://keep-backend:8080 + volumes: + - ./state:/state + depends_on: + - keep-backend + + keep-backend: + extends: + file: docker-compose.common.yml + service: keep-backend-common + image: us-central1-docker.pkg.dev/keephq/keep/keep-api + environment: + - AUTH_TYPE=NO_AUTH + volumes: + - ./state:/state + + keep-websocket-server: + extends: + file: docker-compose.common.yml + service: keep-websocket-server-common diff --git a/docker/Dockerfile.api b/docker/Dockerfile.api new file mode 100644 index 0000000000..291210d252 --- /dev/null +++ b/docker/Dockerfile.api @@ -0,0 +1,38 @@ +FROM python:3.11.6-slim as base + +ENV PYTHONFAULTHANDLER=1 \ + PYTHONHASHSEED=random \ + PYTHONUNBUFFERED=1 + +RUN useradd --user-group --system --create-home --no-log-init keep +WORKDIR /app + +FROM base as builder + +ENV PIP_DEFAULT_TIMEOUT=100 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 \ + PIP_NO_CACHE_DIR=1 \ + POETRY_VERSION=1.3.2 + +RUN pip install "poetry==$POETRY_VERSION" +RUN python -m venv /venv +COPY pyproject.toml poetry.lock ./ +RUN poetry export -f requirements.txt --output requirements.txt --without-hashes && /venv/bin/python -m pip install --upgrade -r requirements.txt +COPY keep keep +COPY ee keep/ee +COPY examples examples +COPY README.md README.md +RUN poetry build && /venv/bin/pip install --use-deprecated=legacy-resolver dist/*.whl + +FROM base as final +ENV PATH="/venv/bin:${PATH}" +ENV VIRTUAL_ENV="/venv" +ENV EE_PATH="ee" +COPY --from=builder /venv /venv +COPY --from=builder /app/examples /examples +# as per Openshift guidelines, https://docs.openshift.com/container-platform/4.11/openshift_images/create-images.html#use-uid_create-images +RUN chgrp -R 0 /app && chmod -R g=u /app +RUN chown -R keep:keep /app +RUN chown -R keep:keep /venv +USER keep +ENTRYPOINT ["gunicorn", "keep.api.api:get_app", "--bind" , "0.0.0.0:8080" , "--workers", "4" , "-k" , "uvicorn.workers.UvicornWorker", "-c", "/venv/lib/python3.11/site-packages/keep/api/config.py"] diff --git a/docker/Dockerfile.cli b/docker/Dockerfile.cli new file mode 100644 index 0000000000..c5632fd213 --- /dev/null +++ b/docker/Dockerfile.cli @@ -0,0 +1,25 @@ +FROM python:3.11.6-slim as base + +ENV PYTHONFAULTHANDLER=1 \ + PYTHONHASHSEED=random \ + PYTHONUNBUFFERED=1 + +WORKDIR /app + +FROM base as builder + +ENV PIP_DEFAULT_TIMEOUT=100 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 \ + PIP_NO_CACHE_DIR=1 \ + POETRY_VERSION=1.3.2 + +RUN pip install "poetry==$POETRY_VERSION" +RUN python -m venv /venv +COPY . . 
+RUN poetry build && /venv/bin/pip install --use-deprecated=legacy-resolver dist/*.whl + +FROM base as final + +ENV PATH="/venv/bin:${PATH}" +ENV VIRTUAL_ENV="/venv" +COPY --from=builder /venv /venv diff --git a/docker/Dockerfile.dev.api b/docker/Dockerfile.dev.api new file mode 100644 index 0000000000..5b3f12b703 --- /dev/null +++ b/docker/Dockerfile.dev.api @@ -0,0 +1,26 @@ +FROM python:3.11.6-slim as base + +ENV PYTHONFAULTHANDLER=1 \ + PYTHONHASHSEED=random \ + PYTHONUNBUFFERED=1 + +WORKDIR /app + +# Creating a virtual environment and installing dependencies +ENV PIP_DEFAULT_TIMEOUT=100 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 \ + PIP_NO_CACHE_DIR=1 \ + POETRY_VERSION=1.3.2 + +RUN pip install "poetry==$POETRY_VERSION" +RUN python -m venv /venv +COPY pyproject.toml ./ +RUN . /venv/bin/activate && poetry install --no-root + +# Setting the virtual environment path +ENV PYTHONPATH="/app:${PYTHONPATH}" +ENV PATH="/venv/bin:${PATH}" +ENV VIRTUAL_ENV="/venv" + + +CMD ["gunicorn", "keep.api.api:get_app", "--bind" , "0.0.0.0:8080" , "--workers", "1" , "-k" , "uvicorn.workers.UvicornWorker", "-c", "./keep/api/config.py", "--reload"] diff --git a/docker/Dockerfile.dev.ui b/docker/Dockerfile.dev.ui new file mode 100644 index 0000000000..0fd7ae4c8b --- /dev/null +++ b/docker/Dockerfile.dev.ui @@ -0,0 +1,26 @@ +# Use node alpine as it's a small node image +FROM node:alpine + +# Create the directory on the node image +# where our Next.js app will live +RUN mkdir -p /app + +# Set /app as the working directory +WORKDIR /app + +# Copy package.json and package-lock.json +# to the /app working directory +COPY keep-ui/package*.json /app/ + +# Copy the rest of our Next.js folder into /app +COPY ./keep-ui/ /app + +# Install dependencies in /app +RUN npm install +# Install next globally and create a symlink +RUN npm install -g next +RUN ln -s /usr/local/lib/node_modules/next/dist/bin/next /usr/local/bin/next || echo "next binary already linked to bin" +# Ensure port 3000 is accessible to our system +EXPOSE 3000 + +CMD ["npm", "run", "dev"] diff --git a/docker/Dockerfile.ui b/docker/Dockerfile.ui new file mode 100644 index 0000000000..549506c3ed --- /dev/null +++ b/docker/Dockerfile.ui @@ -0,0 +1,74 @@ + + +FROM node:18-alpine AS base + +# Install dependencies only when needed +FROM base AS deps +# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed. +RUN apk add --no-cache libc6-compat +WORKDIR /app + +# Install dependencies based on the preferred package manager +COPY package.json package-lock.json ./ +RUN npm ci --noproxy registry.npmjs.org --maxsockets 1 + + +# Rebuild the source code only when needed +FROM base AS builder +WORKDIR /app +COPY --from=deps /app/node_modules ./node_modules +COPY . . + +# Next.js collects completely anonymous telemetry data about general usage. +# Learn more here: https://nextjs.org/telemetry +# Uncomment the following line in case you want to disable telemetry during the build. 
+ENV NEXT_TELEMETRY_DISABLED 1 + +# If using npm comment out above and use below instead +ENV API_URL http://localhost:8080 +RUN npm run build + + +# Production image, copy all the files and run next +FROM base AS runner +ARG GIT_COMMIT_HASH=local +ARG KEEP_VERSION=local + +WORKDIR /app +# Inject the git commit hash into the build +# This is being injected from the build script +ENV GIT_COMMIT_HASH=${GIT_COMMIT_HASH} +ENV KEEP_VERSION=${KEEP_VERSION} + + + +ENV NODE_ENV production +# Uncomment the following line in case you want to disable telemetry during runtime. +ENV NEXT_TELEMETRY_DISABLED 1 + +RUN addgroup --system --gid 1001 nodejs +RUN adduser --system --uid 1001 nextjs + +COPY --from=builder /app/public ./public + +# Automatically leverage output traces to reduce image size +# https://nextjs.org/docs/advanced-features/output-file-tracing +COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./ +COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static +COPY entrypoint.sh /app/entrypoint.sh + +# as per Openshift guidelines, https://docs.openshift.com/container-platform/4.11/openshift_images/create-images.html#use-uid_create-images +RUN chgrp -R 0 /app && chmod -R g=u /app +USER nextjs + +EXPOSE 3000 + +ENV PORT 3000 +ENV POSTHOG_KEY=phc_muk9qE3TfZsX3SZ9XxX52kCGJBclrjhkP9JxAQcm1PZ +ENV POSTHOG_HOST=https://app.posthog.com +ENV PUSHER_HOST=localhost +ENV PUSHER_PORT=6001 +ENV PUSHER_APP_KEY=keepappkey + + +ENTRYPOINT ["/app/entrypoint.sh"] diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000000..6d5a902819 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,8 @@ +How to run docs locally: + +``` +npm i -g mintlify +mintlify dev +``` + +Read more: https://mintlify.com/docs/development diff --git a/docs/api-ref/actions/add-actions.mdx b/docs/api-ref/actions/add-actions.mdx new file mode 100644 index 0000000000..0d49ef1f94 --- /dev/null +++ b/docs/api-ref/actions/add-actions.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /actions +--- \ No newline at end of file diff --git a/docs/api-ref/actions/create-actions.mdx b/docs/api-ref/actions/create-actions.mdx new file mode 100644 index 0000000000..0d49ef1f94 --- /dev/null +++ b/docs/api-ref/actions/create-actions.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /actions +--- \ No newline at end of file diff --git a/docs/api-ref/actions/delete-action.mdx b/docs/api-ref/actions/delete-action.mdx new file mode 100644 index 0000000000..1d4fdc257b --- /dev/null +++ b/docs/api-ref/actions/delete-action.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /actions/{action_id} +--- \ No newline at end of file diff --git a/docs/api-ref/actions/get-actions.mdx b/docs/api-ref/actions/get-actions.mdx new file mode 100644 index 0000000000..138366466c --- /dev/null +++ b/docs/api-ref/actions/get-actions.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /actions +--- diff --git a/docs/api-ref/actions/put-action.mdx b/docs/api-ref/actions/put-action.mdx new file mode 100644 index 0000000000..63996dc830 --- /dev/null +++ b/docs/api-ref/actions/put-action.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /actions/{action_id} +--- \ No newline at end of file diff --git a/docs/api-ref/actions/update-action.mdx b/docs/api-ref/actions/update-action.mdx new file mode 100644 index 0000000000..63996dc830 --- /dev/null +++ b/docs/api-ref/actions/update-action.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /actions/{action_id} +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/assign-alert.mdx b/docs/api-ref/alerts/assign-alert.mdx new file mode 100644 index 
0000000000..4195b278d1 --- /dev/null +++ b/docs/api-ref/alerts/assign-alert.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /alerts/{fingerprint}/assign/{last_received} +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/delete-alert.mdx b/docs/api-ref/alerts/delete-alert.mdx new file mode 100644 index 0000000000..eaa7465af0 --- /dev/null +++ b/docs/api-ref/alerts/delete-alert.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /alerts +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/enrich-alert.mdx b/docs/api-ref/alerts/enrich-alert.mdx new file mode 100644 index 0000000000..6f700169eb --- /dev/null +++ b/docs/api-ref/alerts/enrich-alert.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /alerts/enrich +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/get-alert-audit.mdx b/docs/api-ref/alerts/get-alert-audit.mdx new file mode 100644 index 0000000000..e3566b8b45 --- /dev/null +++ b/docs/api-ref/alerts/get-alert-audit.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /alerts/{fingerprint}/audit +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/get-alert-history.mdx b/docs/api-ref/alerts/get-alert-history.mdx new file mode 100644 index 0000000000..6d5c177492 --- /dev/null +++ b/docs/api-ref/alerts/get-alert-history.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /alerts/{fingerprint}/history +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/get-alert.mdx b/docs/api-ref/alerts/get-alert.mdx new file mode 100644 index 0000000000..d293028b04 --- /dev/null +++ b/docs/api-ref/alerts/get-alert.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /alerts/{fingerprint} +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/get-alerts.mdx b/docs/api-ref/alerts/get-alerts.mdx new file mode 100644 index 0000000000..17d142a241 --- /dev/null +++ b/docs/api-ref/alerts/get-alerts.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /alerts +--- diff --git a/docs/api-ref/alerts/get-all-alerts.mdx b/docs/api-ref/alerts/get-all-alerts.mdx new file mode 100644 index 0000000000..b425ccc40b --- /dev/null +++ b/docs/api-ref/alerts/get-all-alerts.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /alerts +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/get-multiple-fingerprint-alert-audit.mdx b/docs/api-ref/alerts/get-multiple-fingerprint-alert-audit.mdx new file mode 100644 index 0000000000..fd02186ede --- /dev/null +++ b/docs/api-ref/alerts/get-multiple-fingerprint-alert-audit.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /alerts/audit +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/receive-event.mdx b/docs/api-ref/alerts/receive-event.mdx new file mode 100644 index 0000000000..4d18d92b53 --- /dev/null +++ b/docs/api-ref/alerts/receive-event.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /alerts/event/{provider_type} +--- diff --git a/docs/api-ref/alerts/receive-generic-event.mdx b/docs/api-ref/alerts/receive-generic-event.mdx new file mode 100644 index 0000000000..ca8fbf0144 --- /dev/null +++ b/docs/api-ref/alerts/receive-generic-event.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /alerts/event +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/search-alerts.mdx b/docs/api-ref/alerts/search-alerts.mdx new file mode 100644 index 0000000000..1b5f4f4ed0 --- /dev/null +++ b/docs/api-ref/alerts/search-alerts.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /alerts/search +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/unenrich-alert.mdx b/docs/api-ref/alerts/unenrich-alert.mdx new file mode 100644 index 0000000000..1a76823378 --- /dev/null +++ 
b/docs/api-ref/alerts/unenrich-alert.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /alerts/unenrich +--- \ No newline at end of file diff --git a/docs/api-ref/alerts/webhook-challenge.mdx b/docs/api-ref/alerts/webhook-challenge.mdx new file mode 100644 index 0000000000..2aa6c8bb1a --- /dev/null +++ b/docs/api-ref/alerts/webhook-challenge.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /alerts/event/netdata +--- \ No newline at end of file diff --git a/docs/api-ref/auth/create-group.mdx b/docs/api-ref/auth/create-group.mdx new file mode 100644 index 0000000000..e0371dbd65 --- /dev/null +++ b/docs/api-ref/auth/create-group.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /auth/groups +--- diff --git a/docs/api-ref/auth/create-permissions.mdx b/docs/api-ref/auth/create-permissions.mdx new file mode 100644 index 0000000000..e9973a8ea3 --- /dev/null +++ b/docs/api-ref/auth/create-permissions.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /auth/permissions +--- diff --git a/docs/api-ref/auth/create-role.mdx b/docs/api-ref/auth/create-role.mdx new file mode 100644 index 0000000000..9533e4b0f9 --- /dev/null +++ b/docs/api-ref/auth/create-role.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /auth/roles +--- diff --git a/docs/api-ref/auth/create-user.mdx b/docs/api-ref/auth/create-user.mdx new file mode 100644 index 0000000000..f7e911d458 --- /dev/null +++ b/docs/api-ref/auth/create-user.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /auth/users +--- diff --git a/docs/api-ref/auth/delete-group.mdx b/docs/api-ref/auth/delete-group.mdx new file mode 100644 index 0000000000..ac1d4daaa8 --- /dev/null +++ b/docs/api-ref/auth/delete-group.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /auth/groups/{group_name} +--- diff --git a/docs/api-ref/auth/delete-role.mdx b/docs/api-ref/auth/delete-role.mdx new file mode 100644 index 0000000000..0cdfc5ab17 --- /dev/null +++ b/docs/api-ref/auth/delete-role.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /auth/roles/{role_id} +--- diff --git a/docs/api-ref/auth/delete-user.mdx b/docs/api-ref/auth/delete-user.mdx new file mode 100644 index 0000000000..37c1399c85 --- /dev/null +++ b/docs/api-ref/auth/delete-user.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /auth/users/{user_email} +--- diff --git a/docs/api-ref/auth/get-groups.mdx b/docs/api-ref/auth/get-groups.mdx new file mode 100644 index 0000000000..8aca8eb702 --- /dev/null +++ b/docs/api-ref/auth/get-groups.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /auth/groups +--- diff --git a/docs/api-ref/auth/get-permissions.mdx b/docs/api-ref/auth/get-permissions.mdx new file mode 100644 index 0000000000..66c2d60c43 --- /dev/null +++ b/docs/api-ref/auth/get-permissions.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /auth/permissions +--- diff --git a/docs/api-ref/auth/get-roles.mdx b/docs/api-ref/auth/get-roles.mdx new file mode 100644 index 0000000000..193dd4b2fc --- /dev/null +++ b/docs/api-ref/auth/get-roles.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /auth/roles +--- diff --git a/docs/api-ref/auth/get-scopes.mdx b/docs/api-ref/auth/get-scopes.mdx new file mode 100644 index 0000000000..5259c4b960 --- /dev/null +++ b/docs/api-ref/auth/get-scopes.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /auth/permissions/scopes +--- diff --git a/docs/api-ref/auth/get-users.mdx b/docs/api-ref/auth/get-users.mdx new file mode 100644 index 0000000000..7d07258617 --- /dev/null +++ b/docs/api-ref/auth/get-users.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /auth/users +--- diff --git a/docs/api-ref/auth/update-group.mdx b/docs/api-ref/auth/update-group.mdx new file mode 100644 index 0000000000..d8d1a787eb --- /dev/null +++ 
b/docs/api-ref/auth/update-group.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /auth/groups/{group_name} +--- diff --git a/docs/api-ref/auth/update-role.mdx b/docs/api-ref/auth/update-role.mdx new file mode 100644 index 0000000000..13f1599918 --- /dev/null +++ b/docs/api-ref/auth/update-role.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /auth/roles/{role_id} +--- diff --git a/docs/api-ref/auth/update-user.mdx b/docs/api-ref/auth/update-user.mdx new file mode 100644 index 0000000000..46c5cbc718 --- /dev/null +++ b/docs/api-ref/auth/update-user.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /auth/users/{user_email} +--- diff --git a/docs/api-ref/dashboard/create-dashboard.mdx b/docs/api-ref/dashboard/create-dashboard.mdx new file mode 100644 index 0000000000..2f910af2d8 --- /dev/null +++ b/docs/api-ref/dashboard/create-dashboard.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /dashboard +--- \ No newline at end of file diff --git a/docs/api-ref/dashboard/delete-dashboard.mdx b/docs/api-ref/dashboard/delete-dashboard.mdx new file mode 100644 index 0000000000..f2c059738a --- /dev/null +++ b/docs/api-ref/dashboard/delete-dashboard.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /dashboard/{dashboard_id} +--- \ No newline at end of file diff --git a/docs/api-ref/dashboard/read-dashboards.mdx b/docs/api-ref/dashboard/read-dashboards.mdx new file mode 100644 index 0000000000..da275d36fd --- /dev/null +++ b/docs/api-ref/dashboard/read-dashboards.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /dashboard +--- \ No newline at end of file diff --git a/docs/api-ref/dashboard/update-dashboard.mdx b/docs/api-ref/dashboard/update-dashboard.mdx new file mode 100644 index 0000000000..ea9d45631a --- /dev/null +++ b/docs/api-ref/dashboard/update-dashboard.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /dashboard/{dashboard_id} +--- \ No newline at end of file diff --git a/docs/api-ref/deduplications/create-deduplication-rule.mdx b/docs/api-ref/deduplications/create-deduplication-rule.mdx new file mode 100644 index 0000000000..d2cae167c6 --- /dev/null +++ b/docs/api-ref/deduplications/create-deduplication-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /deduplications +--- \ No newline at end of file diff --git a/docs/api-ref/deduplications/delete-deduplication-rule.mdx b/docs/api-ref/deduplications/delete-deduplication-rule.mdx new file mode 100644 index 0000000000..50c8500108 --- /dev/null +++ b/docs/api-ref/deduplications/delete-deduplication-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /deduplications/{rule_id} +--- \ No newline at end of file diff --git a/docs/api-ref/deduplications/get-deduplication-fields.mdx b/docs/api-ref/deduplications/get-deduplication-fields.mdx new file mode 100644 index 0000000000..aad062010a --- /dev/null +++ b/docs/api-ref/deduplications/get-deduplication-fields.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /deduplications/fields +--- \ No newline at end of file diff --git a/docs/api-ref/deduplications/get-deduplications.mdx b/docs/api-ref/deduplications/get-deduplications.mdx new file mode 100644 index 0000000000..0160b287bf --- /dev/null +++ b/docs/api-ref/deduplications/get-deduplications.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /deduplications +--- \ No newline at end of file diff --git a/docs/api-ref/deduplications/update-deduplication-rule.mdx b/docs/api-ref/deduplications/update-deduplication-rule.mdx new file mode 100644 index 0000000000..cd18c240de --- /dev/null +++ b/docs/api-ref/deduplications/update-deduplication-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /deduplications/{rule_id} +--- \ No newline at end of file diff --git 
a/docs/api-ref/enrichment/create-extraction-rule.mdx b/docs/api-ref/enrichment/create-extraction-rule.mdx new file mode 100644 index 0000000000..235f1589fd --- /dev/null +++ b/docs/api-ref/enrichment/create-extraction-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /extraction +--- \ No newline at end of file diff --git a/docs/api-ref/enrichment/create-rule.mdx b/docs/api-ref/enrichment/create-rule.mdx new file mode 100644 index 0000000000..6994f6aab7 --- /dev/null +++ b/docs/api-ref/enrichment/create-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /mapping +--- \ No newline at end of file diff --git a/docs/api-ref/enrichment/delete-extraction-rule.mdx b/docs/api-ref/enrichment/delete-extraction-rule.mdx new file mode 100644 index 0000000000..fc9571b0ed --- /dev/null +++ b/docs/api-ref/enrichment/delete-extraction-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /extraction/{rule_id} +--- \ No newline at end of file diff --git a/docs/api-ref/enrichment/delete-rule.mdx b/docs/api-ref/enrichment/delete-rule.mdx new file mode 100644 index 0000000000..4a7a1c3866 --- /dev/null +++ b/docs/api-ref/enrichment/delete-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /mapping/{rule_id} +--- \ No newline at end of file diff --git a/docs/api-ref/enrichment/get-extraction-rules.mdx b/docs/api-ref/enrichment/get-extraction-rules.mdx new file mode 100644 index 0000000000..619c38eca8 --- /dev/null +++ b/docs/api-ref/enrichment/get-extraction-rules.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /extraction +--- \ No newline at end of file diff --git a/docs/api-ref/enrichment/get-rules.mdx b/docs/api-ref/enrichment/get-rules.mdx new file mode 100644 index 0000000000..b1ac11c0b9 --- /dev/null +++ b/docs/api-ref/enrichment/get-rules.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /mapping +--- \ No newline at end of file diff --git a/docs/api-ref/enrichment/update-extraction-rule.mdx b/docs/api-ref/enrichment/update-extraction-rule.mdx new file mode 100644 index 0000000000..3b0dfcc7df --- /dev/null +++ b/docs/api-ref/enrichment/update-extraction-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /extraction/{rule_id} +--- \ No newline at end of file diff --git a/docs/api-ref/enrichment/update-rule.mdx b/docs/api-ref/enrichment/update-rule.mdx new file mode 100644 index 0000000000..842be2e45d --- /dev/null +++ b/docs/api-ref/enrichment/update-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /mapping +--- \ No newline at end of file diff --git a/docs/api-ref/groups/get-groups.mdx b/docs/api-ref/groups/get-groups.mdx new file mode 100644 index 0000000000..c7d6e31136 --- /dev/null +++ b/docs/api-ref/groups/get-groups.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /groups/ +--- \ No newline at end of file diff --git a/docs/api-ref/healthcheck/healthcheck.mdx b/docs/api-ref/healthcheck/healthcheck.mdx new file mode 100644 index 0000000000..c2e4577351 --- /dev/null +++ b/docs/api-ref/healthcheck/healthcheck.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /healthcheck +--- diff --git a/docs/api-ref/incidents/add-alerts-to-incident.mdx b/docs/api-ref/incidents/add-alerts-to-incident.mdx new file mode 100644 index 0000000000..f77431dba0 --- /dev/null +++ b/docs/api-ref/incidents/add-alerts-to-incident.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /incidents/{incident_id}/alerts +--- \ No newline at end of file diff --git a/docs/api-ref/incidents/change-incident-status.mdx b/docs/api-ref/incidents/change-incident-status.mdx new file mode 100644 index 0000000000..a5c07ccf07 --- /dev/null +++ b/docs/api-ref/incidents/change-incident-status.mdx @@ -0,0 +1,3 @@ +--- +openapi: post 
/incidents/{incident_id}/status +--- \ No newline at end of file diff --git a/docs/api-ref/incidents/confirm-incident.mdx b/docs/api-ref/incidents/confirm-incident.mdx new file mode 100644 index 0000000000..6fc9b4557c --- /dev/null +++ b/docs/api-ref/incidents/confirm-incident.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /incidents/{incident_id}/confirm +--- diff --git a/docs/api-ref/incidents/create-incident-endpoint.mdx b/docs/api-ref/incidents/create-incident-endpoint.mdx new file mode 100644 index 0000000000..20739ac75d --- /dev/null +++ b/docs/api-ref/incidents/create-incident-endpoint.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /incidents +--- \ No newline at end of file diff --git a/docs/api-ref/incidents/delete-alerts-from-incident.mdx b/docs/api-ref/incidents/delete-alerts-from-incident.mdx new file mode 100644 index 0000000000..99aae77023 --- /dev/null +++ b/docs/api-ref/incidents/delete-alerts-from-incident.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /incidents/{incident_id}/alerts +--- \ No newline at end of file diff --git a/docs/api-ref/incidents/delete-incident.mdx b/docs/api-ref/incidents/delete-incident.mdx new file mode 100644 index 0000000000..11a7ad94ea --- /dev/null +++ b/docs/api-ref/incidents/delete-incident.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /incidents/{incident_id} +--- \ No newline at end of file diff --git a/docs/api-ref/incidents/get-all-incidents.mdx b/docs/api-ref/incidents/get-all-incidents.mdx new file mode 100644 index 0000000000..9324827982 --- /dev/null +++ b/docs/api-ref/incidents/get-all-incidents.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /incidents +--- \ No newline at end of file diff --git a/docs/api-ref/incidents/get-incident-alerts.mdx b/docs/api-ref/incidents/get-incident-alerts.mdx new file mode 100644 index 0000000000..2ec1bb9d10 --- /dev/null +++ b/docs/api-ref/incidents/get-incident-alerts.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /incidents/{incident_id}/alerts +--- \ No newline at end of file diff --git a/docs/api-ref/incidents/get-incident.mdx b/docs/api-ref/incidents/get-incident.mdx new file mode 100644 index 0000000000..0e5b2a991f --- /dev/null +++ b/docs/api-ref/incidents/get-incident.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /incidents/{incident_id} +--- \ No newline at end of file diff --git a/docs/api-ref/incidents/update-incident-1.mdx b/docs/api-ref/incidents/update-incident-1.mdx new file mode 100644 index 0000000000..9f6e0e5665 --- /dev/null +++ b/docs/api-ref/incidents/update-incident-1.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /incidents/{incident_id}/confirm +--- \ No newline at end of file diff --git a/docs/api-ref/incidents/update-incident.mdx b/docs/api-ref/incidents/update-incident.mdx new file mode 100644 index 0000000000..9201b30304 --- /dev/null +++ b/docs/api-ref/incidents/update-incident.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /incidents/{incident_id} +--- \ No newline at end of file diff --git a/docs/api-ref/maintenance/create-maintenance-rule.mdx b/docs/api-ref/maintenance/create-maintenance-rule.mdx new file mode 100644 index 0000000000..5d848366c2 --- /dev/null +++ b/docs/api-ref/maintenance/create-maintenance-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /maintenance +--- \ No newline at end of file diff --git a/docs/api-ref/maintenance/delete-maintenance-rule.mdx b/docs/api-ref/maintenance/delete-maintenance-rule.mdx new file mode 100644 index 0000000000..7ea7ab0c05 --- /dev/null +++ b/docs/api-ref/maintenance/delete-maintenance-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /maintenance/{rule_id} +--- \ No newline at end of 
file diff --git a/docs/api-ref/maintenance/get-maintenance-rules.mdx b/docs/api-ref/maintenance/get-maintenance-rules.mdx new file mode 100644 index 0000000000..98d9101c2b --- /dev/null +++ b/docs/api-ref/maintenance/get-maintenance-rules.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /maintenance +--- \ No newline at end of file diff --git a/docs/api-ref/maintenance/update-maintenance-rule.mdx b/docs/api-ref/maintenance/update-maintenance-rule.mdx new file mode 100644 index 0000000000..e6ed67ec3e --- /dev/null +++ b/docs/api-ref/maintenance/update-maintenance-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /maintenance/{rule_id} +--- \ No newline at end of file diff --git a/docs/api-ref/mapping/create-mapping.mdx b/docs/api-ref/mapping/create-mapping.mdx new file mode 100644 index 0000000000..6994f6aab7 --- /dev/null +++ b/docs/api-ref/mapping/create-mapping.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /mapping +--- \ No newline at end of file diff --git a/docs/api-ref/mapping/delete-mapping-by-id.mdx b/docs/api-ref/mapping/delete-mapping-by-id.mdx new file mode 100644 index 0000000000..52645c5dde --- /dev/null +++ b/docs/api-ref/mapping/delete-mapping-by-id.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /mapping/{mapping_id} +--- \ No newline at end of file diff --git a/docs/api-ref/mapping/get-mappings.mdx b/docs/api-ref/mapping/get-mappings.mdx new file mode 100644 index 0000000000..b1ac11c0b9 --- /dev/null +++ b/docs/api-ref/mapping/get-mappings.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /mapping +--- \ No newline at end of file diff --git a/docs/api-ref/metrics/get-metrics.mdx b/docs/api-ref/metrics/get-metrics.mdx new file mode 100644 index 0000000000..4bbb01c3c1 --- /dev/null +++ b/docs/api-ref/metrics/get-metrics.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /metrics +--- \ No newline at end of file diff --git a/docs/api-ref/preset/create-preset-tab.mdx b/docs/api-ref/preset/create-preset-tab.mdx new file mode 100644 index 0000000000..043d23b7d5 --- /dev/null +++ b/docs/api-ref/preset/create-preset-tab.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /preset/{preset_id}/tab +--- \ No newline at end of file diff --git a/docs/api-ref/preset/create-preset.mdx b/docs/api-ref/preset/create-preset.mdx new file mode 100644 index 0000000000..8925cb3231 --- /dev/null +++ b/docs/api-ref/preset/create-preset.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /preset +--- \ No newline at end of file diff --git a/docs/api-ref/preset/delete-preset.mdx b/docs/api-ref/preset/delete-preset.mdx new file mode 100644 index 0000000000..9e770eab09 --- /dev/null +++ b/docs/api-ref/preset/delete-preset.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /preset/{uuid} +--- \ No newline at end of file diff --git a/docs/api-ref/preset/delete-tab.mdx b/docs/api-ref/preset/delete-tab.mdx new file mode 100644 index 0000000000..646b51c76b --- /dev/null +++ b/docs/api-ref/preset/delete-tab.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /preset/{preset_id}/tab/{tab_id} +--- \ No newline at end of file diff --git a/docs/api-ref/preset/get-preset-alerts.mdx b/docs/api-ref/preset/get-preset-alerts.mdx new file mode 100644 index 0000000000..262e516b23 --- /dev/null +++ b/docs/api-ref/preset/get-preset-alerts.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /preset/{preset_name}/alerts +--- \ No newline at end of file diff --git a/docs/api-ref/preset/get-presets.mdx b/docs/api-ref/preset/get-presets.mdx new file mode 100644 index 0000000000..33c3d5aeae --- /dev/null +++ b/docs/api-ref/preset/get-presets.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /preset +--- \ No newline at end of file diff --git 
a/docs/api-ref/preset/update-preset.mdx b/docs/api-ref/preset/update-preset.mdx new file mode 100644 index 0000000000..669be7d4ca --- /dev/null +++ b/docs/api-ref/preset/update-preset.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /preset/{uuid} +--- \ No newline at end of file diff --git a/docs/api-ref/providers/add-alert.mdx b/docs/api-ref/providers/add-alert.mdx new file mode 100644 index 0000000000..cd6c35f17c --- /dev/null +++ b/docs/api-ref/providers/add-alert.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /providers/{provider_type}/{provider_id}/alerts +--- diff --git a/docs/api-ref/providers/delete-provider.mdx b/docs/api-ref/providers/delete-provider.mdx new file mode 100644 index 0000000000..2e3188d912 --- /dev/null +++ b/docs/api-ref/providers/delete-provider.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /providers/{provider_type}/{provider_id} +--- diff --git a/docs/api-ref/providers/export-providers.mdx b/docs/api-ref/providers/export-providers.mdx new file mode 100644 index 0000000000..24c5b8040b --- /dev/null +++ b/docs/api-ref/providers/export-providers.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /providers/export +--- diff --git a/docs/api-ref/providers/get-alert-count.mdx b/docs/api-ref/providers/get-alert-count.mdx new file mode 100644 index 0000000000..e7372237be --- /dev/null +++ b/docs/api-ref/providers/get-alert-count.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /providers/{provider_type}/{provider_id}/alerts/count +--- \ No newline at end of file diff --git a/docs/api-ref/providers/get-alerts-configuration.mdx b/docs/api-ref/providers/get-alerts-configuration.mdx new file mode 100644 index 0000000000..56570a63e4 --- /dev/null +++ b/docs/api-ref/providers/get-alerts-configuration.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /providers/{provider_type}/{provider_id}/configured-alerts +--- diff --git a/docs/api-ref/providers/get-alerts-schema.mdx b/docs/api-ref/providers/get-alerts-schema.mdx new file mode 100644 index 0000000000..49a5210298 --- /dev/null +++ b/docs/api-ref/providers/get-alerts-schema.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /providers/{provider_type}/schema +--- diff --git a/docs/api-ref/providers/get-installed-providers.mdx b/docs/api-ref/providers/get-installed-providers.mdx new file mode 100644 index 0000000000..4de9cceb22 --- /dev/null +++ b/docs/api-ref/providers/get-installed-providers.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /providers/export +--- \ No newline at end of file diff --git a/docs/api-ref/providers/get-logs.mdx b/docs/api-ref/providers/get-logs.mdx new file mode 100644 index 0000000000..a153513c21 --- /dev/null +++ b/docs/api-ref/providers/get-logs.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /providers/{provider_type}/{provider_id}/logs +--- diff --git a/docs/api-ref/providers/get-providers.mdx b/docs/api-ref/providers/get-providers.mdx new file mode 100644 index 0000000000..c377cc2661 --- /dev/null +++ b/docs/api-ref/providers/get-providers.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /providers +--- diff --git a/docs/api-ref/providers/get-webhook-settings.mdx b/docs/api-ref/providers/get-webhook-settings.mdx new file mode 100644 index 0000000000..4771808d78 --- /dev/null +++ b/docs/api-ref/providers/get-webhook-settings.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /providers/{provider_type}/webhook +--- diff --git a/docs/api-ref/providers/install-provider-oauth2.mdx b/docs/api-ref/providers/install-provider-oauth2.mdx new file mode 100644 index 0000000000..3eb4a90fb8 --- /dev/null +++ b/docs/api-ref/providers/install-provider-oauth2.mdx @@ -0,0 +1,3 @@ +--- +openapi: post 
/providers/install/oauth2/{provider_type} +--- \ No newline at end of file diff --git a/docs/api-ref/providers/install-provider-webhook.mdx b/docs/api-ref/providers/install-provider-webhook.mdx new file mode 100644 index 0000000000..251b3d8462 --- /dev/null +++ b/docs/api-ref/providers/install-provider-webhook.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /providers/install/webhook/{provider_type}/{provider_id} +--- diff --git a/docs/api-ref/providers/install-provider.mdx b/docs/api-ref/providers/install-provider.mdx new file mode 100644 index 0000000000..e065001572 --- /dev/null +++ b/docs/api-ref/providers/install-provider.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /providers/install +--- diff --git a/docs/api-ref/providers/invoke-provider-method.mdx b/docs/api-ref/providers/invoke-provider-method.mdx new file mode 100644 index 0000000000..e80c49497b --- /dev/null +++ b/docs/api-ref/providers/invoke-provider-method.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /providers/{provider_id}/invoke/{method} +--- \ No newline at end of file diff --git a/docs/api-ref/providers/test-provider.mdx b/docs/api-ref/providers/test-provider.mdx new file mode 100644 index 0000000000..407b69828a --- /dev/null +++ b/docs/api-ref/providers/test-provider.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /providers/test +--- diff --git a/docs/api-ref/providers/update-provider.mdx b/docs/api-ref/providers/update-provider.mdx new file mode 100644 index 0000000000..1ee02f7edc --- /dev/null +++ b/docs/api-ref/providers/update-provider.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /providers/{provider_id} +--- \ No newline at end of file diff --git a/docs/api-ref/providers/validate-provider-scopes.mdx b/docs/api-ref/providers/validate-provider-scopes.mdx new file mode 100644 index 0000000000..64b6e58549 --- /dev/null +++ b/docs/api-ref/providers/validate-provider-scopes.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /providers/{provider_id}/scopes +--- \ No newline at end of file diff --git a/docs/api-ref/pusher/pusher-authentication.mdx b/docs/api-ref/pusher/pusher-authentication.mdx new file mode 100644 index 0000000000..ed9c2b39b3 --- /dev/null +++ b/docs/api-ref/pusher/pusher-authentication.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /pusher/auth +--- \ No newline at end of file diff --git a/docs/api-ref/rules/create-rule.mdx b/docs/api-ref/rules/create-rule.mdx new file mode 100644 index 0000000000..11c79981ed --- /dev/null +++ b/docs/api-ref/rules/create-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /rules +--- \ No newline at end of file diff --git a/docs/api-ref/rules/delete-rule.mdx b/docs/api-ref/rules/delete-rule.mdx new file mode 100644 index 0000000000..66eb16654f --- /dev/null +++ b/docs/api-ref/rules/delete-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /rules/{rule_id} +--- \ No newline at end of file diff --git a/docs/api-ref/rules/get-rules.mdx b/docs/api-ref/rules/get-rules.mdx new file mode 100644 index 0000000000..44e0acce0a --- /dev/null +++ b/docs/api-ref/rules/get-rules.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /rules +--- \ No newline at end of file diff --git a/docs/api-ref/rules/update-rule.mdx b/docs/api-ref/rules/update-rule.mdx new file mode 100644 index 0000000000..1e5125d5f6 --- /dev/null +++ b/docs/api-ref/rules/update-rule.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /rules/{rule_id} +--- \ No newline at end of file diff --git a/docs/api-ref/settings/create-key.mdx b/docs/api-ref/settings/create-key.mdx new file mode 100644 index 0000000000..c6928f71eb --- /dev/null +++ b/docs/api-ref/settings/create-key.mdx @@ -0,0 +1,3 @@ +--- 
+openapi: post /settings/apikey +--- \ No newline at end of file diff --git a/docs/api-ref/settings/create-user.mdx b/docs/api-ref/settings/create-user.mdx new file mode 100644 index 0000000000..aa2658e3ef --- /dev/null +++ b/docs/api-ref/settings/create-user.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /settings/users +--- \ No newline at end of file diff --git a/docs/api-ref/settings/delete-api-key.mdx b/docs/api-ref/settings/delete-api-key.mdx new file mode 100644 index 0000000000..ed21cb1bca --- /dev/null +++ b/docs/api-ref/settings/delete-api-key.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /settings/apikey/{keyId} +--- \ No newline at end of file diff --git a/docs/api-ref/settings/delete-smtp-settings.mdx b/docs/api-ref/settings/delete-smtp-settings.mdx new file mode 100644 index 0000000000..4df0259bd5 --- /dev/null +++ b/docs/api-ref/settings/delete-smtp-settings.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /settings/smtp +--- \ No newline at end of file diff --git a/docs/api-ref/settings/delete-user.mdx b/docs/api-ref/settings/delete-user.mdx new file mode 100644 index 0000000000..807cb53570 --- /dev/null +++ b/docs/api-ref/settings/delete-user.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /settings/users/{user_email} +--- \ No newline at end of file diff --git a/docs/api-ref/settings/get-keys.mdx b/docs/api-ref/settings/get-keys.mdx new file mode 100644 index 0000000000..4c8ca4e816 --- /dev/null +++ b/docs/api-ref/settings/get-keys.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /settings/apikeys +--- \ No newline at end of file diff --git a/docs/api-ref/settings/get-smtp-settings.mdx b/docs/api-ref/settings/get-smtp-settings.mdx new file mode 100644 index 0000000000..0f701924a7 --- /dev/null +++ b/docs/api-ref/settings/get-smtp-settings.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /settings/smtp +--- \ No newline at end of file diff --git a/docs/api-ref/settings/get-sso-settings.mdx b/docs/api-ref/settings/get-sso-settings.mdx new file mode 100644 index 0000000000..d4e7f049a5 --- /dev/null +++ b/docs/api-ref/settings/get-sso-settings.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /settings/sso +--- diff --git a/docs/api-ref/settings/get-users.mdx b/docs/api-ref/settings/get-users.mdx new file mode 100644 index 0000000000..8381a37ef4 --- /dev/null +++ b/docs/api-ref/settings/get-users.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /settings/users +--- \ No newline at end of file diff --git a/docs/api-ref/settings/test-smtp-settings.mdx b/docs/api-ref/settings/test-smtp-settings.mdx new file mode 100644 index 0000000000..64cc998d63 --- /dev/null +++ b/docs/api-ref/settings/test-smtp-settings.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /settings/smtp/test +--- \ No newline at end of file diff --git a/docs/api-ref/settings/update-api-key.mdx b/docs/api-ref/settings/update-api-key.mdx new file mode 100644 index 0000000000..fbd6124685 --- /dev/null +++ b/docs/api-ref/settings/update-api-key.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /settings/apikey +--- \ No newline at end of file diff --git a/docs/api-ref/settings/update-smtp-settings.mdx b/docs/api-ref/settings/update-smtp-settings.mdx new file mode 100644 index 0000000000..acf77b1fcc --- /dev/null +++ b/docs/api-ref/settings/update-smtp-settings.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /settings/smtp +--- \ No newline at end of file diff --git a/docs/api-ref/settings/webhook-settings.mdx b/docs/api-ref/settings/webhook-settings.mdx new file mode 100644 index 0000000000..2274336eb0 --- /dev/null +++ b/docs/api-ref/settings/webhook-settings.mdx @@ -0,0 +1,3 @@ +--- +openapi: get 
/settings/webhook +--- diff --git a/docs/api-ref/status/status.mdx b/docs/api-ref/status/status.mdx new file mode 100644 index 0000000000..84b74c746f --- /dev/null +++ b/docs/api-ref/status/status.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /status +--- \ No newline at end of file diff --git a/docs/api-ref/tags/get-tags.mdx b/docs/api-ref/tags/get-tags.mdx new file mode 100644 index 0000000000..8825646faf --- /dev/null +++ b/docs/api-ref/tags/get-tags.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /tags +--- \ No newline at end of file diff --git a/docs/api-ref/topology/create-application.mdx b/docs/api-ref/topology/create-application.mdx new file mode 100644 index 0000000000..6b05b9b7af --- /dev/null +++ b/docs/api-ref/topology/create-application.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /topology/applications +--- \ No newline at end of file diff --git a/docs/api-ref/topology/delete-application.mdx b/docs/api-ref/topology/delete-application.mdx new file mode 100644 index 0000000000..8e770d1245 --- /dev/null +++ b/docs/api-ref/topology/delete-application.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /topology/applications/{application_id} +--- \ No newline at end of file diff --git a/docs/api-ref/topology/get-applications.mdx b/docs/api-ref/topology/get-applications.mdx new file mode 100644 index 0000000000..792146d913 --- /dev/null +++ b/docs/api-ref/topology/get-applications.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /topology/applications +--- \ No newline at end of file diff --git a/docs/api-ref/topology/get-topology-data.mdx b/docs/api-ref/topology/get-topology-data.mdx new file mode 100644 index 0000000000..40519c6ad0 --- /dev/null +++ b/docs/api-ref/topology/get-topology-data.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /topology +--- \ No newline at end of file diff --git a/docs/api-ref/topology/update-application.mdx b/docs/api-ref/topology/update-application.mdx new file mode 100644 index 0000000000..30b72655b8 --- /dev/null +++ b/docs/api-ref/topology/update-application.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /topology/applications/{application_id} +--- \ No newline at end of file diff --git a/docs/api-ref/users/create-user.mdx b/docs/api-ref/users/create-user.mdx new file mode 100644 index 0000000000..2cf63d82e8 --- /dev/null +++ b/docs/api-ref/users/create-user.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /users +--- diff --git a/docs/api-ref/users/delete-user.mdx b/docs/api-ref/users/delete-user.mdx new file mode 100644 index 0000000000..30bc1fa439 --- /dev/null +++ b/docs/api-ref/users/delete-user.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /users/{user_email} +--- diff --git a/docs/api-ref/users/get-users.mdx b/docs/api-ref/users/get-users.mdx new file mode 100644 index 0000000000..5d58f8c452 --- /dev/null +++ b/docs/api-ref/users/get-users.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /users +--- diff --git a/docs/api-ref/users/update-user.mdx b/docs/api-ref/users/update-user.mdx new file mode 100644 index 0000000000..3e0e5eaf21 --- /dev/null +++ b/docs/api-ref/users/update-user.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /users/{user_email} +--- diff --git a/docs/api-ref/whoami/get-tenant-id.mdx b/docs/api-ref/whoami/get-tenant-id.mdx new file mode 100644 index 0000000000..947dc60485 --- /dev/null +++ b/docs/api-ref/whoami/get-tenant-id.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /whoami +--- \ No newline at end of file diff --git a/docs/api-ref/workflows/create-workflow-from-body.mdx b/docs/api-ref/workflows/create-workflow-from-body.mdx new file mode 100644 index 0000000000..27a3f8dc8f --- /dev/null +++ 
b/docs/api-ref/workflows/create-workflow-from-body.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /workflows/json +--- \ No newline at end of file diff --git a/docs/api-ref/workflows/create-workflow.mdx b/docs/api-ref/workflows/create-workflow.mdx new file mode 100644 index 0000000000..f6a47e6013 --- /dev/null +++ b/docs/api-ref/workflows/create-workflow.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /workflows +--- diff --git a/docs/api-ref/workflows/delete-workflow-by-id.mdx b/docs/api-ref/workflows/delete-workflow-by-id.mdx new file mode 100644 index 0000000000..d59228725e --- /dev/null +++ b/docs/api-ref/workflows/delete-workflow-by-id.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /workflows/{workflow_id} +--- diff --git a/docs/api-ref/workflows/export-workflows.mdx b/docs/api-ref/workflows/export-workflows.mdx new file mode 100644 index 0000000000..fb8dd59c3d --- /dev/null +++ b/docs/api-ref/workflows/export-workflows.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /workflows/export +--- diff --git a/docs/api-ref/workflows/get-random-workflow-templates.mdx b/docs/api-ref/workflows/get-random-workflow-templates.mdx new file mode 100644 index 0000000000..0076ea95bf --- /dev/null +++ b/docs/api-ref/workflows/get-random-workflow-templates.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /workflows/random-templates +--- \ No newline at end of file diff --git a/docs/api-ref/workflows/get-raw-workflow-by-id.mdx b/docs/api-ref/workflows/get-raw-workflow-by-id.mdx new file mode 100644 index 0000000000..c7879fb425 --- /dev/null +++ b/docs/api-ref/workflows/get-raw-workflow-by-id.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /workflows/{workflow_id}/raw +--- \ No newline at end of file diff --git a/docs/api-ref/workflows/get-workflow-by-id.mdx b/docs/api-ref/workflows/get-workflow-by-id.mdx new file mode 100644 index 0000000000..c61b2bc3e0 --- /dev/null +++ b/docs/api-ref/workflows/get-workflow-by-id.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /workflows/{workflow_id} +--- diff --git a/docs/api-ref/workflows/get-workflow-execution-status.mdx b/docs/api-ref/workflows/get-workflow-execution-status.mdx new file mode 100644 index 0000000000..146c0b2d6f --- /dev/null +++ b/docs/api-ref/workflows/get-workflow-execution-status.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /workflows/{workflow_id}/runs/{workflow_execution_id} +--- diff --git a/docs/api-ref/workflows/get-workflow-executions-by-alert-fingerprint.mdx b/docs/api-ref/workflows/get-workflow-executions-by-alert-fingerprint.mdx new file mode 100644 index 0000000000..8f5abbcf3a --- /dev/null +++ b/docs/api-ref/workflows/get-workflow-executions-by-alert-fingerprint.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /workflows/executions +--- \ No newline at end of file diff --git a/docs/api-ref/workflows/get-workflow-executions.mdx b/docs/api-ref/workflows/get-workflow-executions.mdx new file mode 100644 index 0000000000..654dfebdb3 --- /dev/null +++ b/docs/api-ref/workflows/get-workflow-executions.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /workflows/executions/list +--- \ No newline at end of file diff --git a/docs/api-ref/workflows/get-workflows.mdx b/docs/api-ref/workflows/get-workflows.mdx new file mode 100644 index 0000000000..5a87788227 --- /dev/null +++ b/docs/api-ref/workflows/get-workflows.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /workflows +--- diff --git a/docs/api-ref/workflows/run-workflow-from-definition.mdx b/docs/api-ref/workflows/run-workflow-from-definition.mdx new file mode 100644 index 0000000000..f80b1b7921 --- /dev/null +++ b/docs/api-ref/workflows/run-workflow-from-definition.mdx @@ -0,0 +1,3 @@ 
+--- +openapi: post /workflows/test +--- \ No newline at end of file diff --git a/docs/api-ref/workflows/run-workflow.mdx b/docs/api-ref/workflows/run-workflow.mdx new file mode 100644 index 0000000000..023879339f --- /dev/null +++ b/docs/api-ref/workflows/run-workflow.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /workflows/{workflow_id}/run +--- diff --git a/docs/api-ref/workflows/update-workflow-by-id.mdx b/docs/api-ref/workflows/update-workflow-by-id.mdx new file mode 100644 index 0000000000..2a8ffae2c2 --- /dev/null +++ b/docs/api-ref/workflows/update-workflow-by-id.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /workflows/{workflow_id} +--- \ No newline at end of file diff --git a/docs/applications/github.mdx b/docs/applications/github.mdx new file mode 100644 index 0000000000..e3bade5de5 --- /dev/null +++ b/docs/applications/github.mdx @@ -0,0 +1,132 @@ +--- +title: "GitHub Application" +sidebarTitle: "GitHub" +description: "The Keep GitHub Application is a powerful tool that enhances your workflow by monitoring file changes under the parent `.keep/` directory in your repositories' pull requests. It generates AI-powered alerts from plain-English descriptions and lets you seamlessly deploy these alerts to your provider using comments." +--- + +## Getting Started + +To start using the Keep GitHub Application, follow these simple steps: + +1. Sign up and log in to **[Keep's platform](https://platform.keephq.dev)**. +2. Install the **Keep GitHub Application** either through the onboarding screen or by visiting **[this link](https://github.com/apps/keephq)**. The installation process is straightforward and user-friendly. + + + + + +3. Connect your preferred provider, such as Datadog, by linking it to Keep's platform. This step allows Keep to seamlessly generate and deploy alerts to your chosen provider. + + + + + +4. You are now ready to go! The Keep GitHub Application is successfully integrated into your GitHub workflow. + +## How does it work? + +The Keep GitHub Application operates seamlessly in the background, ensuring that you stay informed about relevant changes in your repositories. Whenever a pull request is opened or updated, the application monitors the files under the `.keep/` directory. + +Once a change is detected, the GitHub application sends an HTTP request to the smart AI layer of Keep's API. The AI layer analyzes the content of the changed files and, together with context from the provider (existing alerts, sample logs, etc.), generates an alert based on the user-provided plain-English description. The AI-powered alert generation ensures accuracy and relevance. + +After the alert is generated, the Keep GitHub Application automatically comments the alert on the respective file within the pull request. This allows you, as the user, to conveniently review and verify the generated alert. + +If the generated alert meets your requirements and is ready to be deployed, you can simply leave a comment on the file. The comment should include one of the predefined emojis, such as 🚀 or 🆗 (refer to the ["Deploying Alerts with Emojis"](#deploying-alerts-with-emojis) section). The Keep GitHub Application recognizes these emojis as commands to proceed with the deployment process. + +This intuitive workflow streamlines the alert generation and deployment process, providing you with a seamless experience and allowing you to focus on the core aspects of your project.
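+
+To make that flow concrete, here is a minimal, illustrative sketch of the loop in Python. It is **not** the application's actual implementation: the endpoint path, payload fields, and helper names below are assumptions for illustration only.
+
+```python
+# Illustrative sketch only -- not Keep's actual implementation.
+# The endpoint path and payload fields are assumptions.
+import requests
+
+KEEP_API_URL = "https://api.keephq.dev"  # hypothetical default; an alert file's api_url can override it
+
+
+def handle_pull_request(changed_files: list[str], pr_number: int) -> None:
+    """React to a pull request event: generate an alert for each changed .keep/ file."""
+    for path in changed_files:
+        # Only files under the parent .keep/ directory are monitored
+        if not path.startswith(".keep/"):
+            continue
+        with open(path) as f:
+            alert_yaml = f.read()
+        # Ask the AI layer to turn the plain-English description into a provider alert
+        response = requests.post(
+            f"{KEEP_API_URL}/alerts/generate",  # hypothetical endpoint
+            json={"alert": alert_yaml, "pr": pr_number},
+            timeout=30,
+        )
+        response.raise_for_status()
+        # The real application would now comment the generated alert on the file in the PR
+        print(f"Generated alert for {path}: {response.json()}")
+```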
+ +## Monitoring Files Under .keep/ Directory + +The Keep GitHub Application actively monitors the files residing within the `.keep/` directory located at the parent level of your repository. Any changes or updates made to these files will trigger the alert generation process. This allows you to focus on the essential aspects of your project while ensuring that relevant changes are promptly identified and acted upon. + +## Alert File Structure + +Each file under the `.keep/` directory represents a single alert. The structure of an alert file follows the YAML format. Below is an example of an alert file: + +```yaml title=alert-example.yaml +# The alert text in plain English +alert: | + Count the error rate (4xx-5xx) this service has in the last 10 minutes. + Alert when the threshold is above 5% out of total requests. + Send a Slack message to the #alerts-playground channel and include all the context you have + +# The provider you've previously connected and want this alert to be generated for +provider: datadog +# You can use this to override Keep's managed API and have the GitHub application +# use the API that you run locally (using the NGROK URL) +# api_url: https://OVERRIDE-KEEP-MANAGED-API +``` + +The alert file consists of the following components: + +1. **Alert Text**: This section contains the plain English description of the alert. Write a clear and concise explanation of the conditions or criteria that should trigger the alert. You can include any relevant context to facilitate understanding and resolution. + +2. **Provider**: Specify the provider for which the alert should be generated. This ensures that the alert seamlessly integrates with your existing monitoring and notification infrastructure. In the example above, the alert is configured to be generated for Datadog. + +3. **API Override**: Optionally, you can include the `api_url` field to override Keep's managed API. This allows you to use your locally hosted API for advanced customization and integration purposes. + + + **ngrok?** + +Imagine you have a secret hideout in your backyard, but you don't want anyone to know where it is. So, you build a tunnel from your hideout to a tree in your friend's backyard. This way, you can go into the tunnel in your yard and magically come out at the tree in your friend's yard. + +Now, let's say you have a cool website or a game that you want to show your friend, but it's running on your computer at home. Your friend is far away and can't come to your house. So, you need a way to show them your website or game over the internet. + +This is where ngrok comes in! Ngrok is like a magical tunnel, just like the one you built in your backyard. It creates a secure connection between your computer and the internet. It gives your computer a special address that people can use to reach your website or game, even though it's on your computer at home. + +When you start ngrok, it opens up a tunnel between your computer and the internet. It assigns a special address to your computer, like a secret door to your website or game. When your friend enters that address in their web browser, it's as if they're walking through the tunnel and reaching your website or game on your computer. + +So, ngrok is like a magical tunnel that helps you share your website or game with others over the internet, just like the secret tunnel you built to reach your friend's backyard! + +**How to start Keep with ngrok** + +ngrok is controlled with the `USE_NGROK` environment variable.
+Simply run Keep's API using the following command to start with ngrok: `USE_NGROK=true keep api` + +{" "} + + `USE_NGROK` is enabled by default when running with `docker-compose`. + + +**How to obtain the ngrok URL** + +When `USE_NGROK` is set, Keep will start with ngrok in the background.
+You can find your private ngrok URL by looking for the "`ngrok tunnel`" log line: + + ```json + { + "asctime": "0000-00-00 00:00:00,000", + "message": "ngrok tunnel: https://fab5-213-57-123-130.ngrok.io", + ... + } + ``` + +The URL (https://fab5-213-57-123-130.ngrok.io in the example above) is a publicly accessible URL to your Keep API service running locally.
+ +{" "} + + You can check that the ngrok tunnel is working properly by sending a simple + HTTP GET request to `/healthcheck` Try: `curl -v + https://fab5-213-57-123-130.ngrok.io/healthcheck` in our example. + + +
+ +## Deploying Alerts with Emojis + +To deploy an alert to the specified provider, you can simply leave a comment on the respective file using the 🚀 or 🆗 emojis. The Keep GitHub Application recognizes these emojis as commands and will initiate the deployment process accordingly. This streamlined approach ensures a smooth and intuitive experience when deploying alerts. + +For example, by leaving a comment with the 🚀 emoji, you can signal the Keep GitHub Application to deploy the alert to the specified provider (Datadog in our example above). + + + + + +The Keep GitHub Application will either mark the comment with 👍, meaning the alert was successfully deployed, or with 👎 together with an additional comment describing why the deployment failed. + + + The Keep GitHub Application has a retry mechanism that automatically tries to + fix an alert that was not successfully deployed to the provider. If the + deployed alert differs from the originally generated one, the Keep GitHub + Application will post the updated alert in a new comment. + diff --git a/docs/cli/commands/alert-enrich.mdx b/docs/cli/commands/alert-enrich.mdx new file mode 100644 index 0000000000..c1e2c690cb --- /dev/null +++ b/docs/cli/commands/alert-enrich.mdx @@ -0,0 +1,26 @@ +--- +sidebarTitle: "keep alert enrich" +--- + +Enrich an alert. + +## Usage + +``` +Usage: keep alert enrich [OPTIONS] [PARAMS]... +``` + +## Options + + +## CLI Help + +``` +Usage: keep alert enrich [OPTIONS] [PARAMS]... + + Enrich an alert. + +Options: + --fingerprint TEXT The fingerprint of the alert to enrich. [required] + --help Show this message and exit. +``` diff --git a/docs/cli/commands/alert-get.mdx b/docs/cli/commands/alert-get.mdx new file mode 100644 index 0000000000..eb6ae050ca --- /dev/null +++ b/docs/cli/commands/alert-get.mdx @@ -0,0 +1,23 @@ +--- +sidebarTitle: "keep alert get" +--- + +Get an alert. + +## Usage + +``` +Usage: keep alert get [OPTIONS] FINGERPRINT +``` + +## Options + + +## CLI Help + +``` +Usage: keep alert get [OPTIONS] FINGERPRINT + +Options: + --help Show this message and exit. +``` diff --git a/docs/cli/commands/alert-list.mdx b/docs/cli/commands/alert-list.mdx new file mode 100644 index 0000000000..f6e2095c6e --- /dev/null +++ b/docs/cli/commands/alert-list.mdx @@ -0,0 +1,53 @@ +--- +sidebarTitle: "keep alert list" +--- + +List alerts. + +## Usage + +``` +Usage: keep alert list [OPTIONS] +``` + +## Options +* `filter`: + * Type: STRING + * Default: `none` + * Usage: `--filter +-f` + + Filter alerts based on specific attributes. E.g., --filter source=datadog + + +* `export`: + * Type: Path + * Default: `none` + * Usage: `--export` + + Export alerts to a specified JSON file. + + +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit. + + + +## CLI Help + +``` +Usage: keep alert list [OPTIONS] + + List alerts. + +Options: + -f, --filter TEXT Filter alerts based on specific attributes. E.g., + --filter source=datadog + + --export PATH Export alerts to a specified JSON file. + --help Show this message and exit. +``` diff --git a/docs/cli/commands/cli-alert.mdx b/docs/cli/commands/cli-alert.mdx new file mode 100644 index 0000000000..a5beb216c4 --- /dev/null +++ b/docs/cli/commands/cli-alert.mdx @@ -0,0 +1,36 @@ + +# cli alert + +Manage alerts. + +## Usage + +``` +Usage: cli alert [OPTIONS] COMMAND [ARGS]... +``` + +## Options +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit.
+ + + +## CLI Help + +``` +Usage: cli alert [OPTIONS] COMMAND [ARGS]... + + Manage alerts. + +Options: + --help Show this message and exit. + +Commands: + enrich Enrich an alert. + get + list List alerts. +``` diff --git a/docs/cli/commands/cli-api.mdx b/docs/cli/commands/cli-api.mdx new file mode 100644 index 0000000000..7ae2444a69 --- /dev/null +++ b/docs/cli/commands/cli-api.mdx @@ -0,0 +1,42 @@ +--- +title: "api" +sidebarTitle: "keep api" +--- + +Start the API. + +## Usage + +``` +Usage: keep api [OPTIONS] +``` + +## Options +* `multi_tenant`: + * Type: BOOL + * Default: `false` + * Usage: `--multi-tenant` + + Enable multi-tenant mode + + +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit. + + + +## CLI Help + +``` +Usage: keep api [OPTIONS] + + Start the API. + +Options: + --multi-tenant Enable multi-tenant mode + --help Show this message and exit. +``` diff --git a/docs/cli/commands/cli-config-new.mdx b/docs/cli/commands/cli-config-new.mdx new file mode 100644 index 0000000000..cee753e186 --- /dev/null +++ b/docs/cli/commands/cli-config-new.mdx @@ -0,0 +1,56 @@ +--- +sidebarTitle: "keep config new" +--- + +Create a new config. + +## Usage + +``` +Usage: keep config new [OPTIONS]... +``` + +## Options +* `interactive`: + * Type: BOOL + * Default: `True` + * Usage: `--interactive` + + Create config interactively. + +* `url`: + * Type: STRING + * Default: `http://localhost:8080` + * Usage: `--url` + + The URL of the Keep backend server. + +* `api-key`: + * Type: STRING + * Default: `` + * Usage: `--api-key` + + The API key for authenticating with Keep. + +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit. + + + +## CLI Help + +``` +Usage: keep config new [OPTIONS] + + create new config. + +Options: + -u, --url TEXT The url of the keep api + -a, --api-key TEXT The api key for keep + -i, --interactive Interactive mode creating keep config (default True) + --help Show this message and exit. +``` diff --git a/docs/cli/commands/cli-config-show.mdx b/docs/cli/commands/cli-config-show.mdx new file mode 100644 index 0000000000..a217484b6a --- /dev/null +++ b/docs/cli/commands/cli-config-show.mdx @@ -0,0 +1,32 @@ +--- +sidebarTitle: "keep config show" +--- + +Show Keep configuration. + +## Usage + +``` +Usage: keep config show [OPTIONS]... +``` + +## Options +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit. + + + +## CLI Help + +``` +Usage: keep config show [OPTIONS] + + show the current config. + +Options: + --help Show this message and exit. +``` diff --git a/docs/cli/commands/cli-config.mdx b/docs/cli/commands/cli-config.mdx new file mode 100644 index 0000000000..c5c7d8cf9e --- /dev/null +++ b/docs/cli/commands/cli-config.mdx @@ -0,0 +1,37 @@ +--- +title: "config" +sidebarTitle: "keep config" +--- + +Manage Keep configuration. + +## Usage + +``` +Usage: keep config [OPTIONS] COMMAND [ARGS]... +``` + +## Options +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit. + + + +## CLI Help + +``` +Usage: keep config [OPTIONS] COMMAND [ARGS]... + + Manage the config. + +Options: + --help Show this message and exit. + +Commands: + new create new config. + show show the current config.
+``` diff --git a/docs/cli/commands/cli-provider.mdx b/docs/cli/commands/cli-provider.mdx new file mode 100644 index 0000000000..42a83b40fc --- /dev/null +++ b/docs/cli/commands/cli-provider.mdx @@ -0,0 +1,36 @@ + +# cli provider + +Manage providers. + +## Usage + +``` +Usage: cli provider [OPTIONS] COMMAND [ARGS]... +``` + +## Options +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit. + + + +## CLI Help + +``` +Usage: cli provider [OPTIONS] COMMAND [ARGS]... + + Manage providers. + +Options: + --help Show this message and exit. + +Commands: + connect + delete + list List providers. +``` diff --git a/docs/cli/commands/cli-run.mdx b/docs/cli/commands/cli-run.mdx new file mode 100644 index 0000000000..8aa8e7413f --- /dev/null +++ b/docs/cli/commands/cli-run.mdx @@ -0,0 +1,108 @@ +--- +title: "run" +sidebarTitle: "keep run" +--- + +Run the alert. + +## Usage + +``` +Usage: keep run [OPTIONS] +``` + +## Options +* `alerts_directory`: + * Type: STRING + * Default: `none` + * Usage: `--alerts-directory +--alerts-file +-af` + + The path to the alert yaml/alerts directory + + +* `alert_url`: + * Type: STRING + * Default: `none` + * Usage: `--alert-url +-au` + + A url that can be used to download an alert yaml NOTE: This argument is mutually exclusive with alerts_directory + + +* `interval`: + * Type: INT + * Default: `0` + * Usage: `--interval +-i` + + When interval is set, Keep will run the alert every INTERVAL seconds + + +* `providers_file`: + * Type: STRING + * Default: `providers.yaml` + * Usage: `--providers-file +-p` + + The path to the providers yaml + + +* `tenant_id`: + * Type: STRING + * Default: `singletenant` + * Usage: `--tenant-id +-t` + + The tenant id + + +* `api_key`: + * Type: STRING + * Default: `none` + * Usage: `--api-key` + + The API key for keep's API + + +* `api_url`: + * Type: STRING + * Default: `https://s.keephq.dev` + * Usage: `--api-url` + + The URL for keep's API + + +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit. + + + +## CLI Help + +``` +Usage: keep run [OPTIONS] + + Run the alert. + +Options: + -af, --alerts-directory, --alerts-file PATH + The path to the alert yaml/alerts directory + -au, --alert-url TEXT A url that can be used to download an alert + yaml NOTE: This argument is mutually + exclusive with alerts_directory + + -i, --interval INTEGER When interval is set, Keep will run the + alert every INTERVAL seconds + + -p, --providers-file PATH The path to the providers yaml + -t, --tenant-id TEXT The tenant id + --api-key TEXT The API key for keep's API + --api-url TEXT The URL for keep's API + --help Show this message and exit. +``` diff --git a/docs/cli/commands/cli-version.mdx b/docs/cli/commands/cli-version.mdx new file mode 100644 index 0000000000..8334abefef --- /dev/null +++ b/docs/cli/commands/cli-version.mdx @@ -0,0 +1,33 @@ +--- +title: "version" +sidebarTitle: "keep version" +--- + +Get the library version. + +## Usage + +``` +Usage: keep version [OPTIONS] +``` + +## Options +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit. + + + +## CLI Help + +``` +Usage: keep version [OPTIONS] + + Get the library version. + +Options: + --help Show this message and exit. 
+``` diff --git a/docs/cli/commands/cli-whoami.mdx b/docs/cli/commands/cli-whoami.mdx new file mode 100644 index 0000000000..af77336756 --- /dev/null +++ b/docs/cli/commands/cli-whoami.mdx @@ -0,0 +1,33 @@ +--- +title: "whoami" +sidebarTitle: "keep whoami" +--- + +Verify the api key auth. + +## Usage + +``` +Usage: keep whoami [OPTIONS] +``` + +## Options +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit. + + + +## CLI Help + +``` +Usage: keep whoami [OPTIONS] + + Verify the api key auth. + +Options: + --help Show this message and exit. +``` diff --git a/docs/cli/commands/cli-workflow.mdx b/docs/cli/commands/cli-workflow.mdx new file mode 100644 index 0000000000..6b98cd5c9d --- /dev/null +++ b/docs/cli/commands/cli-workflow.mdx @@ -0,0 +1,37 @@ + +# cli workflow + +Manage workflows. + +## Usage + +``` +Usage: cli workflow [OPTIONS] COMMAND [ARGS]... +``` + +## Options +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit. + + + +## CLI Help + +``` +Usage: cli workflow [OPTIONS] COMMAND [ARGS]... + + Manage workflows. + +Options: + --help Show this message and exit. + +Commands: + apply Apply a workflow. + list List workflows. + run Run a workflow with a specified ID and fingerprint. + runs Manage workflows executions. +``` diff --git a/docs/cli/commands/cli.mdx b/docs/cli/commands/cli.mdx new file mode 100644 index 0000000000..a96c406f1a --- /dev/null +++ b/docs/cli/commands/cli.mdx @@ -0,0 +1,71 @@ + +# cli + +Run Keep CLI. + +## Usage + +``` +Usage: cli [OPTIONS] COMMAND [ARGS]... +``` + +## Options +* `verbose`: + * Type: IntRange(0, None) + * Default: `0` + * Usage: `--verbose +-v` + + Enable verbose output. + + +* `json`: + * Type: BOOL + * Default: `false` + * Usage: `--json +-j` + + Enable json output. + + +* `keep_config`: + * Type: STRING + * Default: `keep.yaml` + * Usage: `--keep-config +-c` + + The path to the keep config file (default keep.yaml) + + +* `help`: + * Type: BOOL + * Default: `false` + * Usage: `--help` + + Show this message and exit. + + + +## CLI Help + +``` +Usage: cli [OPTIONS] COMMAND [ARGS]... + + Run Keep CLI. + +Options: + -v, --verbose Enable verbose output. + -j, --json Enable json output. + -c, --keep-config TEXT The path to the keep config file (default keep.yaml) + --help Show this message and exit. + +Commands: + alert Manage alerts. + api Start the API. + config Get the config. + provider Manage providers. + run Run a workflow. + version Get the library version. + whoami Verify the api key auth. + workflow Manage workflows. +``` diff --git a/docs/cli/commands/extraction-create.mdx b/docs/cli/commands/extraction-create.mdx new file mode 100644 index 0000000000..eeb31230be --- /dev/null +++ b/docs/cli/commands/extraction-create.mdx @@ -0,0 +1,93 @@ +--- +sidebarTitle: "keep extraction create" +--- + +Create a extraction rule. + +## Usage + +``` +Usage: keep extraction create [OPTIONS] +``` + +## Options + +* `name` + * Type: STRING + * Default: `` + * Usage: `--name ` + + The name of the extraction. + +* `description` + * Type: STRING + * Default: `` + * Usage: `--description ` + + The description of the extraction. + +* `priority` + * Type: INTEGER RANGE + * Default: `0` + * Usage: `--priority ` + + The priority of the extraction, higher priority means this rule will execute first. `0<=x<=100`. + +* `pre` + * Type: BOOL + * Default: `false` + * Usage: `--pre
`
+
+  Whether this rule should be applied before or after the alert is standardized.
+
+* `attribute`
+  * Type: STRING
+  * Default: ``
+  * Usage: `--attribute `
+
+  Event attribute name to extract from.
+
+* `regex`
+  * Type: STRING
+  * Default: ``
+  * Usage: `--regex `
+
+  The regex to extract by. Use Python regex syntax with group matching.
+
+* `condition`
+  * Type: STRING
+  * Default: ``
+  * Usage: `--condition `
+
+  A CEL-based condition.
+
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+## CLI Help
+
+```
+Usage: keep extraction create [OPTIONS]
+
+  Create an extraction rule.
+
+Options:
+  -n, --name TEXT               The name of the extraction.  [required]
+  -d, --description TEXT        The description of the extraction.
+  -p, --priority INTEGER RANGE  The priority of the extraction, higher
+                                priority means this rule will execute first.
+                                [0<=x<=100]
+  --pre BOOLEAN                 Whether this rule should be applied before or
+                                after the alert is standardized.
+  -a, --attribute TEXT          Event attribute name to extract from.
+                                [required]
+  -r, --regex TEXT              The regex rule to extract by. Regex format
+                                should be like python regex pattern for group
+                                matching.  [required]
+  -c, --condition TEXT          CEL based condition.  [required]
+  --help                        Show this message and exit.
+```
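+
+## Example
+
+A minimal invocation sketch; the rule name, attribute, regex, and condition below are purely illustrative:
+
+```
+keep extraction create \
+  --name extract-service \
+  --description "Extract the service name from the alert message" \
+  --attribute message \
+  --regex "(?P<service>[a-z-]+)" \
+  --condition "message.contains('service')" \
+  --priority 1
+```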
diff --git a/docs/cli/commands/extraction-delete.mdx b/docs/cli/commands/extraction-delete.mdx
new file mode 100644
index 0000000000..cd233e6b0e
--- /dev/null
+++ b/docs/cli/commands/extraction-delete.mdx
@@ -0,0 +1,41 @@
+---
+sidebarTitle: "keep extraction delete"
+---
+
+Delete an extraction with a specified ID.
+
+## Usage
+
+```
+Usage: keep extraction delete [OPTIONS]
+```
+
+## Options
+
+* `extraction-id`
+  * Type: INTEGER
+  * Default: ``
+  * Usage: `--extraction-id `
+
+  The ID of the extraction to delete.
+
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+
+
+## CLI Help
+
+```
+Usage: keep extraction delete [OPTIONS]
+
+  Delete an extraction with a specified ID.
+
+Options:
+  --extraction-id INTEGER  The ID of the extraction to delete.  [required]
+  --help                   Show this message and exit.
+```
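+
+## Example
+
+For instance, to delete the extraction rule with ID 1 (list your extractions first to find the real ID):
+
+```
+keep extraction delete --extraction-id 1
+```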
diff --git a/docs/cli/commands/extractions-list.mdx b/docs/cli/commands/extractions-list.mdx
new file mode 100644
index 0000000000..9a633fd001
--- /dev/null
+++ b/docs/cli/commands/extractions-list.mdx
@@ -0,0 +1,33 @@
+---
+sidebarTitle: "keep extraction list"
+---
+
+List extractions.
+
+## Usage
+
+```
+Usage: keep extraction list [OPTIONS]
+```
+
+## Options
+
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+## CLI Help
+
+```
+Usage: keep extraction list [OPTIONS]
+
+  List extractions.
+
+Options:
+  --help  Show this message and exit.
+```
diff --git a/docs/cli/commands/mappings-create.mdx b/docs/cli/commands/mappings-create.mdx
new file mode 100644
index 0000000000..10cf7bd4e3
--- /dev/null
+++ b/docs/cli/commands/mappings-create.mdx
@@ -0,0 +1,75 @@
+---
+sidebarTitle: "keep mappings create"
+---
+
+Create a mapping rule.
+
+## Usage
+
+```
+Usage: keep mappings create [OPTIONS]
+```
+
+## Options
+
+* `name`
+  * Type: STRING
+  * Default: ``
+  * Usage: `--name `
+
+  The name of the mapping.
+
+* `description`
+  * Type: STRING
+  * Default: ``
+  * Usage: `--description `
+
+  The description of the mapping.
+
+* `file`
+  * Type: PATH
+  * Default: ``
+  * Usage: `--file `
+
+  The mapping file. Must be a CSV file.
+
+* `matchers`
+  * Type: STRING
+  * Default: ``
+  * Usage: `--matchers `
+
+  The matchers of the mapping, as a comma-separated list of strings.
+
+* `priority`
+  * Type: INTEGER RANGE
+  * Default: `0`
+  * Usage: `--priority `
+
+  The priority of the mapping, higher priority means this rule will execute first. `0<=x<=100`.
+
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+## CLI Help
+
+```
+Usage: keep mappings create [OPTIONS]
+
+  Create a mapping rule.
+
+Options:
+  -n, --name TEXT               The name of the mapping.  [required]
+  -d, --description TEXT        The description of the mapping.
+  -f, --file PATH               The mapping file. Must be a CSV file.
+                                [required]
+  -m, --matchers TEXT           The matchers of the mapping, as a comma-
+                                separated list of strings.  [required]
+  -p, --priority INTEGER RANGE  The priority of the mapping, higher priority
+                                means this rule will execute first.
+                                [0<=x<=100]
+  --help                        Show this message and exit.
+```
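+
+## Example
+
+A sketch of a typical invocation; the CSV path and matcher name are illustrative, and the matcher columns must exist in the CSV header:
+
+```
+keep mappings create \
+  --name service-owners \
+  --description "Enrich alerts with the owning team" \
+  --file ./service-owners.csv \
+  --matchers "service" \
+  --priority 1
+```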
diff --git a/docs/cli/commands/mappings-delete.mdx b/docs/cli/commands/mappings-delete.mdx
new file mode 100644
index 0000000000..fc54d57d71
--- /dev/null
+++ b/docs/cli/commands/mappings-delete.mdx
@@ -0,0 +1,41 @@
+---
+sidebarTitle: "keep mappings delete"
+---
+
+Delete a mapping with a specified ID.
+
+## Usage
+
+```
+Usage: keep mappings delete [OPTIONS]
+```
+
+## Options
+
+* `mapping-id`
+  * Type: INTEGER
+  * Default: ``
+  * Usage: `--mapping-id `
+
+  The ID of the mapping to delete.
+
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+
+
+## CLI Help
+
+```
+Usage: keep mappings delete [OPTIONS]
+
+  Delete a mapping with a specified ID
+
+Options:
+  --mapping-id INTEGER  The ID of the mapping to delete.  [required]
+  --help                Show this message and exit.
+```
diff --git a/docs/cli/commands/mappings-list.mdx b/docs/cli/commands/mappings-list.mdx
new file mode 100644
index 0000000000..79558047de
--- /dev/null
+++ b/docs/cli/commands/mappings-list.mdx
@@ -0,0 +1,33 @@
+---
+sidebarTitle: "keep mappings list"
+---
+
+List mappings.
+
+## Usage
+
+```
+Usage: keep mappings list [OPTIONS]
+```
+
+## Options
+
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+## CLI Help
+
+```
+Usage: keep mappings list [OPTIONS]
+
+  List mappings.
+
+Options:
+  --help  Show this message and exit.
+```
diff --git a/docs/cli/commands/provider-connect.mdx b/docs/cli/commands/provider-connect.mdx
new file mode 100644
index 0000000000..e7e6419606
--- /dev/null
+++ b/docs/cli/commands/provider-connect.mdx
@@ -0,0 +1,24 @@
+---
+sidebarTitle: "keep provider connect"
+---
+
+Connect a provider.
+
+## Usage
+
+```
+Usage: keep provider connect [OPTIONS] PROVIDER_TYPE [PARAMS]...
+```
+
+## Options
+
+
+## CLI Help
+
+```
+Usage: keep provider connect [OPTIONS] PROVIDER_TYPE [PARAMS]...
+
+Options:
+  -h, --help                Help on how to install this provider.
+  -n, --provider-name TEXT  Every provider should have a name.
+```
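+
+## Example
+
+A sketch of connecting a provider. The provider type and its parameters below are illustrative; each provider takes different parameters, so run the command with `-h` first to see what it expects:
+
+```
+keep provider connect slack --provider-name slack-demo --webhook-url https://hooks.slack.com/services/...
+```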
diff --git a/docs/cli/commands/provider-delete.mdx b/docs/cli/commands/provider-delete.mdx
new file mode 100644
index 0000000000..cd241e59b1
--- /dev/null
+++ b/docs/cli/commands/provider-delete.mdx
@@ -0,0 +1,23 @@
+---
+sidebarTitle: "keep provider delete"
+---
+
+Delete a provider.
+
+## Usage
+
+```
+Usage: keep provider delete [OPTIONS] [PROVIDER_ID]
+```
+
+## Options
+
+
+## CLI Help
+
+```
+Usage: keep provider delete [OPTIONS] [PROVIDER_ID]
+
+Options:
+  --help  Show this message and exit.
+```
diff --git a/docs/cli/commands/provider-list.mdx b/docs/cli/commands/provider-list.mdx
new file mode 100644
index 0000000000..8725983e02
--- /dev/null
+++ b/docs/cli/commands/provider-list.mdx
@@ -0,0 +1,42 @@
+---
+sidebarTitle: "keep provider list"
+---
+
+List providers.
+
+## Usage
+
+```
+Usage: keep provider list [OPTIONS]
+```
+
+## Options
+* `available`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--available
+-a`
+
+  List providers that you can install.
+
+
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+
+
+## CLI Help
+
+```
+Usage: keep provider list [OPTIONS]
+
+  List providers.
+
+Options:
+  -a, --available  List providers that you can install.
+  --help           Show this message and exit.
+```
diff --git a/docs/cli/commands/runs-list.mdx b/docs/cli/commands/runs-list.mdx
new file mode 100644
index 0000000000..0d92bc4553
--- /dev/null
+++ b/docs/cli/commands/runs-list.mdx
@@ -0,0 +1,32 @@
+---
+sidebarTitle: "keep workflow runs list"
+---
+
+List workflow executions.
+
+## Usage
+
+```
+Usage: keep workflow runs list [OPTIONS]
+```
+
+## Options
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+
+
+## CLI Help
+
+```
+Usage: keep workflow runs list [OPTIONS]
+
+  List workflow executions.
+
+Options:
+  --help  Show this message and exit.
+```
diff --git a/docs/cli/commands/runs-logs.mdx b/docs/cli/commands/runs-logs.mdx
new file mode 100644
index 0000000000..c9e7db491d
--- /dev/null
+++ b/docs/cli/commands/runs-logs.mdx
@@ -0,0 +1,25 @@
+---
+sidebarTitle: "keep workflow runs logs"
+---
+
+Get workflow execution logs.
+
+## Usage
+
+```
+Usage: keep workflow runs logs [OPTIONS] WORKFLOW_EXECUTION_ID
+```
+
+## Options
+
+
+## CLI Help
+
+```
+Usage: keep workflow runs logs [OPTIONS] WORKFLOW_EXECUTION_ID
+
+  Get workflow execution logs.
+
+Options:
+  --help  Show this message and exit.
+```
diff --git a/docs/cli/commands/workflow-apply.mdx b/docs/cli/commands/workflow-apply.mdx
new file mode 100644
index 0000000000..1ed9d1ab39
--- /dev/null
+++ b/docs/cli/commands/workflow-apply.mdx
@@ -0,0 +1,43 @@
+---
+sidebarTitle: "keep workflow apply"
+---
+
+
+Apply a workflow.
+
+## Usage
+
+```
+Usage: keep workflow apply [OPTIONS]
+```
+
+## Options
+* `file` (REQUIRED):
+  * Type: Path
+  * Default: `none`
+  * Usage: `--file
+-f`
+
+  The workflow file
+
+
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+
+
+## CLI Help
+
+```
+Usage: keep workflow apply [OPTIONS]
+
+  Apply a workflow.
+
+Options:
+  -f, --file PATH  The workflow file  [required]
+  --help           Show this message and exit.
+```
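+
+## Example
+
+Apply a single workflow file, or point `-f` at a directory to apply every workflow in it (paths are illustrative):
+
+```
+keep workflow apply -f examples/workflows/db_disk_space.yml
+```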
diff --git a/docs/cli/commands/workflow-list.mdx b/docs/cli/commands/workflow-list.mdx
new file mode 100644
index 0000000000..18c98f02f3
--- /dev/null
+++ b/docs/cli/commands/workflow-list.mdx
@@ -0,0 +1,32 @@
+---
+sidebarTitle: "keep workflow list"
+---
+
+List workflows.
+
+## Usage
+
+```
+Usage: keep workflow list [OPTIONS]
+```
+
+## Options
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+
+
+## CLI Help
+
+```
+Usage: keep workflow list [OPTIONS]
+
+  List workflows.
+
+Options:
+  --help  Show this message and exit.
+```
diff --git a/docs/cli/commands/workflow-run.mdx b/docs/cli/commands/workflow-run.mdx
new file mode 100644
index 0000000000..2b9193a920
--- /dev/null
+++ b/docs/cli/commands/workflow-run.mdx
@@ -0,0 +1,50 @@
+---
+sidebarTitle: "keep workflow run"
+---
+
+Run a workflow with a specified ID and fingerprint.
+
+## Usage
+
+```
+Usage: keep workflow run [OPTIONS]
+```
+
+## Options
+* `workflow_id` (REQUIRED):
+  * Type: STRING
+  * Default: `none`
+  * Usage: `--workflow-id`
+
+  The ID (UUID or name) of the workflow to run
+
+
+* `fingerprint` (REQUIRED):
+  * Type: STRING
+  * Default: `none`
+  * Usage: `--fingerprint`
+
+  The fingerprint to query the payload
+
+
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+
+
+## CLI Help
+
+```
+Usage: keep workflow run [OPTIONS]
+
+  Run a workflow with a specified ID and fingerprint.
+
+Options:
+  --workflow-id TEXT  The ID (UUID or name) of the workflow to run  [required]
+  --fingerprint TEXT  The fingerprint to query the payload  [required]
+  --help              Show this message and exit.
+```
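+
+## Example
+
+A sketch of triggering a workflow against the alert identified by a fingerprint; both values are illustrative:
+
+```
+keep workflow run --workflow-id my-workflow --fingerprint "my-alert-fingerprint"
+```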
diff --git a/docs/cli/commands/workflow-runs.mdx b/docs/cli/commands/workflow-runs.mdx
new file mode 100644
index 0000000000..051ac4b29a
--- /dev/null
+++ b/docs/cli/commands/workflow-runs.mdx
@@ -0,0 +1,36 @@
+---
+sidebarTitle: "keep workflow runs"
+---
+
+Manage workflow executions.
+
+## Usage
+
+```
+Usage: keep workflow runs [OPTIONS] COMMAND [ARGS]...
+```
+
+## Options
+* `help`:
+  * Type: BOOL
+  * Default: `false`
+  * Usage: `--help`
+
+  Show this message and exit.
+
+
+
+## CLI Help
+
+```
+Usage: keep workflow runs [OPTIONS] COMMAND [ARGS]...
+
+  Manage workflow executions.
+
+Options:
+  --help  Show this message and exit.
+
+Commands:
+  list  List workflow executions.
+  logs  Get workflow execution logs.
+```
diff --git a/docs/cli/github-actions.mdx b/docs/cli/github-actions.mdx
new file mode 100644
index 0000000000..8925e923e9
--- /dev/null
+++ b/docs/cli/github-actions.mdx
@@ -0,0 +1,59 @@
+---
+title: "Sync Keep Workflows With Github Action"
+---
+
+This documentation provides a detailed guide on how to use the Keep CLI within a GitHub Actions workflow to synchronize and manage Keep workflows from a directory. This setup automates the process of uploading workflows to Keep, making it easier to maintain and update them.
+
+
+
+
+
+### Configuration
+To set up this workflow in your repository:
+
+- Add the workflow YAML file to your repository under `.github/workflows/`.
+- Set your Keep API Key and URL as secrets in your repository settings if you haven't already.
+- Make changes to your workflows in the specified directory or trigger the workflow manually through the GitHub UI.
+- Change 'examples/workflows/**' to the directory where you store your Keep workflows.
+
+
+### GitHub Action Workflow
+This GitHub Actions workflow automatically synchronizes workflows from a specified directory to Keep whenever there are changes. It also allows for manual triggering with optional parameters.
+
+```yaml
+# A workflow that syncs Keep workflows from a directory
+name: "Sync Keep Workflows"
+
+on:
+    push:
+        paths:
+          - 'examples/workflows/**'
+    workflow_dispatch:
+        inputs:
+            keep_api_key:
+              description: 'Keep API Key'
+              required: false
+            keep_api_url:
+              description: 'Keep API URL'
+              required: false
+              default: 'https://api.keephq.dev'
+
+jobs:
+    sync-workflows:
+        name: Sync workflows to Keep
+        runs-on: ubuntu-latest
+        container:
+            image: us-central1-docker.pkg.dev/keephq/keep/keep-cli:latest
+        env:
+            KEEP_API_KEY: ${{ secrets.KEEP_API_KEY || github.event.inputs.keep_api_key }}
+            KEEP_API_URL: ${{ secrets.KEEP_API_URL || github.event.inputs.keep_api_url }}
+
+        steps:
+        - name: Check out the repo
+          uses: actions/checkout@v2
+
+        - name: Run Keep CLI
+          run: |
+            keep workflow apply -f examples/workflows
+
+```
diff --git a/docs/cli/installation.mdx b/docs/cli/installation.mdx
new file mode 100644
index 0000000000..3020dd4712
--- /dev/null
+++ b/docs/cli/installation.mdx
@@ -0,0 +1,83 @@
+---
+title: "Installation"
+---
+Missing an installation? Submit a new installation request and we will add it as soon as we can.
+
+
+We recommend installing the Keep CLI with Python 3.11 for optimal compatibility and performance.
+This ensures seamless integration with all dependencies, including pyarrow, which currently does not support Python 3.12.
+
+
+Need the Keep CLI on another Python version? Feel free to contact us!
+
+## Clone and install (Option 1)
+
+### Install
+First, clone Keep repository:
+
+```shell
+git clone https://github.com/keephq/keep.git && cd keep
+```
+
+Install Keep CLI with `pip`:
+
+```shell
+pip install .
+```
+or with `poetry`:
+
+```shell
+poetry install
+```
+
+Keep should now be installed locally and accessible from your CLI. Test it by executing:
+
+```
+keep version
+```
+
+### Test
+Get a Slack Incoming Webhook using [this tutorial](https://api.slack.com/messaging/webhooks) and use Keep to configure it:
+
+```
+keep config provider --provider-type slack --provider-id slack-demo
+```
+Paste the Slack Incoming Webhook URL (e.g. https://hooks.slack.com/services/...) and you're good to go 👌
+
+Let's now execute our example "Paper DB has insufficient disk space" alert:
+
+```bash
+keep run --alerts-file examples/workflows/db_disk_space.yml
+```
+
+Congrats 🥳 You should have received your first "Dunder Mifflin Paper Company" alert in Slack by now.
+
+
+## Docker image (Option 2)
+### Install
+
+```
+docker run -v ${PWD}:/app -it us-central1-docker.pkg.dev/keephq/keep/keep-cli config provider --provider-type slack --provider-id slack-demo
+```
+
+### Test
+```
+docker run -v ${PWD}:/app -it us-central1-docker.pkg.dev/keephq/keep/keep-cli -j run --alert-url https://raw.githubusercontent.com/keephq/keep/main/examples/alerts/db_disk_space.yml
+```
+
+
+## Enable Auto Completion
+Keep's CLI supports shell auto-completion, which can make your life a whole lot easier 😌
+If you're using zsh:
+
+```shell title=~/.zshrc
+eval "$(_KEEP_COMPLETE=zsh_source keep)"
+```
+
+If you're using bash:
+
+```bash title=~/.bashrc
+eval "$(_KEEP_COMPLETE=bash_source keep)"
+```
+
+Using eval means that the command is invoked and evaluated every time a shell is started, which can delay shell responsiveness. To speed it up, write the generated script to a file, then source that.
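+
+For example, for zsh (the cache file path is just a suggestion):
+
+```shell
+# Generate the completion script once and cache it
+_KEEP_COMPLETE=zsh_source keep > ~/.keep-complete.zsh
+
+# Then source the cached copy from ~/.zshrc instead of using eval
+source ~/.keep-complete.zsh
+```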
diff --git a/docs/cli/overview.mdx b/docs/cli/overview.mdx
new file mode 100644
index 0000000000..f74599697b
--- /dev/null
+++ b/docs/cli/overview.mdx
@@ -0,0 +1,7 @@
+---
+title: "Overview"
+---
+
+The Keep CLI allows you to manage Keep from the command line.
+
+Start by [installing](/cli/installation) Keep CLI and [running a workflow](/cli/commands/cli-run).
diff --git a/docs/deployment/authentication/auth0-auth.mdx b/docs/deployment/authentication/auth0-auth.mdx
new file mode 100644
index 0000000000..9996aca01b
--- /dev/null
+++ b/docs/deployment/authentication/auth0-auth.mdx
@@ -0,0 +1,44 @@
+---
+title: "Auth0 Authentication"
+---
+
+This feature is a part of Keep Enterprise. Talk to us to get access: https://www.keephq.dev/meet-keep
+
+Keep supports multi-tenant environments through Auth0, enabling separate tenants to operate independently within the same Keep platform.
+
+
+  
+
+
+### When to Use
+
+- **Already using Auth0:** If you are already using Auth0 in your organization, you can leverage it as Keep's authentication provider.
+- **SSO/SAML:** Auth0 supports various Single Sign-On (SSO) and SAML protocols, allowing you to integrate Keep with your existing identity management systems.
+
+### Setup Instructions
+
+To start Keep with Auth0 authentication, set the following environment variables:
+
+#### Frontend Environment Variables
+
+| Environment Variable | Description | Required | Default Value |
+|--------------------|-----------|:--------:|:-------------:|
+| AUTH_TYPE | Set to 'AUTH0' for Auth0 authentication | Yes | - |
+| AUTH0_DOMAIN | Your Auth0 domain | Yes | - |
+| AUTH0_CLIENT_ID | Your Auth0 client ID | Yes | - |
+| AUTH0_CLIENT_SECRET | Your Auth0 client secret | Yes | - |
+| AUTH0_ISSUER | Your Auth0 API issuer | Yes | - |
+
+#### Backend Environment Variables
+
+| Environment Variable | Description | Required | Default Value |
+|--------------------|-----------|:--------:|:-------------:|
+| AUTH_TYPE | Set to 'AUTH0' for Auth0 authentication | Yes | - |
+| AUTH0_MANAGEMENT_DOMAIN | Your Auth0 management domain | Yes | - |
+| AUTH0_CLIENT_ID | Your Auth0 client ID | Yes | - |
+| AUTH0_CLIENT_SECRET | Your Auth0 client secret | Yes | - |
+| AUTH0_AUDIENCE | Your Auth0 API audience | Yes | - |
+
+### Example configuration
+
+Use the `docker-compose-with-auth0.yml` for an easy setup, which includes necessary environment variables for enabling Auth0 authentication.
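+
+If you prefer wiring the variables yourself, here is a minimal sketch; every value is a placeholder for your own Auth0 tenant:
+
+```
+# Frontend
+AUTH_TYPE=AUTH0
+AUTH0_DOMAIN=your-tenant.us.auth0.com
+AUTH0_CLIENT_ID=<client-id>
+AUTH0_CLIENT_SECRET=<client-secret>
+AUTH0_ISSUER=https://your-tenant.us.auth0.com/
+
+# Backend
+AUTH_TYPE=AUTH0
+AUTH0_MANAGEMENT_DOMAIN=your-tenant.us.auth0.com
+AUTH0_CLIENT_ID=<client-id>
+AUTH0_CLIENT_SECRET=<client-secret>
+AUTH0_AUDIENCE=<api-audience>
+```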
diff --git a/docs/deployment/authentication/db-auth.mdx b/docs/deployment/authentication/db-auth.mdx
new file mode 100644
index 0000000000..1a1e748289
--- /dev/null
+++ b/docs/deployment/authentication/db-auth.mdx
@@ -0,0 +1,33 @@
+---
+title: "DB Authentication"
+---
+
+For applications requiring user management and authentication, Keep supports basic authentication with username and password.
+
+
+  
+
+
+
+### When to Use
+
+- **Self-Hosted Deployments:** When you're deploying Keep for individual use or within an organization.
+- **Enhanced Security:** Provides a simple yet effective layer of security for your Keep instance.
+
+### Setup Instructions
+
+To start Keep with DB authentication, set the following environment variables:
+
+| Environment Variable | Description | Required | Frontend/Backend | Default Value |
+|--------------------|:-----------:|:--------:|:----------------:|:-------------:|
+| AUTH_TYPE | Set to 'DB' for database authentication | Yes | Both | - |
+| KEEP_JWT_SECRET | Secret for JWT token generation | Yes | Backend | - |
+| KEEP_DEFAULT_USERNAME | Default admin username | No | Backend | admin |
+| KEEP_DEFAULT_PASSWORD | Default admin password | No | Backend | admin |
+| KEEP_FORCE_RESET_DEFAULT_PASSWORD | Override the current admin password | No | Backend | false |
+
+### Example configuration
+
+Use the `docker-compose-with-auth.yml` for an easy setup, which includes necessary environment variables for enabling basic authentication.
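+
+For reference, a minimal sketch of the required variables (generate your own strong secret):
+
+```
+# Frontend and Backend
+AUTH_TYPE=DB
+
+# Backend only
+KEEP_JWT_SECRET=<strong-random-secret>
+```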
diff --git a/docs/deployment/authentication/keycloak-auth.mdx b/docs/deployment/authentication/keycloak-auth.mdx
new file mode 100644
index 0000000000..255e67ed23
--- /dev/null
+++ b/docs/deployment/authentication/keycloak-auth.mdx
@@ -0,0 +1,54 @@
+---
+title: "Keycloak Authentication"
+---
+
+Keep supports Keycloak in a "managed" way where Keep auto-provisions all resources (realm, client, etc.). Keep can also work with externally managed Keycloak. To learn how, please contact the team on [Slack](https://slack.keephq.dev).
+
+This feature is a part of Keep Enterprise. Talk to us to get access: https://www.keephq.dev/meet-keep
+
+Keep integrates with Keycloak to provide a powerful and flexible authentication system for multi-tenant applications, supporting Single Sign-On (SSO) and SAML.
+
+
+  
+
+
+### When to Use
+
+- **On Prem:** When deploying Keep on-premises and requiring a robust authentication system.
+- **OSS:** If you prefer using open-source software for your authentication needs.
+- **Enterprise Protocols:** When you need support for enterprise-level protocols like SAML and OpenID Connect.
+- **Fully Customized:** When you need a highly customizable authentication solution.
+- **RBAC:** When you require Role-Based Access Control for managing user permissions.
+- **User and Group Management:** When you need advanced user and group management capabilities.
+
+### Setup Instructions
+
+To start Keep with Keycloak authentication, set the following environment variables:
+
+#### Frontend Environment Variables
+
+| Environment Variable | Description | Required | Default Value |
+|--------------------|-----------|:--------:|:-------------:|
+| AUTH_TYPE | Set to 'KEYCLOAK' for Keycloak authentication | Yes | - |
+| KEYCLOAK_ID | Your Keycloak client ID (e.g. keep) | Yes | - |
+| KEYCLOAK_ISSUER | Full URL of your Keycloak issuer, e.g. http://localhost:8181/auth/realms/keep | Yes | - |
+
+#### Backend Environment Variables
+
+| Environment Variable | Description | Required | Default Value |
+|--------------------|-----------|:--------:|:-------------:|
+| AUTH_TYPE | Set to 'KEYCLOAK' for Keycloak authentication | Yes | - |
+| KEYCLOAK_URL | Full URL to your Keycloak server | Yes | http://localhost:8181/auth/ |
+| KEYCLOAK_REALM | Your Keycloak realm | Yes | keep |
+| KEYCLOAK_CLIENT_ID | Your Keycloak client ID | Yes | keep |
+| KEYCLOAK_CLIENT_SECRET | Your Keycloak client secret | Yes | keep-keycloak-secret |
+| KEYCLOAK_ADMIN_USER | Admin username for Keycloak | Yes | keep_admin |
+| KEYCLOAK_ADMIN_PASSWORD | Admin password for Keycloak | Yes | keep_admin |
+| KEYCLOAK_AUDIENCE | Audience for Keycloak | Yes | realm-management |
+
+
+### Example configuration
+
+To get a better understanding of how to use Keep together with Keycloak, you can:
+- See the [Keycloak](https://github.com/keephq/keep/tree/main/tests) directory for configuration, realm.json, etc.
+- See Keep + Keycloak [docker-compose example](https://github.com/keephq/keep/blob/main/keycloak/docker-compose.yml)
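+
+For a quick reference, here is a minimal sketch of the variables, using the defaults from the tables above:
+
+```
+# Frontend
+AUTH_TYPE=KEYCLOAK
+KEYCLOAK_ID=keep
+KEYCLOAK_ISSUER=http://localhost:8181/auth/realms/keep
+
+# Backend
+AUTH_TYPE=KEYCLOAK
+KEYCLOAK_URL=http://localhost:8181/auth/
+KEYCLOAK_REALM=keep
+KEYCLOAK_CLIENT_ID=keep
+KEYCLOAK_CLIENT_SECRET=keep-keycloak-secret
+```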
diff --git a/docs/deployment/authentication/no-auth.mdx b/docs/deployment/authentication/no-auth.mdx
new file mode 100644
index 0000000000..cde56e73ef
--- /dev/null
+++ b/docs/deployment/authentication/no-auth.mdx
@@ -0,0 +1,23 @@
+---
+title: "No Authentication"
+---
+Using this configuration in production is not secure and strongly discouraged.
+
+
+Deploying Keep without authentication is the quickest way to get up and running, ideal for local development or internal tools where security is not a concern.
+## Setup Instructions
+Whether you use docker-compose, Kubernetes, OpenShift, or any other deployment method, set the following environment variable:
+```
+# Frontend
+AUTH_TYPE=NOAUTH
+
+# Backend
+AUTH_TYPE=NOAUTH
+```
+## Implications
+With `AUTH_TYPE=NOAUTH`:
+- Keep won't show any login page and will let you consume APIs without authentication.
+- Keep will use a JWT with "keep" as the tenant id, but will not validate it.
+- Any API key provided in the `x-api-key` header will be accepted without validation.
+
+This configuration essentially bypasses all authentication checks, making it unsuitable for production environments where security is a concern.
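+
+For example, any value passes the API-key check (the endpoint path here is illustrative):
+
+```
+curl -H "x-api-key: anything" http://localhost:8080/alerts
+```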
diff --git a/docs/deployment/authentication/oauth2proxy-auth.mdx b/docs/deployment/authentication/oauth2proxy-auth.mdx
new file mode 100644
index 0000000000..dba5d87ad4
--- /dev/null
+++ b/docs/deployment/authentication/oauth2proxy-auth.mdx
@@ -0,0 +1,32 @@
+---
+title: "OAuth2Proxy Authentication"
+---
+
+Delegate authentication to Oauth2Proxy.
+
+
+### When to Use
+
+- **oauth2-proxy user:** Use this authentication method if you want to delegate authentication to an external Oauth2Proxy service.
+
+### Setup Instructions
+
+To start Keep with Oauth2Proxy authentication, set the following environment variables:
+
+#### Frontend Environment Variables
+
+| Environment Variable | Description | Required | Default Value |
+|--------------------|-----------|:--------:|:-------------:|
+| AUTH_TYPE | Set to 'OAUTH2PROXY' for OAUTH2PROXY authentication | Yes | - |
+
+#### Backend Environment Variables
+
+| Environment Variable | Description | Required | Default Value |
+|--------------------|-----------|:--------:|:-------------:|
+| AUTH_TYPE | Set to 'OAUTH2PROXY' for OAUTH2PROXY authentication | Yes | - |
+| KEEP_OAUTH2_PROXY_USER_HEADER | Header for the authenticated user's email | Yes | x-forwarded-email |
+| KEEP_OAUTH2_PROXY_ROLE_HEADER | Header for the authenticated user's role | Yes | x-forwarded-groups |
+| KEEP_OAUTH2_PROXY_AUTO_CREATE_USER | Automatically create user if not exists | No | true |
+| KEEP_OAUTH2_PROXY_ADMIN_ROLE | Role name for admin users | No | admin |
+| KEEP_OAUTH2_PROXY_NOC_ROLE | Role name for NOC (Network Operations Center) users | No | noc |
+| KEEP_OAUTH2_PROXY_WEBHOOK_ROLE | Role name for webhook users | No | webhook |
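+
+### Example configuration
+
+A minimal sketch that relies on oauth2-proxy's default forwarded headers; the values shown are the defaults from the table above:
+
+```
+# Frontend
+AUTH_TYPE=OAUTH2PROXY
+
+# Backend
+AUTH_TYPE=OAUTH2PROXY
+KEEP_OAUTH2_PROXY_USER_HEADER=x-forwarded-email
+KEEP_OAUTH2_PROXY_ROLE_HEADER=x-forwarded-groups
+```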
diff --git a/docs/deployment/authentication/overview.mdx b/docs/deployment/authentication/overview.mdx
new file mode 100644
index 0000000000..3ce52bfdad
--- /dev/null
+++ b/docs/deployment/authentication/overview.mdx
@@ -0,0 +1,45 @@
+---
+title: "Overview"
+---
+
+For every authentication-related question or issue, please join our [Slack](https://slack.keephq.dev).
+
+Keep supports various authentication providers and architectures to accommodate different deployment strategies and security needs, from development environments to production setups.
+
+
+### Authentication Providers
+
+- [**No Authentication**](/deployment/authentication/no-auth) - Quick setup for testing or internal use cases.
+- [**DB**](/deployment/authentication/db-auth) - Simple username/password authentication. Works well for small teams or for dev/stage environments. Users and hashed passwords are stored in the DB.
+- [**Auth0**](/deployment/authentication/auth0-auth) - Utilize Auth0 for scalable, managed authentication.
+- [**Keycloak**](/deployment/authentication/keycloak-auth) - Utilize Keycloak for enterprise authentication methods such as SSO/SAML/OIDC, advanced RBAC with custom roles, resource-level permissions, and integration with user directories (LDAP).
+
+Choosing the right authentication strategy depends on your specific use case, security requirements, and deployment environment. You can read more about each authentication provider.
+
+
+
+### Authentication Features Comparison
+
+| Identity Provider | RBAC | SAML/OIDC/SSO | LDAP | Resource-based permission | User Management | Group Management | On Prem | License |
+|:---:|:----:|:---------:|:----:|:-------------------------:|:----------------:|:-----------------:|:-------:|:-------:|
+| **No Auth** | ❌ | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ | **OSS** |
+| **DB** | ✅ (Predefined roles) | ❌ | ❌ | ✅ | ✅ | ❌ | ✅ | **OSS** |
+| **Auth0** | ✅ (Predefined roles) | ✅ | 🚧 | 🚧 | ✅ | 🚧 | ❌ | **EE** |
+| **Keycloak** | ✅ (Custom roles) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | **EE** |
+| **Oauth2Proxy** | ✅ (Predefined roles) | ✅ | ❌ | ❌ | N/A | N/A | ✅ | **OSS** |
+
+### How To Configure
+
+Some authentication providers require additional environment variables. These will be covered in detail on the specific authentication provider pages.
+
+The authentication scheme on Keep is controlled with environment variables both on the backend (Keep API) and the frontend (Keep UI).
+
+
+| Identity Provider | Environment Variable | Additional Variables Required |
+| ------------------------------------- | -------------------------------------------------------------- | ---------------------------- |
+| **No Auth** | `AUTH_TYPE=NOAUTH` | None |
+| **DB** | `AUTH_TYPE=DB` | `KEEP_JWT_SECRET` |
+| **Auth0** | `AUTH_TYPE=AUTH0` | `AUTH0_DOMAIN`, `AUTH0_CLIENT_ID`, `AUTH0_CLIENT_SECRET` |
+| **Keycloak** | `AUTH_TYPE=KEYCLOAK` | `KEYCLOAK_URL`, `KEYCLOAK_REALM`, `KEYCLOAK_CLIENT_ID`, `KEYCLOAK_CLIENT_SECRET` |
+| **Oauth2Proxy** | `AUTH_TYPE=OAUTH2PROXY` | `OAUTH2_PROXY_USER_HEADER`, `OAUTH2_PROXY_ROLE_HEADER`, `OAUTH2_PROXY_AUTO_CREATE_USER` |
+
+For more details on each authentication strategy, including setup instructions and implications, refer to the respective sections.
diff --git a/docs/deployment/configuration.mdx b/docs/deployment/configuration.mdx
new file mode 100644
index 0000000000..0785cd6a97
--- /dev/null
+++ b/docs/deployment/configuration.mdx
@@ -0,0 +1,242 @@
+---
+title: "Configuration"
+sidebarTitle: "Configuration"
+---
+
+## Background
+
+Keep is highly configurable through environment variables. This allows you to customize various aspects of both the backend and frontend components without modifying the code. Environment variables can be set in your deployment environment, such as in your Kubernetes configuration, Docker Compose file, or directly on your host system.
+
+
+## Backend Environment Variables
+
+### General
+
+General configuration variables control the core behavior of the Keep server. These settings determine fundamental aspects such as the server's host, port, and whether certain components like the scheduler and consumer are enabled.
+
+
+| Env var | Purpose | Required | Default Value | Valid options |
+|:-------------------:|:-------:|:----------:|:-------------:|:-------------:|
+| **KEEP_HOST** | Specifies the host for the Keep server | No | "0.0.0.0" | Valid hostname or IP address |
+| **PORT** | Specifies the port on which the backend server runs | No | 8080 | Any valid port number |
+| **SCHEDULER** | Enables or disables the workflow scheduler | No | "true" | "true" or "false" |
+| **CONSUMER** | Enables or disables the consumer | No | "true" | "true" or "false" |
+| **KEEP_VERSION** | Specifies the Keep version | No | "unknown" | Valid version string |
+| **KEEP_API_URL** | Specifies the Keep API URL | No | Constructed from HOST and PORT | Valid URL |
+| **KEEP_STORE_RAW_ALERTS** | Enables storing of raw alerts | No | "false" | "true" or "false" |
+| **TENANT_CONFIGURATION_RELOAD_TIME** | Time in minutes to reload tenant configurations | No | 5 | Positive integer |
+
+### Logging and Environment
+
+Logging and environment configuration determines how Keep generates and formats log output. These settings are crucial for debugging, monitoring, and understanding the behavior of your Keep instance in different environments.
+ + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **LOG_LEVEL** | Sets the logging level for the application | No | "INFO" | "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" | +| **ENVIRONMENT** | Specifies the environment the application is running in | No | "production" | "development", "staging", "production" | +| **LOG_FORMAT** | Specifies the log format | No | "open_telemetry" | "open_telemetry", "dev_terminal" | +| **LOG_AUTH_PAYLOAD** | Enables logging of authentication payload | No | "false" | "true" or "false" | + + +### Database + +Database configuration is crucial for Keep's data persistence. Keep supports various database backends through SQLAlchemy, allowing flexibility in choosing and configuring your preferred database system. + + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **DATABASE_CONNECTION_STRING** | Specifies the database connection URL | Yes | None | Valid SQLAlchemy connection string | +| **DATABASE_POOL_SIZE** | Sets the database connection pool size | No | 5 | Positive integer | +| **DATABASE_MAX_OVERFLOW** | Sets the maximum overflow for the connection pool | No | 10 | Positive integer | +| **DATABASE_ECHO** | Enables SQLAlchemy echo mode for debugging | No | False | Boolean (True/False) | +| **DB_CONNECTION_NAME** | Specifies the Cloud SQL connection name | No | "keephq-sandbox:us-central1:keep" | Valid Cloud SQL connection string | +| **DB_SERVICE_ACCOUNT** | Service account for database impersonation | No | None | Valid service account email | +| **SKIP_DB_CREATION** | Skips database creation and migrations | No | "false" | "true" or "false" | + +### Resource Provisioning + +Resource provisioning settings control how Keep sets up initial resources. This configuration is particularly important for automating the setup process and ensuring that necessary resources are available when Keep starts. + +To elaborate on resource provisioning and its configuration, please see [provisioning docs](/deployment/provision/overview). + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **PROVISION_RESOURCES** | Enables or disables resource provisioning | No | "true" | "true" or "false" | + + +### Authentication + +Authentication configuration determines how Keep verifies user identities and manages access control. These settings are essential for securing your Keep instance and integrating with various authentication providers. + +For specifc authentication type configuration, please see [authentication docs](/deployment/authentication/overview). 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **AUTH_TYPE** | Specifies the authentication type | No | "noauth" | "auth0", "keycloak", "db", "noauth", "oauth2proxy" | +| **JWT_SECRET** | Secret key for JWT token generation and validation | Yes | None | Any strong secret string | +| **JWT_ALGORITHM** | Algorithm used for JWT | No | "HS256" | Any valid JWT algorithm | +| **KEEP_DEFAULT_USERNAME** | Default username for the admin user | No | "keep" | Any valid username string | +| **KEEP_DEFAULT_PASSWORD** | Default password for the admin user | No | "keep" | Any strong password string | +| **KEEP_FORCE_RESET_DEFAULT_PASSWORD** | Forces reset of default user password | No | "false" | "true" or "false" | +| **KEEP_DEFAULT_API_KEYS** | Comma-separated list of default API keys to provision | No | "" | Format: "name:role:secret,name:role:secret" | + +### Secrets Management + +Secrets Management configuration specifies how Keep handles sensitive information. This is crucial for securely storing and accessing confidential data such as API keys and integrations credentials. + + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **SECRET_MANAGER_TYPE** | Defines the type of secret manager to use | Yes | "FILE" | "FILE", "GCP", "K8S", "VAULT" | +| **SECRET_MANAGER_DIRECTORY** | Directory for storing secrets when using file-based secret management | No | "/state" | Any valid directory path | + +### OpenTelemetry + +OpenTelemetry configuration enables comprehensive observability for Keep. These settings allow you to integrate Keep with various monitoring and tracing systems, enhancing your ability to debug and optimize performance. + + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **OTEL_SERVICE_NAME** | OpenTelemetry service name | No | "keep-api" | Valid service name string | +| **SERVICE_NAME** | Alternative for OTEL_SERVICE_NAME | No | "keep-api" | Valid service name string | +| **OTEL_EXPORTER_OTLP_ENDPOINT** | OpenTelemetry collector endpoint | No | None | Valid URL | +| **OTLP_ENDPOINT** | Alternative for OTEL_EXPORTER_OTLP_ENDPOINT | No | None | Valid URL | +| **OTEL_EXPORTER_OTLP_TRACES_ENDPOINT** | OpenTelemetry traces endpoint | No | None | Valid URL | +| **OTEL_EXPORTER_OTLP_LOGS_ENDPOINT** | OpenTelemetry logs endpoint | No | None | Valid URL | +| **OTEL_EXPORTER_OTLP_METRICS_ENDPOINT** | OpenTelemetry metrics endpoint | No | None | Valid URL | +| **CLOUD_TRACE_ENABLED** | Enables Google Cloud Trace exporter | No | "false" | "true" or "false" | +| **METRIC_OTEL_ENABLED** | Enables OpenTelemetry metrics | No | "" | "true" or "false" | + +### WebSocket Server (Pusher/Soketi) + +WebSocket server configuration controls real-time communication capabilities in Keep. These settings are important for enabling features that require instant updates and notifications. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **PUSHER_DISABLED** | Disables Pusher integration | No | "false" | "true" or "false" | +| **PUSHER_HOST** | Hostname of the Pusher server | No | None | Valid hostname or IP address | +| **PUSHER_PORT** | Port of the Pusher server | No | None | Any valid port number | +| **PUSHER_APP_ID** | Pusher application ID | Yes (if using Pusher) | None | Valid Pusher App ID | +| **PUSHER_APP_KEY** | Pusher application key | Yes (if using Pusher) | None | Valid Pusher App Key | +| **PUSHER_APP_SECRET** | Pusher application secret | Yes (if using Pusher) | None | Valid Pusher App Secret | +| **PUSHER_USE_SSL** | Enables SSL for Pusher connection | No | False | Boolean (True/False) | +| **PUSHER_CLUSTER** | Pusher cluster | No | None | Valid Pusher cluster name | + + +### OpenAPI + +OpenAPI configuration is used for integrating with OpenAI services. These settings are important if you're utilizing OpenAI capabilities within Keep for tasks such as natural language processing or AI-assisted operations. + + + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **OPENAI_API_KEY** | API key for OpenAI services | No | None | Valid OpenAI API key | + + +### Posthog + +Posthog configuration controls Keep's integration with the Posthog analytics platform. These settings are useful for tracking usage patterns and gathering insights about how your Keep instance is being used. + + + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **POSTHOG_API_KEY** | API key for PostHog analytics | No | "phc_muk9qE3TfZsX3SZ9XxX52kCGJBclrjhkP9JxAQcm1PZ" | Valid PostHog API key | +| **ENABLE_POSTHOG_API** | Enables or disables PostHog API | No | "false" | "true" or "false" | +| **DISABLE_POSTHOG** | Disables PostHog integration | No | "false" | "true" or "false" | + +### Ngrok + +Ngrok configuration enables secure tunneling to your Keep instance. These settings are particularly useful for development or when you need to expose your local Keep instance to the internet securely. + + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **USE_NGROK** | Enables ngrok for tunneling | No | "false" | "true" or "false" | +| **NGROK_AUTH_TOKEN** | Authentication token for ngrok | No | None | Valid ngrok auth token | +| **NGROK_DOMAIN** | Custom domain for ngrok | No | None | Valid domain name | + + +### Elasticsearch + +Elasticsearch configuration controls Keep's integration with Elasticsearch for advanced search capabilities. These settings are important if you're using Elasticsearch to enhance Keep's search functionality and performance. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **ELASTIC_ENABLED** | Enables Elasticsearch integration | No | "false" | "true" or "false" | +| **ELASTIC_API_KEY** | API key for Elasticsearch | Yes (if using Elasticsearch) | None | Valid Elasticsearch API key | +| **ELASTIC_HOSTS** | Comma-separated list of Elasticsearch hosts | Yes (if using Elasticsearch) | None | Valid Elasticsearch host URLs | +| **ELASTIC_USER** | Username for Elasticsearch basic auth | No | None | Valid username | +| **ELASTIC_PASSWORD** | Password for Elasticsearch basic auth | No | None | Valid password | +| **ELASTIC_INDEX_SUFFIX** | Suffix for Elasticsearch index names | Yes (for single tenant) | None | Any valid string | + +### Redis + +Redis configuration specifies the connection details for Keep's Redis instance. Redis is used for various caching and queueing purposes, making these settings important for optimizing Keep's performance and scalability. + + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **REDIS_HOST** | Redis server hostname | No | "localhost" | Valid hostname or IP address | +| **REDIS_PORT** | Redis server port | No | 6379 | Valid port number | +| **REDIS_USERNAME** | Redis username | No | None | Valid username string | +| **REDIS_PASSWORD** | Redis password | No | None | Valid password string | + +### ARQ + +ARQ (Asynchronous Task Queue) configuration controls Keep's background task processing. These settings are crucial for managing how Keep handles long-running or scheduled tasks, ensuring efficient resource utilization and responsiveness. + + + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **ARQ_BACKGROUND_FUNCTIONS** | Comma-separated list of background functions to run | No | None | Valid function names | +| **ARQ_KEEP_RESULT** | Duration to keep job results (in seconds) | No | 3600 | Positive integer | +| **ARQ_EXPIRES** | Default job expiration time (in seconds) | No | 3600 | Positive integer | +| **ARQ_EXPIRES_AI** | AI job expiration time (in seconds) | No | 3600000 | Positive integer | + +## Frontend Environment Variables + +Frontend configuration variables control the behavior and features of Keep's user interface. These settings are crucial for customizing the frontend's appearance, functionality, and integration with the backend services. + + +### General + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **API_URL** | Specifies the URL of the Keep backend API | Yes | None | Valid URL | + +### Authentication + +Authentication configuration determines how Keep verifies user identities and manages access control. These settings are essential for securing your Keep instance and integrating with various authentication providers. 
+ + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **AUTH_TYPE** | Specifies the authentication type | No | "noauth" | "auth0", "keycloak", "db", "noauth", "oauth2proxy" | +| **NEXTAUTH_URL** | URL for NextAuth authentication | Yes | None | Valid URL | +| **NEXTAUTH_SECRET** | Secret key for NextAuth | Yes | None | Strong secret string + +### Posthog +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **POSTHOG_KEY** | PostHog API key for frontend analytics | No | None | Valid PostHog API key | +| **POSTHOG_HOST** | PostHog Host for frontend analytics | No | None | Valid PostHog Host | + +### Pusher + +Pusher configuration is essential for enabling real-time updates and communication in Keep's frontend. These settings allow the frontend to establish a WebSocket connection with the Pusher server, facilitating instant updates and notifications. + + +| Env var | Purpose | Required | Default Value | Valid options | +|:-------------------:|:-------:|:----------:|:-------------:|:-------------:| +| **PUSHER_DISABLED** | Disables Pusher integration | No | "false" | "true" or "false" | +| **PUSHER_HOST** | Hostname of the Pusher server | No | "localhost" | Valid hostname or IP address | +| **PUSHER_PORT** | Port of the Pusher server | No | 6001 | Valid port number | +| **PUSHER_APP_KEY** | Pusher application key | Yes (if Pusher enabled) | "keepappkey" | Valid Pusher App Key | +| **PUSHER_CLUSTER** | Pusher cluster | No | None | Valid Pusher cluster name | diff --git a/docs/deployment/docker.mdx b/docs/deployment/docker.mdx new file mode 100644 index 0000000000..5b77e205ce --- /dev/null +++ b/docs/deployment/docker.mdx @@ -0,0 +1,31 @@ +--- +title: "Docker" +sidebarTitle: "Docker" +--- + +### Spin up Keep with docker-compose latest images +The easiest way to start keep is is with docker-compose: +```shell +curl https://raw.githubusercontent.com/keephq/keep/main/start.sh | sh +``` + +```bash start.sh +#!/bin/bash +# Keep install script for docker compose + +echo "Creating state directory." +mkdir -p state +test -e state +echo "Changing directory ownership to non-privileged user." +chown -R 999:999 state || echo "Unable to change directory ownership, changing permissions instead." && chmod -R 0777 state +which curl &> /dev/null || echo "curl not installed" +curl https://raw.githubusercontent.com/keephq/keep/main/docker-compose.yml --output docker-compose.yml +curl https://raw.githubusercontent.com/keephq/keep/main/docker-compose.common.yml --output docker-compose.common.yml + +docker compose up -d +``` + +The docker-compose.yml contains 3 services: +- [keep-backend](https://console.cloud.google.com/artifacts/docker/keephq/us-central1/keep/keep-api?project=keephq) - a fastapi service that as the API server. +- [keep-frontend](https://console.cloud.google.com/artifacts/docker/keephq/us-central1/keep/keep-ui?project=keephq) - a nextjs app that serves as Keep UI interface. +- [keep-websocket-server](https://docs.soketi.app/getting-started/installation/docker) - Soketi (a pusher compatible websocket server) for real time alerting. 
diff --git a/docs/deployment/ecs.mdx b/docs/deployment/ecs.mdx new file mode 100644 index 0000000000..097092e1a7 --- /dev/null +++ b/docs/deployment/ecs.mdx @@ -0,0 +1,154 @@ +--- +title: "AWS ECS" +sidebarTitle: "AWS ECS" +--- + +## Step 1: Login to AWS Console +- Open your web browser and navigate to the AWS Management Console. +- Log in using your AWS account credentials. + +## Step 2: Navigate to ECS +- Click on the "Services" dropdown menu in the top left corner. +- Select "ECS" from the list of services. + +## Step 3: Create 3 Task Definitions +- In the ECS dashboard, navigate to the "Task Definitions" section in the left sidebar. + Task Definition +- Click on "Create new Task Definition". + ![Create new task definition](/images/ecs-task-def-create-new.png) + + ### Task Definition 1 (Frontend - KeepUI): + + - Task Definition Family: keep-frontend + ![Task Definition Family](/images/ecs-task-def-frontend1.png) + - Configure your container definitions as below: + - Infrastructure Requirements: + - Launch Type: AWS Fargate + - OS, Architecture, Network mode: Linux/X86_64 + - Task Size: + - CPU: 1 vCPU + - Memory: 2 GB + - Task Role and Task Execution Role are optional if you plan on using secrets manager for example then create a task execution role to allow access to the secret manager you created. + ![Infrastructure Requirements](/images/ecs-task-def-frontend2.png) + - Container Details: + - Name: keep-frontend + - Image URI: us-central1-docker.pkg.dev/keephq/keep/keep-api:latest + - Ports Mapping: + - Container Port: 3000 + - Protocol: TCP + ![Container Details](/images/ecs-task-def-frontend3.png) + - Environment Variables: (This can be static or you can use parameter store or secrets manager) + - DATABASE_CONNECTION_STRING + - AUTH_TYPE + - KEEP_JWT_SECRET + - KEEP_DEFAULT_USERNAME + - KEEP_DEFAULT_PASSWORD + - SECRET_MANAGER_TYPE + - SECRET_MANAGER_DIRECTORY + - USE_NGROK + - KEEP_API_URL + (The below variable is optional if you don't want to use websocket) + - PUSHER_DISABLED + (The below variables are optional if you want to use websocket) + - PUSHER_APP_ID + - PUSHER_APP_KEY + - PUSHER_APP_SECRET + - PUSHER_HOST + - PUSHER_PORT + ![Environment Variables](/images/ecs-task-def-frontend4.png) + - Review and create your task definition. + + ### Task Definition 2 (Backend - keepAPI): + + - Configure your container definitions as below: + - Task Definition Family: keep-frontend + ![Task Definition Family](/images/ecs-task-def-backend1.png) + - Infrastructure Requirements: + - Launch Type: AWS Fargate + - OS, Architecture, Network mode: Linux/X86_64 + - Task Size: + - CPU: 1 vCPU + - Memory: 2 GB + - Task Role and Task Execution Role are optional if you plan on using secrets manager for example then create a task execution role to allow access to the secret manager you created. 
+ ![Infrastructure Requirements](/images/ecs-task-def-backend2.png) + - Container Details: + - Name: keep-backend + - Image URI: us-central1-docker.pkg.dev/keephq/keep/keep-api:latest + - Ports Mapping: + - Container Port: 8080 + - Protocol: TCP + ![Container Details](/images/ecs-task-def-backend3.png) + - Environment Variables: (This can be static or you can use parameter store or secrets manager) + - DATABASE_CONNECTION_STRING + - AUTH_TYPE + - KEEP_JWT_SECRET + - KEEP_DEFAULT_USERNAME + - KEEP_DEFAULT_PASSWORD + - SECRET_MANAGER_TYPE + - SECRET_MANAGER_DIRECTORY + - USE_NGROK + - KEEP_API_URL + (The below variable is optional if you don't want to use websocket) + - PUSHER_DISABLED + (The below variables are optional if you want to use websocket) + - PUSHER_APP_ID + - PUSHER_APP_KEY + - PUSHER_APP_SECRET + - PUSHER_HOST + - PUSHER_PORT + ![Environment Variables](/images/ecs-task-def-backend4.png) + - Storage: + - Volume Name: keep-efs + - Configuration Type: Configure at task definition creation + - Volume type: EFS + - Storage configurations: + - File system ID: Select an exisiting EFS filesystem or create a new one + - Root Directory: / + ![Volume Configuration](/images/ecs-task-def-backend5.png) + - Container mount points: + - Container: select the container you just created + - Source volume: keep-efs + - Container path: /app + - Make sure that Readonly is not selected + ![Container Mount](/images/ecs-task-def-backend6.png) + - Review and create your task definition. + + ### Task Definition 3 (Websocket): (This step is optional if you want to have automatic refresh of the alerts feed) + + - Configure your container definitions as below: + - Task Definition Family: keep-frontend + ![Task Definition Family](/images/ecs-task-def-websocket1.png) + - Infrastructure Requirements: + - Launch Type: AWS Fargate + - OS, Architecture, Network mode: Linux/X86_64 + - Task Size: + - CPU: 0.25 vCPU + - Memory: 1 GB + - Task Role and Task Execution Role are optional if you plan on using secrets manager for example then create a task execution role to allow access to the secret manager you created. + ![Infrastructure Requirements](/images/ecs-task-def-websocket2.png) + - Container Details: + - Name: keep-websocket + - Image URI: quay.io/soketi/soketi:1.4-16-debian + - Ports Mapping: + - Container Port: 6001 + - Protocol: TCP + ![Container Details](/images/ecs-task-def-websocket3.png) + - Environment Variables: (This can be static or you can use parameter store or secrets manager) + - SOKETI_DEBUG + - SOKETI_DEFAULT_APP_ID + - SOKETI_DEFAULT_APP_KEY + - SOKETI_DEFAULT_APP_SECRET + - SOKETI_USER_AUTHENTICATION_TIMEOUT + ![Environment Variables](/images/ecs-task-def-websocket4.png) + - Review and create your task definition. + +## Step 4: Create Keep Service +- In the ECS dashboard, navigate to the "Clusters" section in the left sidebar. +- Select the cluster you want to deploy your service to. +- Click on the "Create" button next to "Services". +- Configure your service settings. +- Review and create your service. + +## Step 5: Monitor Your Service +- Once your service is created, monitor its status in the ECS dashboard. +- You can view task status, service events, and other metrics to ensure your service is running correctly. diff --git a/docs/deployment/gke.mdx b/docs/deployment/gke.mdx new file mode 100644 index 0000000000..dad98f3cd8 --- /dev/null +++ b/docs/deployment/gke.mdx @@ -0,0 +1,318 @@ +--- +title: "GKE" +sidebarTitle: "GKE" +--- + +## Step 0: Prerequisites + +1. 
+2. kubectl and helm installed (**required**)
+3. Domain + Certificate (**optional**, for TLS)
+
+
+
+## Step 1: Configure Keep's helm repo
+```bash
+# configure the helm repo
+helm repo add keephq https://keephq.github.io/helm-charts
+helm pull keephq/keep
+
+
+# make sure you are going to install Keep
+helm search repo keep
+NAME           CHART VERSION   APP VERSION   DESCRIPTION
+keephq/keep    0.1.20          0.25.4        Keep Helm Chart
+```
+
+## Step 2: Install Keep
+
+Do not install Keep in your default namespace. It's best practice to create a dedicated namespace.
+
+Let's create a dedicated namespace and install Keep in it:
+```bash
+# create a dedicated namespace for Keep
+kubectl create ns keep
+
+# Install keep
+helm install -n keep keep keephq/keep --set isGKE=true --set namespace=keep
+
+# You should see something like:
+NAME: keep
+LAST DEPLOYED: Thu Oct 10 11:31:07 2024
+NAMESPACE: keep
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+```
+
+
+As the number of configuration changes from the vanilla helm chart increases, it may be more convenient to create a `values.yaml` file and use it:
+
+
+```bash
+cat values.yaml
+isGKE: true
+namespace: keep
+
+helm install -n keep keep keephq/keep -f values.yaml
+```
+
+
+
+Now, let's make sure everything installed correctly:
+
+```bash
+# Note: it can take a few minutes until GKE assigns public IPs to the ingresses
+kubectl -n keep get ingress,svc,pod,backendconfig
+NAME                                      CLASS    HOSTS   ADDRESS         PORTS   AGE
+ingress.networking.k8s.io/keep-backend    <none>   *       34.54.XXX.XXX   80      5m27s
+ingress.networking.k8s.io/keep-frontend   <none>   *       34.49.XXX.XXX   80      5m27s
+
+NAME                     TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
+service/keep-backend     ClusterIP   34.118.239.9     <none>        8080/TCP   5m28s
+service/keep-database    ClusterIP   34.118.228.60    <none>        3306/TCP   5m28s
+service/keep-frontend    ClusterIP   34.118.230.132   <none>        3000/TCP   5m28s
+service/keep-websocket   ClusterIP   34.118.227.128   <none>        6001/TCP   5m28s
+
+NAME                                  READY   STATUS    RESTARTS   AGE
+pod/keep-backend-7466b5fcbb-5vst4     1/1     Running   0          5m27s
+pod/keep-database-7c65c996f7-nl59n    1/1     Running   0          5m27s
+pod/keep-frontend-6dd6897bbb-mbddn    1/1     Running   0          5m27s
+pod/keep-websocket-7fc496997b-bz68z   1/1     Running   0          5m27s
+
+NAME                                                          AGE
+backendconfig.cloud.google.com/keep-backend-backendconfig    5m28s
+backendconfig.cloud.google.com/keep-frontend-backendconfig   5m28s
+```
+
+You can access Keep by browsing to the frontend IP:
+```
+frontend_ip=$(kubectl -n keep get ingress | grep frontend | awk '{ print $4 }')
+```
+
+Keep is now running with its vanilla configuration. This tutorial focuses on how to spin up Keep on GKE using Keep's helm chart and doesn't cover all of Keep's environment variables and configuration.
+
+
+
+
+
+
+
+## Step 3: Configure domain and certificate (TLS)
+
+### Background
+
+Keep has three ingresses that allow external access to its various components:
+
+
+In this tutorial we focus on exposing the frontend, but exposing the backend and the websocket server is basically the same.
+
+
+#### Frontend Ingress (Required)
+This ingress serves the main UI of Keep. It is required for users to access the dashboard and interact with the platform. The frontend is exposed on port 80 by default (or 443 when TLS is configured) and typically points to the public-facing interface of your Keep installation.
+
+#### Backend Ingress (Optional, enabled by default in `values.yaml`)
+This ingress provides access to the backend API, which powers all the business logic, integrations, and alerting services of Keep.
The backend ingress is usually accessed by frontend components or other services through internal or external API calls. By default, this ingress is enabled in the Helm chart and exposed internally unless explicitly configured with external domain access.
+
+#### Websocket Ingress (Optional, disabled by default in `values.yaml`)
+This ingress supports real-time communication and push updates for the frontend without requiring page reloads. It is essential for use cases where live alert updates or continuous status changes need to be reflected immediately on the dashboard. Since not every deployment requires real-time updates, the WebSocket ingress is disabled by default but can be enabled as needed by updating the Helm chart configuration.
+
+
+
+### Prerequisites
+
+#### Domain
+e.g. keep.yourcompany.com will be used to access the Keep UI.
+
+#### Certificate
+Both private key (.pem) and certificate (.crt)
+
+
+There are other ways to assign the certificate to the ingress that are not covered by this tutorial. Contributions are welcome here; just open a PR and we will review and merge it.
+
+
+1. Google's Managed Certificate - if your domain is managed by Google Cloud DNS, you can provision the certificate automatically using Google's Managed Certificate.
+2. Using cert-manager - you can install cert-manager and use Let's Encrypt to provision a certificate for Keep.
+
+
+
+
+### Add an A record for the domain to point to the frontend IP
+You can get the frontend IP by:
+```
+frontend_ip=$(kubectl -n keep get ingress | grep frontend | awk '{ print $4 }')
+```
+Now go to your DNS provider and add an A record that points to that IP.
+
+At this stage, you should be able to access your Keep UI via http://keep.yourcompany.com
+
+### Store the certificate as a kubernetes secret
+Assuming the private key is stored as `tls.key` and the certificate as `tls.crt`:
+
+```bash
+kubectl create secret tls frontend-tls --cert=./tls.crt --key=./tls.key -n keep
+
+# you should see:
+secret/frontend-tls created
+```
+
+
+### Upgrade Keep to use TLS
+
+Create this `values.yaml`:
+**Note: change keep.yourcompany.com to your domain.**
+
+```yaml
+namespace: keep
+isGKE: true
+frontend:
+  ingress:
+    enabled: true
+    hosts:
+      - host: keep.yourcompany.com
+        paths:
+          - path: /
+            pathType: Prefix
+    tls:
+      - hosts:
+          - keep.yourcompany.com
+        secretName: frontend-tls
+  env:
+    - name: NEXTAUTH_SECRET
+      value: secret
+    # Changed the NEXTAUTH_URL
+    - name: NEXTAUTH_URL
+      value: https://keep.yourcompany.com
+    # https://github.com/nextauthjs/next-auth/issues/600
+    - name: VERCEL
+      value: "1"
+    - name: API_URL
+      value: http://keep-backend:8080
+    - name: NEXT_PUBLIC_POSTHOG_KEY
+      value: "phc_muk9qE3TfZsX3SZ9XxX52kCGJBclrjhkP9JxAQcm1PZ"
+    - name: NEXT_PUBLIC_POSTHOG_HOST
+      value: https://app.posthog.com
+    - name: ENV
+      value: development
+    - name: NODE_ENV
+      value: development
+    - name: HOSTNAME
+      value: 0.0.0.0
+    - name: PUSHER_HOST
+      value: keep-websocket.default.svc.cluster.local
+    - name: PUSHER_PORT
+      value: "6001"
+    - name: PUSHER_APP_KEY
+      value: "keepappkey"
+
+backend:
+  env:
+    # Added the KEEP_API_URL
+    - name: KEEP_API_URL
+      value: https://keep.yourcompany.com/backend
+    - name: DATABASE_CONNECTION_STRING
+      value: mysql+pymysql://root@keep-database:3306/keep
+    - name: SECRET_MANAGER_TYPE
+      value: k8s
+    - name: PORT
+      value: "8080"
+    - name: PUSHER_APP_ID
+      value: "1"
+    - name: PUSHER_APP_KEY
+      value: keepappkey
+    - name: PUSHER_APP_SECRET
+      value: keepappsecret
+    - name: PUSHER_HOST
+      value: keep-websocket
+    - name: PUSHER_PORT
+      value: "6001"
+database:
+  # this is needed since otherwise helm install fails. If you are using a different storageClass, edit the value here.
+  pvc:
+    storageClass: "standard-rwo"
+```
+
+Now, update Keep:
+```
+helm upgrade -n keep keep keephq/keep -f values.yaml
+```
+
+### Validate everything works
+
+First, you should now be able to access Keep's UI over HTTPS at https://keep.yourcompany.com. If that works, you can skip the other validations.
+The "Not Secure" in the screenshot is due to a self-signed certificate.
+
+
+
+
+
+#### Validate ingress host
+
+```bash
+kubectl -n keep get ingress
+
+# You should now see the HOST under your ingress, with port 443:
+NAME            CLASS    HOSTS                  ADDRESS         PORTS     AGE
+keep-backend    <none>   *                      34.54.XXX.XXX   80        2d16h
+keep-frontend   <none>   keep.yourcompany.com   34.49.XXX.XXX   80, 443   2d16h
+```
+
+#### Validate the ingress is using TLS
+
+You should see `frontend-tls terminates keep.yourcompany.com`:
+
+```bash
+kubectl -n keep describe ingress.networking.k8s.io/keep-frontend
+Name:             keep-frontend
+Labels:           app.kubernetes.io/instance=keep
+                  app.kubernetes.io/managed-by=Helm
+                  app.kubernetes.io/name=keep
+                  app.kubernetes.io/version=0.25.4
+                  helm.sh/chart=keep-0.1.21
+Namespace:        keep
+Address:          34.54.XXX.XXX
+Ingress Class:    <none>
+Default backend:  <default>
+TLS:
+  frontend-tls terminates keep.yourcompany.com
+Rules:
+  Host                    Path  Backends
+  ----                    ----  --------
+  gkefrontend.keephq.dev
+                          /     keep-frontend:3000 (10.24.8.93:3000)
+Annotations:              ingress.kubernetes.io/backends:
+                            {"k8s1-0864ab44-keep-keep-frontend-3000-98c56664":"HEALTHY","k8s1-0864ab44-kube-system-default-http-backend-80-2d92bedb":"HEALTHY"}
+                          ingress.kubernetes.io/forwarding-rule: k8s2-fr-h7ydn1yg-keep-keep-frontend-ldr6qtxe
+                          ingress.kubernetes.io/https-forwarding-rule: k8s2-fs-h7ydn1yg-keep-keep-frontend-ldr6qtxe
+                          ingress.kubernetes.io/https-target-proxy: k8s2-ts-h7ydn1yg-keep-keep-frontend-ldr6qtxe
+                          ingress.kubernetes.io/ssl-cert: k8s2-cr-h7ydn1yg-7taujpdzbehr1ghm-64d2ca9e282d3ef5
+                          ingress.kubernetes.io/static-ip: k8s2-fr-h7ydn1yg-keep-keep-frontend-ldr6qtxe
+                          ingress.kubernetes.io/target-proxy: k8s2-tp-h7ydn1yg-keep-keep-frontend-ldr6qtxe
+                          ingress.kubernetes.io/url-map: k8s2-um-h7ydn1yg-keep-keep-frontend-ldr6qtxe
+                          meta.helm.sh/release-name: keep
+                          meta.helm.sh/release-namespace: keep
+Events:
+  Type    Reason     Age                   From                     Message
+  ----    ------     ----                  ----                     -------
+  Normal  Sync       8m49s                 loadbalancer-controller  UrlMap "k8s2-um-h7ydn1yg-keep-keep-frontend-ldr6qtxe" created
+  Normal  Sync       8m46s                 loadbalancer-controller  TargetProxy "k8s2-tp-h7ydn1yg-keep-keep-frontend-ldr6qtxe" created
+  Normal  Sync       8m33s                 loadbalancer-controller  ForwardingRule "k8s2-fr-h7ydn1yg-keep-keep-frontend-ldr6qtxe" created
+  Normal  Sync       8m25s                 loadbalancer-controller  TargetProxy "k8s2-ts-h7ydn1yg-keep-keep-frontend-ldr6qtxe" created
+  Normal  Sync       8m12s                 loadbalancer-controller  ForwardingRule "k8s2-fs-h7ydn1yg-keep-keep-frontend-ldr6qtxe" created
+  Normal  IPChanged  8m11s                 loadbalancer-controller  IP is now 34.54.XXX.XXX
+  Normal  Sync       7m39s                 loadbalancer-controller  UrlMap "k8s2-um-h7ydn1yg-keep-keep-frontend-ldr6qtxe" updated
+  Normal  Sync       116s (x6 over 9m47s)  loadbalancer-controller  Scheduled for sync
+```
+
+## Uninstall Keep
+
+### Uninstall the helm package
+```bash
+helm uninstall -n keep keep
+```
+
+### Delete the namespace
+
+```bash
+kubectl delete ns keep
+```
diff --git a/docs/deployment/kubernetes.mdx b/docs/deployment/kubernetes.mdx
new file mode 100644
index 0000000000..3a7dc2902f
--- /dev/null
+++ b/docs/deployment/kubernetes.mdx
@@ -0,0 +1,288 @@
+---
+title: "Kubernetes"
+sidebarTitle: "Kubernetes"
+---
+
+
+## Overview
+
+### High Level Architecture
+Keep's architecture is composed of two main components:
+
+1. **Keep API** (aka keep backend) - a Python (FastAPI) server which serves as Keep's backend
+2. **Keep Frontend** (aka keep ui) - a Next.js server which serves as Keep's frontend
+
+Keep also uses the following (optional) components:
+
+3. **Websocket Server** - a Soketi server which serves as the websocket server, allowing real-time updates from the server to the browser without refreshing the page
+4. **Database Server** - a database which Keep reads from and writes to for persistence. Keep currently supports SQLite, PostgreSQL, MySQL, and SQL Server (enterprise)
+
+### Kubernetes Architecture
+Keep's Kubernetes architecture is composed of several components, each with its own set of Kubernetes resources. Here's a detailed breakdown of each component and its associated resources:
+
+#### General Components
+Keep uses the Kubernetes secret manager to store secrets such as integration credentials.
+
+| Kubernetes Resource | Purpose | Required/Optional | Source |
+|:-------------------:|:-------:|:-----------------:|:------:|
+| ServiceAccount | Provides an identity for processes that run in a Pod. Used mainly by the Keep API to access the Kubernetes secret manager | Required | [serviceaccount.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/serviceaccount.yaml) |
+| Role | Defines permissions for the ServiceAccount to manage secrets | Required | [role-secret-manager.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/role-secret-manager.yaml) |
+| RoleBinding | Associates the Role with the ServiceAccount | Required | [role-binding-secret-manager.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/role-binding-secret-manager.yaml) |
+| Secret Deletion Job | Cleans up Keep-related secrets when the Helm release is deleted | Required | [delete-secret-job.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/delete-secret-job.yaml) |
+
+#### Frontend Components
+
+| Kubernetes Resource | Purpose | Required/Optional | Source |
+|:-------------------:|:-------:|:-----------------:|:------:|
+| Frontend Deployment | Manages the frontend application containers | Required | [frontend.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/frontend.yaml) |
+| Frontend Service | Exposes the frontend deployment within the cluster | Required | [frontend-service.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/frontend-service.yaml) |
+| Frontend Ingress | Exposes the frontend service to external traffic | Optional | [frontend-ingress.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/frontend-ingress.yaml) |
+| Frontend Route (OpenShift) | Exposes the frontend service to external traffic on OpenShift | Optional | [frontend-route.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/frontend-route.yaml) |
+| Frontend HorizontalPodAutoscaler | Automatically scales the number of frontend pods | Optional | [frontend-hpa.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/frontend-hpa.yaml) |
+| Frontend BackendConfig (GKE) | Configures health checks for Google Cloud Load Balancing | Optional (GKE only) | [backendconfig.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/gke/frontend-gke-healthcheck-config.yaml) |
+
+#### Backend Components
+
+| Kubernetes Resource | Purpose | Required/Optional | Source | +|:-------------------:|:-------:|:-----------------:|:------:| +| Backend Deployment | Manages the backend application containers | Required (if backend enabled) | [backend.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/backend.yaml) | +| Backend Service | Exposes the backend deployment within the cluster | Required (if backend enabled) | [backend-service.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/backend-service.yaml) | +| Backend Ingress | Exposes the backend service to external traffic | Optional | [backend-ingress.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/backend-ingress.yaml) | +| Backend Route (OpenShift) | Exposes the backend service to external traffic on OpenShift | Optional | [backend-route.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/backend-route.yaml) | +| Backend HorizontalPodAutoscaler | Automatically scales the number of backend pods | Optional | [backend-hpa.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/backend-hpa.yaml) | +| BackendConfig (GKE) | Configures health checks for Google Cloud Load Balancing | Optional (GKE only) | [backendconfig.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/gke/backend-gke-healthcheck-config.yaml) | + +#### Database Components +Database components are optional. You can spin up Keep with your own database. + +| Kubernetes Resource | Purpose | Required/Optional | Source | +|:-------------------:|:-------:|:-----------------:|:------:| +| Database Deployment | Manages the database containers (e.g. MySQL or Postgres) | Optional | [db.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/db.yaml) | +| Database Service | Exposes the database deployment within the cluster | Required (if deployment enabled) | [db-service.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/db-service.yaml) | +| Database PersistentVolume | Provides persistent storage for the database | Optional | [db-pv.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/db-pv.yaml) | +| Database PersistentVolumeClaim | Claims the persistent storage for the database | Optional | [db-pvc.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/db-pvc.yaml) | + +#### WebSocket Components +WebSocket components are optional. You can spin up Keep with your own *Pusher compatible* WebSocket server. 
+
+| Kubernetes Resource | Purpose | Required/Optional | Source |
+|:-------------------:|:-------:|:-----------------:|:------:|
+| WebSocket Deployment | Manages the WebSocket server containers (Soketi) | Optional | [websocket-server.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/websocket-server.yaml) |
+| WebSocket Service | Exposes the WebSocket deployment within the cluster | Required (if WebSocket enabled) | [websocket-server-service.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/websocket-server-service.yaml) |
+| WebSocket Ingress | Exposes the WebSocket service to external traffic | Optional | [websocket-server-ingress.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/websocket-server-ingress.yaml) |
+| WebSocket Route (OpenShift) | Exposes the WebSocket service to external traffic on OpenShift | Optional | [websocket-server-route.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/websocket-server-route.yaml) |
+| WebSocket HorizontalPodAutoscaler | Automatically scales the number of WebSocket server pods | Optional | [websocket-server-hpa.yaml](https://github.com/keephq/helm-charts/blob/main/charts/keep/templates/websocket-server-hpa.yaml) |
+
+These tables provide a comprehensive overview of the Kubernetes resources used in the Keep architecture, organized by component type. Each table describes the purpose of each resource, indicates whether it's required or optional, and provides a direct link to the source template in the Keep Helm charts GitHub repository.
+
+
+### Kubernetes Configuration
+This section covers only Kubernetes-specific configuration. To learn about Keep-specific configuration, controlled by environment variables, see [Keep Configuration](/deployment/configuration)
+
+Each of these components can be customized via the `values.yaml` file in the Helm chart.
+
+
+Below are key configurations that can be adjusted for each component.
+
+#### 1. Frontend Configuration
+```yaml
+frontend:
+  enabled: true # Enable or disable the frontend deployment.
+  replicaCount: 1 # Number of frontend replicas.
+  image:
+    repository: us-central1-docker.pkg.dev/keephq/keep/keep-ui
+    pullPolicy: Always # Image pull policy (Always, IfNotPresent).
+    tag: latest
+  serviceAccount:
+    create: true # Create a new service account.
+    name: "" # Service account name (empty for default).
+  podAnnotations: {} # Annotations for frontend pods.
+  podSecurityContext: {} # Security context for the frontend pods.
+  securityContext: {} # Security context for the containers.
+  service:
+    type: ClusterIP # Service type (ClusterIP, NodePort, LoadBalancer).
+    port: 3000 # Port on which the frontend service is exposed.
+  # Enable or disable frontend ingress.
+  ingress:
+    enabled: true
+    hosts:
+      - host: keep.yourcompany.com
+        paths:
+          - path: /
+            pathType: Prefix
+    tls:
+      - hosts:
+          - keep.yourcompany.com
+        secretName: frontend-tls # Secret for TLS certificates.
+```
+
+#### 2. Backend Configuration
+```yaml
+backend:
+  enabled: true # Enable or disable the backend deployment.
+  replicaCount: 1 # Number of backend replicas.
+  image:
+    repository: us-central1-docker.pkg.dev/keephq/keep/keep-api
+    pullPolicy: Always # Image pull policy (Always, IfNotPresent).
+  serviceAccount:
+    create: true # Create a new service account.
+    name: "" # Service account name (empty for default).
+  podAnnotations: {} # Annotations for backend pods.
+  podSecurityContext: {} # Security context for backend pods.
+  securityContext: {} # Security context for containers.
+  service:
+    type: ClusterIP # Service type (ClusterIP, NodePort, LoadBalancer).
+    port: 8080 # Port on which the backend API is exposed.
+  ingress:
+    enabled: true # Enable or disable backend ingress.
+    hosts:
+      - paths:
+          - path: /
+            pathType: Prefix
+```
+
+#### 3. WebSocket Server Configuration
+Keep uses Soketi as its websocket server. To learn how to configure it, please see the [Soketi docs](https://github.com/soketi/charts/tree/master/charts/soketi).
+
+
+#### 4. Database Configuration
+Keep supports many databases (e.g. PostgreSQL, MySQL, SQLite). It is out of scope to describe here how to deploy all of them to Kubernetes. If you have specific questions - [contact us](https://slack.keephq.dev) and we will be happy to help.
+
+
+
+## Installation
+The recommended way to install Keep on Kubernetes is via Helm Chart.
+
+First, add the Helm repository of Keep and pull the latest version of the chart:
+```bash
+helm repo add keephq https://keephq.github.io/helm-charts
+helm pull keephq/keep
+```
+
+Next, install Keep using:
+```bash
+
+# it is always recommended to install Keep in a separate namespace
+kubectl create ns keep
+
+helm install -n keep keep keephq/keep --set namespace=keep
+```
+
+
+## Expose Keep with port-forward
+Note that for it to work locally, you'll need this port forwarding:
+```
+# expose the UI
+kubectl -n keep port-forward svc/keep-frontend 3000:3000
+```
+
+## Expose Keep with ingress (HTTP)
+Once you are ready to expose Keep to the outside world, Keep's helm chart comes with a pre-configured ingress:
+
+```bash
+kubectl -n keep get ingress
+NAME            CLASS    HOSTS   ADDRESS         PORTS   AGE
+keep-backend    <none>   *       34.54.XXX.XXX   80      75m
+keep-frontend   <none>   *       34.54.XXX.XXX   80      70m
+```
+
+## Expose Keep with ingress (HTTPS)
+
+#### Prerequisites
+
+1. Domain - e.g. keep.yourcompany.com will be used to access the Keep UI.
+2. Certificate - both private key (.pem) and certificate (.crt)
+
+#### Store the certificate as a kubernetes secret
+Assuming the private key is stored as `tls.key` and the certificate as `tls.crt`:
+
+```bash
+kubectl create secret tls frontend-tls --cert=./tls.crt --key=./tls.key -n keep
+
+# you should see:
+secret/frontend-tls created
+```
+
+#### Upgrade Keep to use TLS
+
+Create this `values.yaml`:
+**Note: change keep.yourcompany.com to your domain.**
+
+```yaml
+namespace: keep
+frontend:
+  ingress:
+    enabled: true
+    hosts:
+      - host: keep.yourcompany.com
+        paths:
+          - path: /
+            pathType: Prefix
+    tls:
+      - hosts:
+          - keep.yourcompany.com
+        secretName: frontend-tls
+  env:
+    - name: NEXTAUTH_SECRET
+      value: secret
+    # Changed the NEXTAUTH_URL
+    - name: NEXTAUTH_URL
+      value: https://keep.yourcompany.com
+    # https://github.com/nextauthjs/next-auth/issues/600
+    - name: VERCEL
+      value: "1"
+    - name: API_URL
+      value: http://keep-backend:8080
+    - name: NEXT_PUBLIC_POSTHOG_KEY
+      value: "phc_muk9qE3TfZsX3SZ9XxX52kCGJBclrjhkP9JxAQcm1PZ"
+    - name: NEXT_PUBLIC_POSTHOG_HOST
+      value: https://app.posthog.com
+    - name: ENV
+      value: development
+    - name: NODE_ENV
+      value: development
+    - name: HOSTNAME
+      value: 0.0.0.0
+    - name: PUSHER_HOST
+      value: keep-websocket.default.svc.cluster.local
+    - name: PUSHER_PORT
+      value: "6001"
+    - name: PUSHER_APP_KEY
+      value: "keepappkey"
+
+backend:
+  env:
+    # Added the KEEP_API_URL
+    - name: KEEP_API_URL
+      value: https://keep.yourcompany.com/backend
+    - name: DATABASE_CONNECTION_STRING
+      value: mysql+pymysql://root@keep-database:3306/keep
+    - name: SECRET_MANAGER_TYPE
+      value: k8s
+    - name: PORT
+      value: "8080"
+    - name: PUSHER_APP_ID
+      value: "1"
+    - name: PUSHER_APP_KEY
+      value: keepappkey
+    - name: PUSHER_APP_SECRET
+      value: keepappsecret
+    - name: PUSHER_HOST
+      value: keep-websocket
+    - name: PUSHER_PORT
+      value: "6001"
+database:
+  # this is needed since otherwise helm install fails. If you are using a different storageClass, edit the value here.
+  pvc:
+    storageClass: "standard-rwo"
+```
+
+Now, update Keep:
+```
+helm upgrade -n keep keep keephq/keep -f values.yaml
+```
+
+
+To learn more about Keep's helm chart, see https://github.com/keephq/helm-charts/blob/main/README.md
+
+To discover how to configure Keep using Helm, see the auto-generated helm-docs at https://github.com/keephq/helm-charts/blob/main/charts/keep/README.md
diff --git a/docs/deployment/openshift.mdx b/docs/deployment/openshift.mdx
new file mode 100644
index 0000000000..f53c223b16
--- /dev/null
+++ b/docs/deployment/openshift.mdx
@@ -0,0 +1,15 @@
+---
+title: "OpenShift"
+sidebarTitle: "OpenShift"
+---
+
+Keep's Helm Chart also supports OpenShift installation.
+
+Simply follow the Kubernetes set-up guide, but make sure to modify the following lines under `frontend.route` (and/or `backend.route`) in the `values.yaml` file:
+```
+enabled: true
+host:
+path: # should be / for default
+tls:
+wildcardPolicy:
+```
diff --git a/docs/deployment/provision/dashboard.mdx b/docs/deployment/provision/dashboard.mdx
new file mode 100644
index 0000000000..fcb950409a
--- /dev/null
+++ b/docs/deployment/provision/dashboard.mdx
@@ -0,0 +1,83 @@
+---
+title: "Dashboard Provisioning"
+---
+
+Provisioning dashboards in Keep allows you to configure and manage visual representations of your data. This section will guide you through the steps required to set up and provision dashboards.
+
+### Dashboard Provisioning Overview
+
+Dashboards in Keep are configured using JSON strings that define the layout, data sources, and visual components. These configurations can be managed through environment variables or configuration files.
+
+### Environment Variables
+
+To provision dashboards, you need to set the following environment variable:
+
+| Environment Variable | Purpose |
+| -------------------- | ----------------------------------------------- |
+| `KEEP_DASHBOARDS` | JSON string containing dashboard configurations |
+
+### Example Configuration
+
+Here is an example of how to set the `KEEP_DASHBOARDS` environment variable (dumped from the database):
+
+```json
+[
+  {
+    "dashboard_name": "My Dashboard",
+    "dashboard_config": {
+      "layout": [
+        {
+          "i": "w-1728223503577",
+          "x": 0,
+          "y": 0,
+          "w": 3,
+          "h": 3,
+          "minW": 2,
+          "minH": 2,
+          "static": false
+        }
+      ],
+      "widget_data": [
+        {
+          "i": "w-1728223503577",
+          "x": 0,
+          "y": 0,
+          "w": 3,
+          "h": 3,
+          "minW": 2,
+          "minH": 2,
+          "static": false,
+          "thresholds": [
+            { "value": 0, "color": "#22c55e" },
+            { "value": 20, "color": "#ef4444" }
+          ],
+          "preset": {
+            "id": "11111111-1111-1111-1111-111111111111",
+            "name": "feed",
+            "options": [
+              { "label": "CEL", "value": "(!deleted && !dismissed)" },
+              {
+                "label": "SQL",
+                "value": {
+                  "sql": "(deleted=false AND dismissed=false)",
+                  "params": {}
+                }
+              }
+            ],
+            "created_by": null,
+            "is_private": false,
+            "is_noisy": false,
+            "should_do_noise_now": false,
+            "alerts_count": 98,
+            "static": true,
+            "tags": []
+          },
+          "name": "Test"
+        }
+      ]
+    }
+  }
+]
+```
+
+See https://github.com/react-grid-layout/react-grid-layout for more information on the layout configuration options.
diff --git a/docs/deployment/provision/overview.mdx b/docs/deployment/provision/overview.mdx
new file mode 100644
index 0000000000..7d6226f6ab
--- /dev/null
+++ b/docs/deployment/provision/overview.mdx
@@ -0,0 +1,32 @@
+---
+title: "Overview"
+---
+
+Keep supports various deployment and provisioning strategies to accommodate different environments and use cases, from development setups to production deployments.
+
+### Provisioning Options
+
+Keep offers three main provisioning options:
+
+1. [**Provider Provisioning**](/deployment/provision/provider) - Set up and manage data providers for Keep.
+2. [**Workflow Provisioning**](/deployment/provision/workflow) - Configure and manage workflows within Keep.
+3. [**Dashboard Provisioning**](/deployment/provision/dashboard) - Configure and manage dashboards within Keep.
+
+Choosing the right provisioning strategy depends on your specific use case, deployment environment, and scalability requirements. You can read more about each provisioning option in its respective section.
+
+### How To Configure Provisioning
+
+
+  Some provisioning options require additional environment variables. These will
+  be covered in detail on the specific provisioning pages.
+
+
+Provisioning in Keep is controlled through environment variables and configuration files. The main environment variables for provisioning are:
+
+| Provisioning Type | Environment Variable | Purpose |
+| ----------------- | -------------------------- | ------------------------------------------------------ |
+| **Provider** | `KEEP_PROVIDERS` | JSON string containing provider configurations |
+| **Workflow** | `KEEP_WORKFLOWS_DIRECTORY` | Directory path containing workflow configuration files |
+| **Dashboard** | `KEEP_DASHBOARDS` | JSON string containing dashboard configurations |
+
+For more details on each provisioning strategy, including setup instructions and implications, refer to the respective sections.
diff --git a/docs/deployment/provision/provider.mdx b/docs/deployment/provision/provider.mdx
new file mode 100644
index 0000000000..f6993aabf3
--- /dev/null
+++ b/docs/deployment/provision/provider.mdx
@@ -0,0 +1,58 @@
+---
+title: "Providers Provisioning"
+---
+
+For any questions or issues related to provider provisioning, please join our [Slack](https://slack.keephq.dev) community.
+
+Provider provisioning in Keep allows you to set up and manage data providers dynamically. This feature enables you to configure various data sources that Keep can interact with, such as monitoring systems, databases, or other services.
+
+### Configuring Providers
+
+To provision providers, set the `KEEP_PROVIDERS` environment variable with a JSON string containing the provider configurations. Here's an example:
+
+```json
+{
+  "keepVictoriaMetrics": {
+    "type": "victoriametrics",
+    "authentication": {
+      "VMAlertHost": "http://localhost",
+      "VMAlertPort": 1234
+    }
+  },
+  "keepClickhouse1": {
+    "type": "clickhouse",
+    "authentication": {
+      "host": "http://localhost",
+      "port": 1234,
+      "username": "keep",
+      "password": "keep",
+      "database": "keep-db"
+    }
+  }
+}
+```
+
+Spin up Keep with this `KEEP_PROVIDERS` value:
+```bash
+# ENV
+KEEP_PROVIDERS={"keepVictoriaMetrics":{"type":"victoriametrics","authentication":{"VMAlertHost":"http://localhost","VMAlertPort": 1234}},"keepClickhouse1":{"type":"clickhouse","authentication":{"host":"http://localhost","port":"4321","username":"keep","password":"1234","database":"keepdb"}}}
+```
+
+### Supported Providers
+
+Keep supports a wide range of provider types. Each provider type has its own specific configuration requirements.
+To see the full list of supported providers and their detailed configuration options, please refer to our comprehensive provider documentation.
+
+
+### Update Provisioned Providers
+
+Provider configurations can be updated dynamically by changing the `KEEP_PROVIDERS` environment variable.
+
+On every restart, Keep reads this environment variable and determines which providers need to be added or removed.
+
+This process allows for flexible management of data sources without requiring manual intervention. By simply updating the `KEEP_PROVIDERS` variable and restarting the application, you can efficiently add new providers, remove existing ones, or modify their configurations.
+
+The high-level provisioning mechanism:
+1. Keep reads the `KEEP_PROVIDERS` value.
+2. Keep checks if there are any provisioned providers that are no longer in the `KEEP_PROVIDERS` value, and deletes them.
+3. Keep installs all providers from the `KEEP_PROVIDERS` value. A sketch of this flow is shown below.
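+
+For example, a minimal sketch of an update cycle on Kubernetes (the `keep` namespace and `keep-backend` deployment name are assumptions based on the Helm chart defaults; adapt this to however your Keep backend is deployed):
+
+```bash
+# Keep only the VictoriaMetrics provider; the omitted ClickHouse provider
+# will be deprovisioned on the next startup.
+KEEP_PROVIDERS='{"keepVictoriaMetrics":{"type":"victoriametrics","authentication":{"VMAlertHost":"http://localhost","VMAlertPort":1234}}}'
+
+# Update the env var on the deployment; this triggers a rollout, and Keep
+# reconciles providers against the new value when it starts.
+kubectl -n keep set env deployment/keep-backend KEEP_PROVIDERS="$KEEP_PROVIDERS"
+```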
diff --git a/docs/deployment/provision/workflow.mdx b/docs/deployment/provision/workflow.mdx new file mode 100644 index 0000000000..134704dc4f --- /dev/null +++ b/docs/deployment/provision/workflow.mdx @@ -0,0 +1,36 @@ +--- +title: "Workflow Provisioning" +--- + +For any questions or issues related to workflow provisioning, please join our [Slack](https://slack.keephq.dev) community. + +Workflow provisioning in Keep allows you to set up and manage workflows dynamically. This feature enables you to configure various automated processes and tasks within your Keep deployment. + +### Configuring Workflows + +To provision workflows, follow these steps: + +1. Set the `KEEP_WORKFLOWS_DIRECTORY` environment variable to the path of your workflow configuration directory. +2. Create workflow configuration files in the specified directory. + +Example directory structure: +``` +/path/to/workflows/ +โ”œโ”€โ”€ workflow1.yaml +โ”œโ”€โ”€ workflow2.yaml +โ””โ”€โ”€ workflow3.yaml +``` +### Update Provisioned Workflows + +On every restart, Keep reads the `KEEP_WORKFLOWS_DIRECTORY` environment variable and determines which workflows need to be added, removed, or updated. + +This process allows for flexible management of workflows without requiring manual intervention. By simply updating the workflow files in the `KEEP_WORKFLOWS_DIRECTORY` and restarting the application, you can efficiently add new workflows, remove existing ones, or modify their configurations. + +The high-level provisioning mechanism: +1. Keep reads the `KEEP_WORKFLOWS_DIRECTORY` value. +2. Keep lists all workflow files under the `KEEP_WORKFLOWS_DIRECTORY` directory. +3. Keep compares the current workflow files with the previously provisioned workflows: + - New workflow files are provisioned. + - Missing workflow files are deprovisioned. + - Updated workflow files are re-provisioned with the new configuration. +4. Keep updates its internal state to reflect the current set of provisioned workflows. diff --git a/docs/deployment/secret-manager.mdx b/docs/deployment/secret-manager.mdx new file mode 100644 index 0000000000..4de17c68a6 --- /dev/null +++ b/docs/deployment/secret-manager.mdx @@ -0,0 +1,84 @@ +--- +title: "Secret Manager" +sidebarTitle: "Secret Manager" +--- + +## Overview + +Secret Manager selection is crucial for securing your application. Different modes can be set up depending on the deployment type. Our system supports four primary secret manager types. + +## Secret Manager Factory + +The `SecretManagerFactory` is a utility class used to create instances of different types of secret managers. It leverages the Factory design pattern to abstract the creation logic based on the type of secret manager required. The factory supports creating instances of File, GCP, Kubernetes, and Vault Secret Managers. + +The `SECRET_MANAGER_TYPE` environment variable plays a crucial role in the SecretManagerFactory for determining the default type of secret manager to be instantiated when no specific type is provided in the method call. + +**Functionality**: + +**Default Secret Manager**: If the `SECRET_MANAGER_TYPE` environment variable is set, its value dictates the default type of secret manager that the factory will create. +The value of this variable should correspond to one of the types defined in SecretManagerTypes enum (`FILE`, `GCP`, `K8S`, `VAULT`). + +**Example Configuration**: + +Setting `SECRET_MANAGER_TYPE=GCP` in the environment will make the factory create instances of GcpSecretManager by default. 
+If `SECRET_MANAGER_TYPE` is not set or is set to `FILE`, the factory defaults to creating instances of FileSecretManager.
+This environment variable provides flexibility and ease of configuration, allowing different secret managers to be used in different environments or scenarios without code changes.
+
+## File Secret Manager
+
+The `FileSecretManager` is a concrete implementation of the BaseSecretManager for managing secrets stored in the file system. It uses a specified directory (defaulting to ./) to read, write, and delete secret files.
+
+Configuration:
+
+Set the environment variable `SECRET_MANAGER_DIRECTORY` to specify the directory where secrets are stored. If not set, it defaults to the current directory (./).
+
+Usage:
+
+- Secrets are stored as files in the specified directory.
+- Reading a secret involves fetching content from a file.
+- Writing a secret creates or updates a file with the given content.
+- Deleting a secret removes the corresponding file.
+
+## Kubernetes Secret Manager
+
+The `KubernetesSecretManager` interfaces with Kubernetes' native secrets system. It manages secrets within a specified Kubernetes namespace and is designed to operate within a Kubernetes cluster.
+
+Configuration:
+
+Set the `K8S_NAMESPACE` environment variable to specify the Kubernetes namespace. It defaults to `default` if not set. Assumes Kubernetes configurations (like service account tokens) are properly set up when running within a cluster.
+
+Usage:
+
+- Secrets are stored as Kubernetes Secret objects.
+- Provides functionalities to create, retrieve, and delete Kubernetes secrets.
+- Handles base64 encoding and decoding as required by Kubernetes.
+
+## GCP Secret Manager
+
+The `GcpSecretManager` utilizes Google Cloud's Secret Manager service for secret management. It requires setting up with Google Cloud credentials and a project ID.
+
+Configuration:
+
+Ensure the environment variable `GOOGLE_CLOUD_PROJECT` is set with your Google Cloud project ID.
+
+Usage:
+
+- Secrets are managed using Google Cloud's Secret Manager.
+- Supports operations to create, access, and delete secrets in the cloud.
+- Integrates with OpenTelemetry for tracing secret management operations.
+
+## HashiCorp Vault Secret Manager
+
+The `VaultSecretManager` is tailored for HashiCorp Vault, a tool for managing sensitive data. It supports token-based authentication as well as Kubernetes-based authentication for Vault.
+
+Configuration:
+
+- Set `HASHICORP_VAULT_ADDR` to the Vault server address. Defaults to http://localhost:8200.
+- Use `HASHICORP_VAULT_TOKEN` for token-based authentication.
+- Set `HASHICORP_VAULT_USE_K8S` to True and provide `HASHICORP_VAULT_K8S_ROLE` for Kubernetes-based authentication.
+
+Usage:
+
+- Manages secrets in a HashiCorp Vault server.
+- Provides methods to write, read, and delete secrets from Vault.
+- Supports different Vault authentication methods, including static tokens and Kubernetes service account tokens.
diff --git a/docs/deployment/stress-testing.mdx b/docs/deployment/stress-testing.mdx
new file mode 100644
index 0000000000..9f9dc97166
--- /dev/null
+++ b/docs/deployment/stress-testing.mdx
@@ -0,0 +1,112 @@
+---
+title: ""
+sidebarTitle: "Specifications"
+---
+
+# Specifications and Stress Testing of Keep
+If you are using Keep and have performance issues, we will be more than happy to help you. Just join our [Slack](https://slack.keephq.dev) and shoot a message in the **#help** channel.
+
+## Overview
+
+Spec and stress testing are crucial to ensuring the robust performance and scalability of Keep.
+This documentation outlines the key areas of focus for testing Keep under different load conditions, considering both the simplicity of setup for smaller environments and the scalability mechanisms for larger deployments.
+
+Keep was initially designed to be user-friendly for setups handling less than 10,000 alerts. However, as alert volumes increase, users can leverage advanced features such as Elasticsearch for document storage and Redis + ARQ for queue-based alert ingestion. While these advanced configurations are not fully documented here, they are supported and can be discussed further in our Slack community.
+
+## How To Reproduce
+
+To reproduce the stress testing scenarios described on this page, please refer to the [STRESS.md](https://github.com/keephq/keep/blob/main/STRESS.md) file in Keep's repository. This document provides step-by-step instructions on how to set up, run, and measure the performance of Keep under different load conditions. A minimal load-generation sketch is also shown after the testing scenarios below.
+
+## Performance Testing
+
+### Factors Affecting Specifications
+
+The primary parameters that affect the specification requirements for Keep are:
+1. **Alerts Volume**: The rate at which alerts are ingested into the system.
+2. **Total Alerts**: The cumulative number of alerts stored in the system.
+3. **Number of Workflows**: How many automations run as a result of alerts.
+
+### Main Components:
+- **Keep Backend** - API and business logic. A container that serves FastAPI on top of gunicorn.
+- **Keep Frontend** - Web app. A container that serves the React app.
+- **Database** - Stores the alerts and any other operational data.
+- **Elasticsearch** (disabled by default) - Stores alerts as documents for better search performance.
+- **Redis** (disabled by default) - Used, together with ARQ, as an alerts queue.
+
+### Testing Scenarios:
+
+1. **Low Volume (< 10,000 total alerts, hundreds of alerts per day)**:
+   - **Setup**: Use a standard relational database (e.g., MySQL, PostgreSQL) with default configurations.
+   - **Expectations**: Keep should handle queries and alert ingestion with minimal resource usage.
+
+2. **Medium Volume (10,000 - 100,000 total alerts, thousands of alerts per day)**:
+   - **Setup**: Scale the database to larger instances or clusters. Apply database best practices (e.g., increasing `innodb_buffer_pool_size`).
+   - **Expectations**: CPU and RAM usage should increase proportionally but remain within acceptable limits.
+
+3. **High Volume (100,000 - 1,000,000 total alerts, more than 5,000 alerts per day)**:
+   - **Setup**: Deploy Keep with Elasticsearch for storing alerts as documents.
+   - **Expectations**: The system should maintain performance levels despite the large alert volume, with increased resource usage managed through scaling strategies.
+
+4. **Very High Volume (> 1,000,000 total alerts, tens of thousands of alerts per day)**:
+   - **Setup**: Deploy Keep with Elasticsearch for storing alerts as documents.
+   - **Setup #2**: Deploy Keep with Redis and ARQ to use Redis as a queue.
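+
+As a quick illustration of what generating load against Keep looks like, the sketch below fires synthetic alerts at roughly 100 per minute. The endpoint path and payload fields are assumptions made for illustration only; the canonical load scripts live in STRESS.md and `scripts/simulate_alerts.py` in Keep's repository.
+
+```bash
+# Assumes KEEP_API_URL and KEEP_API_KEY are exported.
+# The /alerts/event/keep path and the payload fields are illustrative only.
+for i in $(seq 1 100); do
+  curl -s -X POST "$KEEP_API_URL/alerts/event/keep" \
+    -H "X-API-KEY: $KEEP_API_KEY" \
+    -H "Content-Type: application/json" \
+    -d "{\"name\": \"synthetic-alert-$i\", \"severity\": \"warning\"}" > /dev/null
+  sleep 0.6  # ~100 requests/minute
+done
+```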
+
+## Recommended Specifications by Alert Volume
+
+| **Number of Alerts** | **Keep Backend** | **Keep Database** | **Redis** | **Elasticsearch** |
+|----------------------|------------------|-------------------|-----------|-------------------|
+| **< 10,000** | 1 vCPU, 2GB RAM | 2 vCPUs, 8GB RAM | Not required | Not required |
+| **10,000 - 100,000** | 4 vCPUs, 8GB RAM | 8 vCPUs, 32GB RAM, optimized indexing | Not required | Not required |
+| **100,000 - 500,000** | 8 vCPUs, 16GB RAM | 8 vCPUs, 32GB RAM, advanced indexing | 4 vCPUs, 8GB RAM | 8 vCPUs, 32GB RAM, 2-3 nodes |
+| **> 500,000** | 8 vCPUs, 16GB RAM | 8 vCPUs, 32GB RAM, advanced indexing, sharding | 4 vCPUs, 8GB RAM | 8 vCPUs, 32GB RAM, 2-3 nodes |
+
+## Performance by Operation Type, Load, and Specification
+
+| **Operation Type** | **Load** | **Specification** | **Execution Time** |
+|--------------------|----------|-------------------|--------------------|
+| Digest Alert | 100 alerts per minute | 4 vCPUs, 8GB RAM | ~0.5 seconds |
+| Digest Alert | 500 alerts per minute | 8 vCPUs, 16GB RAM | ~1 second |
+| Digest Alert | 1,000 alerts per minute | 16 vCPUs, 32GB RAM | ~1.5 seconds |
+| Run Workflow | 10 workflows per minute | 4 vCPUs, 8GB RAM | ~1 second |
+| Run Workflow | 50 workflows per minute | 8 vCPUs, 16GB RAM | ~2 seconds |
+| Run Workflow | 100 workflows per minute | 16 vCPUs, 32GB RAM | ~3 seconds |
+| Ingest via Queue | 100 alerts per minute | 4 vCPUs, 8GB RAM, Redis | ~0.3 seconds |
+| Ingest via Queue | 500 alerts per minute | 8 vCPUs, 16GB RAM, Redis | ~0.8 seconds |
+| Ingest via Queue | 1,000 alerts per minute | 16 vCPUs, 32GB RAM, Redis | ~1.2 seconds |
+
+### Table Explanation:
+- **Operation Type**: The specific operation being tested (e.g., digesting alerts, running workflows).
+- **Load**: The number of operations per minute being processed (e.g., number of alerts per minute).
+- **Specification**: The CPU, RAM, and additional services used for the operation.
+- **Execution Time**: Approximate time taken to complete the operation under the given load and specification.
+
+
+## Fine-Tuning
+
+As every deployment has its own characteristics, such as the balance between volume vs. total count of alerts or volume vs. number of workflows, Keep can be fine-tuned with the following parameters:
+
+1. **Number of Workers**: Adjust the number of Gunicorn workers to handle API requests more efficiently. You can also start additional API servers to distribute the load.
+2. **Distinguish Between API Server Workers and Alert-Digesting Workers**: Separate the workers dedicated to handling API requests from those responsible for digesting alerts, ensuring that each set of tasks is optimized according to its specific needs.
+3. **Add More RAM to the Database**: Increasing the RAM allocated to your database can help manage larger datasets and improve query performance, particularly when dealing with high volumes of alerts.
+4. **Optimize Database Configuration**: Keep was mainly tested on MySQL and PostgreSQL. Different databases may have different fine-tuning mechanisms.
+5. **Horizontal Scaling**: Consider deploying additional instances of the API and database services to distribute the load more effectively.
+
+
+
+## FAQ
+
+### 1. How do I estimate the spec I need for Keep?
+To estimate the specifications required for Keep, consider both the number of alerts per minute and the total number of alerts you expect to handle. Refer to the **Recommended Specifications by Alert Volume** table above to match your expected load with the appropriate resources.
+
+### 2. How do I know if I need Elasticsearch?
+Elasticsearch is typically needed when you are dealing with more than 50,000 total alerts or if you require advanced search and query capabilities that are not efficiently handled by a traditional relational database. If your system's performance degrades significantly as alert volume increases, it may be time to consider Elasticsearch.
+
+### 3. How do I know if I need Redis?
+Redis is recommended when your alert ingestion rate exceeds 1,000 alerts per minute or when you notice that the API is becoming a bottleneck due to high ingestion rates. Redis, combined with ARQ (Asynchronous Redis Queue), can help manage and distribute the load more effectively.
+
+### 4. What should I do if Keep's performance is still inadequate?
+If you have scaled according to the recommendations and are still facing performance issues, consider:
+- **Optimizing your database configuration**: Indexing, sharding, and query optimization can make a significant difference.
+- **Horizontal scaling**: Distribute the load across multiple instances of the API and database services.
+- **Reaching out to our Slack community**: For personalized support, reach out to us on Slack, and we'll help you troubleshoot and optimize your Keep deployment.
+
+For any additional questions or tailored advice, feel free to join our Slack community where our team and other users are available to assist you.
diff --git a/docs/development/external-url.mdx b/docs/development/external-url.mdx
new file mode 100644
index 0000000000..e73ed2b99d
--- /dev/null
+++ b/docs/development/external-url.mdx
@@ -0,0 +1,26 @@
+---
+title: "Keep with an external URL"
+sidebarTitle: "Keep with an external URL"
+---
+
+## Introduction
+Several features in Keep require an external URL that is accessible from the internet. This is particularly crucial for functionality like webhook integration when installing providers: Keep uses its API URL to register itself as a webhook connector during this process.
+
+When an alert is triggered, the corresponding provider attempts to invoke the webhook, delivering the alert payload. Consequently, the webhook must be accessible over the internet for this process to work.
+
+## Utilizing ngrok for External Accessibility
+
+
+Keep supports the use of ngrok to create an accessible external URL. By starting Keep with the environment variable `USE_NGROK=true`, Keep will automatically initiate an ngrok tunnel and use this URL for webhook installations.
+
+
+  While `USE_NGROK` is convenient for development or testing, it's important to note that each restart of Keep results in a new ngrok URL. This change in the URL means that providers configured with the old URL will no longer be able to communicate with Keep.
+
+
+  For production environments, it's advisable to either:
+  - Expose Keep with a permanent, internet-accessible URL.
+  - Set up a static ngrok tunnel.
+
+  Subsequently, configure Keep to use this stable URL by setting the `KEEP_API_URL` environment variable.
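+
+  A minimal sketch of both modes (the hostname is a placeholder; `keep api` is the CLI command used elsewhere in these docs to start the backend):
+
+  ```bash
+  # Development: Keep opens an ngrok tunnel on startup and registers it for webhooks
+  USE_NGROK=true keep api
+
+  # Production: point Keep at a stable, internet-accessible URL instead
+  USE_NGROK=false KEEP_API_URL=https://keep-api.yourcompany.com keep api
+  ```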
+
+
diff --git a/docs/development/getting-started.mdx b/docs/development/getting-started.mdx
new file mode 100644
index 0000000000..1e84b4bfaa
--- /dev/null
+++ b/docs/development/getting-started.mdx
@@ -0,0 +1,427 @@
+---
+title: "Getting started"
+sidebarTitle: "Getting started"
+---
+
+### Docker-compose dev images
+You can use `docker-compose.dev.yml` to start Keep in development mode.
+
+First, clone the Keep repo:
+```
+git clone https://github.com/keephq/keep.git && cd keep
+```
+
+Next, run:
+```
+docker compose -f docker-compose.dev.yml up
+```
+
+### Install Keep CLI
+
+First, clone the Keep repository:
+
+```shell
+git clone https://github.com/keephq/keep.git && cd keep
+```
+
+Install the Keep CLI:
+
+```shell
+poetry install
+```
+
+To access the Keep CLI, activate the Poetry environment:
+
+```shell
+poetry shell
+```
+
+From now on, Keep should be installed locally and accessible from your CLI. Test it by executing:
+
+```
+keep version
+```
+
+## Enable Auto Completion
+
+**Keep's CLI supports shell auto-completion, which can make your life a whole lot easier 😌**
+
+If you're using zsh
+
+```shell title=~/.zshrc
+eval "$(_KEEP_COMPLETE=zsh_source keep)"
+```
+
+If you're using bash
+
+```bash title=~/.bashrc
+eval "$(_KEEP_COMPLETE=bash_source keep)"
+```
+
+> Using eval means that the command is invoked and evaluated every time a shell is started, which can delay shell responsiveness. To speed it up, write the generated script to a file, then source that.
+
+
+### Testing
+
+Run unit tests:
+```bash
+poetry run coverage run --branch -m pytest --ignore=tests/e2e_tests/
+```
+
+Run E2E tests (Keep must be running locally first):
+```bash
+poetry run playwright install;
+poetry run coverage run --branch -m pytest -s tests/e2e_tests/
+```
+
+### Migrations
+
+Migrations are automatically executed on server startup. To create a migration:
+```bash
+alembic -c keep/alembic.ini revision --autogenerate -m "Your message"
+```
+
+Hint: make sure your models are imported in `./api/models/db/migrations/env.py` so the autogenerator picks them up.
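+
+If you ever need to run migrations by hand (for example, against a throwaway dev database), the standard Alembic commands should work with the same config file. This is a sketch rather than a required step, since migrations run automatically at startup:
+
+```bash
+# apply all pending migrations
+alembic -c keep/alembic.ini upgrade head
+
+# show the revision the database is currently at
+alembic -c keep/alembic.ini current
+```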
+
+## VSCode
+You can run Keep from your VSCode (after cloning the repo) by adding these configurations to your `.vscode/launch.json`:
+
+```json
+{
+  "version": "0.2.0",
+  "configurations": [
+    {
+      "name": "Keep Backend",
+      "type": "debugpy",
+      "request": "launch",
+      "program": "keep/cli/cli.py",
+      "console": "integratedTerminal",
+      "justMyCode": false,
+      "python": "venv/bin/python",
+      "args": ["--json", "api", "--multi-tenant"],
+      "env": {
+        "PYDEVD_DISABLE_FILE_VALIDATION": "1",
+        "PYTHONPATH": "${workspaceFolder}/",
+        "PUSHER_APP_ID": "1",
+        "SECRET_MANAGER_DIRECTORY": "./state/",
+        "PUSHER_HOST": "localhost",
+        "PUSHER_PORT": "6001",
+        "PUSHER_APP_KEY": "keepappkey",
+        "PUSHER_APP_SECRET": "keepappsecret",
+        "LOG_FORMAT": "dev_terminal"
+      }
+    },
+    {
+      "name": "Keep Simulate Alerts",
+      "type": "debugpy",
+      "request": "launch",
+      "program": "scripts/simulate_alerts.py",
+      "console": "integratedTerminal",
+      "justMyCode": false,
+      "python": "venv/bin/python",
+      "env": {
+        "PYDEVD_DISABLE_FILE_VALIDATION": "1",
+        "PYTHONPATH": "${workspaceFolder}/",
+        "KEEP_API_URL": "http://localhost:8080",
+        "KEEP_API_KEY": "some-api-key"
+      }
+    },
+    {
+      "name": "Keep Frontend",
+      "type": "node-terminal",
+      "request": "launch",
+      "command": "npm run dev",
+      "cwd": "${workspaceFolder}/keep-ui"
+    }
+  ]
+}
+```
+
+Install dependencies:
+```
+python3.11 -m venv venv;
+source venv/bin/activate;
+pip install poetry;
+poetry install;
+cd keep-ui && npm i && cd ..;
+```
+
+Set frontend envs:
+```
+cp keep-ui/.env.local.example keep-ui/.env.local;
+echo "\n\n\n\nNEXTAUTH_SECRET="$(openssl rand -hex 32) >> keep-ui/.env.local;
+```
+
+Launch the Pusher-compatible ([Soketi](https://soketi.app/)) container in parallel:
+```bash
+docker run -d -p 6001:6001 -p 9601:9601 -e SOKETI_USER_AUTHENTICATION_TIMEOUT=3000 -e SOKETI_DEFAULT_APP_KEY=keepappkey -e SOKETI_DEFAULT_APP_SECRET=keepappsecret -e SOKETI_DEFAULT_APP_ID=1 quay.io/soketi/soketi:1.4-16-debian
+```
+
+
+## VSCode + Docker
+For this guide to work, the VSCode Docker extension is required.
+
+In cases where you want to develop Keep but are unable to run it directly on your local laptop (e.g., with Windows), or if you lack access to all of its dependencies (e.g., in air-gapped environments), you can still accomplish this using VSCode and Docker. In air-gapped environments, you might consider building the containers on an internet-connected computer, exporting the images using `docker save`, transferring them with `docker load` to the air-gapped environment, and then using the run configuration.
+
+To achieve this, follow these steps:
+
+1. Clone Keep and open it with VSCode
+2. Create a tasks.json file to build and run the Keep API and Keep UI containers.
+3. Create a launch.json configuration to start the containers and attach a debugger to them.
+4. Profit.
+
+
+### Clone Keep and open it with VSCode
+```
+git clone https://github.com/keephq/keep.git && cd keep
+code .
+``` + +### Create tasks.json + +#### including building the containers +``` +{ + "version": "2.0.0", + "tasks": [ + # The API and UI containers needs to be in the same docker network + { + "label": "docker-create-network", + "type": "shell", + "command": "docker network create keep-network || true", + "problemMatcher": [] + }, + # Build the api container + { + "label": "docker-build-api-dev", + "type": "docker-build", + "dockerBuild": { + "context": "${workspaceFolder}", + "dockerfile": "${workspaceFolder}/Docker/Dockerfile.dev.api", + "tag": "keep-api-dev:latest" + } + }, + # Run the api container + { + "label": "docker-run-api-dev", + "type": "docker-run", + "dependsOn": [ + "docker-build-api-dev", "docker-create-network" + ], + "python": { + "args": [ + "api" + ], + "file": "./keep/cli/cli.py" + }, + "dockerRun": { + "network": "keep-network", + "image": "keep-api-dev:latest", + "containerName": "keep-api", + "ports": [ + { + "containerPort": 8080, + "hostPort": 8080 + } + ], + "env": { + "DEBUG": "1", + "SECRET_MANAGER_TYPE": "FILE", + "USE_NGROK": "false", + "AUTH_TYPE": "SINGLE_TENANT" + }, + "volumes": [ + { + "containerPath": "/app", + "localPath": "${workspaceFolder}" + } + ] + } + }, + # Build the UI container + { + "label": "docker-build-ui", + "type": "docker-build", + "dockerBuild": { + "context": "${workspaceFolder}", + "dockerfile": "${workspaceFolder}/Docker/Dockerfile.dev.ui", + "tag": "keep-ui-dev:latest" + } + }, + # Run the UI container + { + "type": "docker-run", + "label": "docker-run-ui", + "dependsOn": [ + "docker-build-ui", "docker-create-network" + ], + "dockerRun": { + "network": "keep-network", + "image": "keep-ui-dev:latest", + "containerName": "keep-ui", + "env": { + // Uncomment for fully debug + // "DEBUG": "*", + "NODE_ENV": "development", + "API_URL": "http://keep-api:8080" + "AUTH_TYPE": "SINGLE_TENANT", + }, + "volumes": [ + { + "containerPath": "/app", + "localPath": "${workspaceFolder}/keep-ui" + } + ], + "ports": [ + { + "containerPort": 9229, + "hostPort": 9229 + }, + { + "containerPort": 3000, + "hostPort": 3000 + } + ], + "command": "npm run dev", + }, + "node": { + "package": "${workspaceFolder}/keep-ui/package.json", + "enableDebugging": true + } + } + ] +} + +``` + +#### without building the containers +To start Keep without building the containers, you'll need to have `keep-api-dev` and `keep-ui-dev` images loaded into your docker. 
+
+```
+{
+  "version": "2.0.0",
+  "tasks": [
+    // The API and the UI need to be in the same docker network
+    {
+      "label": "docker-create-network",
+      "type": "shell",
+      "command": "docker network create keep-network || true",
+      "problemMatcher": []
+    },
+    // Run the API container
+    {
+      "label": "docker-run-api-dev",
+      "type": "docker-run",
+      "dependsOn": [
+        "docker-create-network"
+      ],
+      "python": {
+        "args": [
+          "api"
+        ],
+        "file": "./keep/cli/cli.py"
+      },
+      "dockerRun": {
+        "network": "keep-network",
+        "image": "keep-api-dev:latest",
+        "containerName": "keep-api",
+        "ports": [
+          {
+            "containerPort": 8080,
+            "hostPort": 8080
+          }
+        ],
+        "env": {
+          "DEBUG": "1",
+          "SECRET_MANAGER_TYPE": "FILE",
+          "USE_NGROK": "false",
+          "AUTH_TYPE": "SINGLE_TENANT"
+        },
+        "volumes": [
+          {
+            "containerPath": "/app",
+            "localPath": "${workspaceFolder}"
+          }
+        ]
+      }
+    },
+    // Run the UI container
+    {
+      "type": "docker-run",
+      "label": "docker-run-ui",
+      "dependsOn": [
+        "docker-create-network"
+      ],
+      "dockerRun": {
+        "network": "keep-network",
+        "image": "keep-ui-dev:latest",
+        "containerName": "keep-ui",
+        "env": {
+          // Uncomment for full debug output
+          // "DEBUG": "*",
+          "NODE_ENV": "development",
+          "API_URL": "http://keep-api:8080",
+          "AUTH_TYPE": "SINGLE_TENANT"
+        },
+        "volumes": [
+          {
+            "containerPath": "/app",
+            "localPath": "${workspaceFolder}/keep-ui"
+          }
+        ],
+        "ports": [
+          {
+            "containerPort": 9229,
+            "hostPort": 9229
+          },
+          {
+            "containerPort": 3000,
+            "hostPort": 3000
+          }
+        ],
+        "command": "npm run dev"
+      },
+      "node": {
+        "package": "${workspaceFolder}/keep-ui/package.json",
+        "enableDebugging": true
+      }
+    }
+  ]
+}
+```
+
+### Create launch.json
+
+Add these two configurations to the `configurations` array of your `.vscode/launch.json`:
+
+```
+  {
+    "name": "Docker: Keep API",
+    "type": "docker",
+    "request": "launch",
+    "preLaunchTask": "docker-run-api-dev",
+    "removeContainerAfterDebug": true,
+    "containerName": "keep-api",
+    "python": {
+      "pathMappings": [
+        {
+          "localRoot": "${workspaceFolder}",
+          "remoteRoot": "/app"
+        }
+      ],
+      "module": "keep.cli.cli"
+    }
+  },
+  {
+    "name": "Docker: Keep UI",
+    "type": "docker",
+    "request": "launch",
+    "removeContainerAfterDebug": true,
+    "preLaunchTask": "docker-run-ui",
+    "containerName": "keep-ui",
+    "platform": "node",
+    "node": {
+      "package": "${workspaceFolder}/keep-ui/package.json",
+      "localRoot": "${workspaceFolder}/keep-ui"
+    }
+  },
+```
diff --git a/docs/favicon.svg b/docs/favicon.svg
new file mode 100644
index 0000000000..f43e99dad8
--- /dev/null
+++ b/docs/favicon.svg
@@ -0,0 +1,68 @@
+<!-- 68 lines of SVG markup stripped during extraction -->
diff --git a/docs/images/addworkflow.png b/docs/images/addworkflow.png
new file mode 100644
index 0000000000..daecad9bdd
Binary files /dev/null and b/docs/images/addworkflow.png differ
diff --git a/docs/images/alerthistory.png b/docs/images/alerthistory.png
new file mode 100644
index 0000000000..17fff3a18b
Binary files /dev/null and b/docs/images/alerthistory.png differ
diff --git a/docs/images/alertspage.png b/docs/images/alertspage.png
new file mode 100644
index 0000000000..b2a12b0cf3
Binary files /dev/null and b/docs/images/alertspage.png differ
diff --git a/docs/images/alertspushed.png b/docs/images/alertspushed.png
new file mode 100644
index 0000000000..d57b7e78af
Binary files /dev/null and b/docs/images/alertspushed.png differ
diff --git a/docs/images/auth0auth.png b/docs/images/auth0auth.png
new file mode 100644
index 0000000000..115c972566
Binary files /dev/null and b/docs/images/auth0auth.png differ
diff --git
a/docs/images/azuremonitoring_1.png b/docs/images/azuremonitoring_1.png new file mode 100644 index 0000000000..b9636b27f2 Binary files /dev/null and b/docs/images/azuremonitoring_1.png differ diff --git a/docs/images/azuremonitoring_2.png b/docs/images/azuremonitoring_2.png new file mode 100644 index 0000000000..58b26d4367 Binary files /dev/null and b/docs/images/azuremonitoring_2.png differ diff --git a/docs/images/azuremonitoring_3.png b/docs/images/azuremonitoring_3.png new file mode 100644 index 0000000000..c76740efd8 Binary files /dev/null and b/docs/images/azuremonitoring_3.png differ diff --git a/docs/images/azuremonitoring_4.png b/docs/images/azuremonitoring_4.png new file mode 100644 index 0000000000..b068c8a811 Binary files /dev/null and b/docs/images/azuremonitoring_4.png differ diff --git a/docs/images/azuremonitoring_5.png b/docs/images/azuremonitoring_5.png new file mode 100644 index 0000000000..0935013c68 Binary files /dev/null and b/docs/images/azuremonitoring_5.png differ diff --git a/docs/images/azuremonitoring_6.png b/docs/images/azuremonitoring_6.png new file mode 100644 index 0000000000..3ee7ab07b0 Binary files /dev/null and b/docs/images/azuremonitoring_6.png differ diff --git a/docs/images/azuremonitoring_7.png b/docs/images/azuremonitoring_7.png new file mode 100644 index 0000000000..546a5a3271 Binary files /dev/null and b/docs/images/azuremonitoring_7.png differ diff --git a/docs/images/chart_example_1.webp b/docs/images/chart_example_1.webp new file mode 100644 index 0000000000..48692f0ac2 Binary files /dev/null and b/docs/images/chart_example_1.webp differ diff --git a/docs/images/chart_example_2.webp b/docs/images/chart_example_2.webp new file mode 100644 index 0000000000..83d0105e67 Binary files /dev/null and b/docs/images/chart_example_2.webp differ diff --git a/docs/images/connect-provider.png b/docs/images/connect-provider.png new file mode 100644 index 0000000000..68d75b2aef Binary files /dev/null and b/docs/images/connect-provider.png differ diff --git a/docs/images/connectmissingprovider.png b/docs/images/connectmissingprovider.png new file mode 100644 index 0000000000..9931f39894 Binary files /dev/null and b/docs/images/connectmissingprovider.png differ diff --git a/docs/images/connectmissingproviderinstallation.png b/docs/images/connectmissingproviderinstallation.png new file mode 100644 index 0000000000..5c0cc1cb24 Binary files /dev/null and b/docs/images/connectmissingproviderinstallation.png differ diff --git a/docs/images/console_provider_example.png b/docs/images/console_provider_example.png new file mode 100644 index 0000000000..f120c882e7 Binary files /dev/null and b/docs/images/console_provider_example.png differ diff --git a/docs/images/coralogix-provider_1.png b/docs/images/coralogix-provider_1.png new file mode 100644 index 0000000000..b6965352d2 Binary files /dev/null and b/docs/images/coralogix-provider_1.png differ diff --git a/docs/images/coralogix-provider_2.png b/docs/images/coralogix-provider_2.png new file mode 100644 index 0000000000..2a127a10f2 Binary files /dev/null and b/docs/images/coralogix-provider_2.png differ diff --git a/docs/images/coralogix-provider_3.png b/docs/images/coralogix-provider_3.png new file mode 100644 index 0000000000..44efcf61c8 Binary files /dev/null and b/docs/images/coralogix-provider_3.png differ diff --git a/docs/images/coralogix-provider_4.png b/docs/images/coralogix-provider_4.png new file mode 100644 index 0000000000..1038a0b91b Binary files /dev/null and b/docs/images/coralogix-provider_4.png differ 
diff --git a/docs/images/coralogix-provider_5.png b/docs/images/coralogix-provider_5.png new file mode 100644 index 0000000000..dbc7081e80 Binary files /dev/null and b/docs/images/coralogix-provider_5.png differ diff --git a/docs/images/coralogix-provider_6.png b/docs/images/coralogix-provider_6.png new file mode 100644 index 0000000000..f675d0102f Binary files /dev/null and b/docs/images/coralogix-provider_6.png differ diff --git a/docs/images/dbauth.png b/docs/images/dbauth.png new file mode 100644 index 0000000000..bd46933bdb Binary files /dev/null and b/docs/images/dbauth.png differ diff --git a/docs/images/deleteworkflow.png b/docs/images/deleteworkflow.png new file mode 100644 index 0000000000..cbd39edc24 Binary files /dev/null and b/docs/images/deleteworkflow.png differ diff --git a/docs/images/ecs-task-def-backend1.png b/docs/images/ecs-task-def-backend1.png new file mode 100644 index 0000000000..cd79c14292 Binary files /dev/null and b/docs/images/ecs-task-def-backend1.png differ diff --git a/docs/images/ecs-task-def-backend2.png b/docs/images/ecs-task-def-backend2.png new file mode 100644 index 0000000000..e6ff04309b Binary files /dev/null and b/docs/images/ecs-task-def-backend2.png differ diff --git a/docs/images/ecs-task-def-backend3.png b/docs/images/ecs-task-def-backend3.png new file mode 100644 index 0000000000..917fd2f1ad Binary files /dev/null and b/docs/images/ecs-task-def-backend3.png differ diff --git a/docs/images/ecs-task-def-backend4.png b/docs/images/ecs-task-def-backend4.png new file mode 100644 index 0000000000..ba7b8750b0 Binary files /dev/null and b/docs/images/ecs-task-def-backend4.png differ diff --git a/docs/images/ecs-task-def-backend5.png b/docs/images/ecs-task-def-backend5.png new file mode 100644 index 0000000000..eaef8d56c7 Binary files /dev/null and b/docs/images/ecs-task-def-backend5.png differ diff --git a/docs/images/ecs-task-def-backend6.png b/docs/images/ecs-task-def-backend6.png new file mode 100644 index 0000000000..e47b91ca75 Binary files /dev/null and b/docs/images/ecs-task-def-backend6.png differ diff --git a/docs/images/ecs-task-def-create-new.png b/docs/images/ecs-task-def-create-new.png new file mode 100644 index 0000000000..9cfd0904a4 Binary files /dev/null and b/docs/images/ecs-task-def-create-new.png differ diff --git a/docs/images/ecs-task-def-create.png b/docs/images/ecs-task-def-create.png new file mode 100644 index 0000000000..ef309433b7 Binary files /dev/null and b/docs/images/ecs-task-def-create.png differ diff --git a/docs/images/ecs-task-def-frontend1.png b/docs/images/ecs-task-def-frontend1.png new file mode 100644 index 0000000000..2744431c4c Binary files /dev/null and b/docs/images/ecs-task-def-frontend1.png differ diff --git a/docs/images/ecs-task-def-frontend2.png b/docs/images/ecs-task-def-frontend2.png new file mode 100644 index 0000000000..135d8b8ea4 Binary files /dev/null and b/docs/images/ecs-task-def-frontend2.png differ diff --git a/docs/images/ecs-task-def-frontend3.png b/docs/images/ecs-task-def-frontend3.png new file mode 100644 index 0000000000..8500e73b48 Binary files /dev/null and b/docs/images/ecs-task-def-frontend3.png differ diff --git a/docs/images/ecs-task-def-frontend4.png b/docs/images/ecs-task-def-frontend4.png new file mode 100644 index 0000000000..3402d3d665 Binary files /dev/null and b/docs/images/ecs-task-def-frontend4.png differ diff --git a/docs/images/ecs-task-def-websocket1.png b/docs/images/ecs-task-def-websocket1.png new file mode 100644 index 0000000000..3e0fd65c9e Binary files /dev/null and 
b/docs/images/ecs-task-def-websocket1.png differ diff --git a/docs/images/ecs-task-def-websocket2.png b/docs/images/ecs-task-def-websocket2.png new file mode 100644 index 0000000000..daaf655664 Binary files /dev/null and b/docs/images/ecs-task-def-websocket2.png differ diff --git a/docs/images/ecs-task-def-websocket3.png b/docs/images/ecs-task-def-websocket3.png new file mode 100644 index 0000000000..a1c4e32aa4 Binary files /dev/null and b/docs/images/ecs-task-def-websocket3.png differ diff --git a/docs/images/ecs-task-def-websocket4.png b/docs/images/ecs-task-def-websocket4.png new file mode 100644 index 0000000000..84fb54e1b8 Binary files /dev/null and b/docs/images/ecs-task-def-websocket4.png differ diff --git a/docs/images/extraction-rule-creation.png b/docs/images/extraction-rule-creation.png new file mode 100644 index 0000000000..833a2d62d8 Binary files /dev/null and b/docs/images/extraction-rule-creation.png differ diff --git a/docs/images/first-alert.yaml.png b/docs/images/first-alert.yaml.png new file mode 100644 index 0000000000..f0c0cad96d Binary files /dev/null and b/docs/images/first-alert.yaml.png differ diff --git a/docs/images/gcpmonitoring_1.png b/docs/images/gcpmonitoring_1.png new file mode 100644 index 0000000000..4bd027e115 Binary files /dev/null and b/docs/images/gcpmonitoring_1.png differ diff --git a/docs/images/gcpmonitoring_2.png b/docs/images/gcpmonitoring_2.png new file mode 100644 index 0000000000..561213c40d Binary files /dev/null and b/docs/images/gcpmonitoring_2.png differ diff --git a/docs/images/gcpmonitoring_3.png b/docs/images/gcpmonitoring_3.png new file mode 100644 index 0000000000..f56b62400b Binary files /dev/null and b/docs/images/gcpmonitoring_3.png differ diff --git a/docs/images/gcpmonitoring_4.png b/docs/images/gcpmonitoring_4.png new file mode 100644 index 0000000000..b9bf21d51e Binary files /dev/null and b/docs/images/gcpmonitoring_4.png differ diff --git a/docs/images/gcpmonitoring_5.png b/docs/images/gcpmonitoring_5.png new file mode 100644 index 0000000000..dc984d0bca Binary files /dev/null and b/docs/images/gcpmonitoring_5.png differ diff --git a/docs/images/gcpmonitoring_6.png b/docs/images/gcpmonitoring_6.png new file mode 100644 index 0000000000..25dcbea49b Binary files /dev/null and b/docs/images/gcpmonitoring_6.png differ diff --git a/docs/images/github-app-install.png b/docs/images/github-app-install.png new file mode 100644 index 0000000000..3039e4a0a4 Binary files /dev/null and b/docs/images/github-app-install.png differ diff --git a/docs/images/gke.png b/docs/images/gke.png new file mode 100644 index 0000000000..9050f17b49 Binary files /dev/null and b/docs/images/gke.png differ diff --git a/docs/images/gke2.png b/docs/images/gke2.png new file mode 100644 index 0000000000..ad106654f5 Binary files /dev/null and b/docs/images/gke2.png differ diff --git a/docs/images/grafana_sa.png b/docs/images/grafana_sa.png new file mode 100644 index 0000000000..155843e815 Binary files /dev/null and b/docs/images/grafana_sa.png differ diff --git a/docs/images/grafana_sa_2.png b/docs/images/grafana_sa_2.png new file mode 100644 index 0000000000..6074da9e08 Binary files /dev/null and b/docs/images/grafana_sa_2.png differ diff --git a/docs/images/grafana_sa_3.png b/docs/images/grafana_sa_3.png new file mode 100644 index 0000000000..73ced2a13b Binary files /dev/null and b/docs/images/grafana_sa_3.png differ diff --git a/docs/images/grafana_sa_4.png b/docs/images/grafana_sa_4.png new file mode 100644 index 0000000000..2c1c61a419 Binary files /dev/null 
and b/docs/images/grafana_sa_4.png differ diff --git a/docs/images/grafana_sa_5.png b/docs/images/grafana_sa_5.png new file mode 100644 index 0000000000..0694652cae Binary files /dev/null and b/docs/images/grafana_sa_5.png differ diff --git a/docs/images/keycloakauth.png b/docs/images/keycloakauth.png new file mode 100644 index 0000000000..2db67636dc Binary files /dev/null and b/docs/images/keycloakauth.png differ diff --git a/docs/images/kibana/api-keys.png b/docs/images/kibana/api-keys.png new file mode 100644 index 0000000000..6dec5730a1 Binary files /dev/null and b/docs/images/kibana/api-keys.png differ diff --git a/docs/images/kibana/copy-created-key.png b/docs/images/kibana/copy-created-key.png new file mode 100644 index 0000000000..878fa36335 Binary files /dev/null and b/docs/images/kibana/copy-created-key.png differ diff --git a/docs/images/kibana/create-api-key.png b/docs/images/kibana/create-api-key.png new file mode 100644 index 0000000000..cc48a5545e Binary files /dev/null and b/docs/images/kibana/create-api-key.png differ diff --git a/docs/images/kibana/kibana_host.png b/docs/images/kibana/kibana_host.png new file mode 100644 index 0000000000..95a322bd26 Binary files /dev/null and b/docs/images/kibana/kibana_host.png differ diff --git a/docs/images/mailgun_email_address.png b/docs/images/mailgun_email_address.png new file mode 100644 index 0000000000..0736a67c5e Binary files /dev/null and b/docs/images/mailgun_email_address.png differ diff --git a/docs/images/mailgun_extraction.png b/docs/images/mailgun_extraction.png new file mode 100644 index 0000000000..5029f01243 Binary files /dev/null and b/docs/images/mailgun_extraction.png differ diff --git a/docs/images/maintenance-window-creation.png b/docs/images/maintenance-window-creation.png new file mode 100644 index 0000000000..dde79e05e6 Binary files /dev/null and b/docs/images/maintenance-window-creation.png differ diff --git a/docs/images/openalert.png b/docs/images/openalert.png new file mode 100644 index 0000000000..d1b70db24b Binary files /dev/null and b/docs/images/openalert.png differ diff --git a/docs/images/presets/convert-to-cel.png b/docs/images/presets/convert-to-cel.png new file mode 100644 index 0000000000..3d15417a11 Binary files /dev/null and b/docs/images/presets/convert-to-cel.png differ diff --git a/docs/images/presets/converted-sql-to-cel.png b/docs/images/presets/converted-sql-to-cel.png new file mode 100644 index 0000000000..15d54b2135 Binary files /dev/null and b/docs/images/presets/converted-sql-to-cel.png differ diff --git a/docs/images/presets/import-from-sql.png b/docs/images/presets/import-from-sql.png new file mode 100644 index 0000000000..7ac2002b58 Binary files /dev/null and b/docs/images/presets/import-from-sql.png differ diff --git a/docs/images/presets/invalid-sentry-cel.png b/docs/images/presets/invalid-sentry-cel.png new file mode 100644 index 0000000000..5803ac5562 Binary files /dev/null and b/docs/images/presets/invalid-sentry-cel.png differ diff --git a/docs/images/presets/preset-created.png b/docs/images/presets/preset-created.png new file mode 100644 index 0000000000..5a053239d6 Binary files /dev/null and b/docs/images/presets/preset-created.png differ diff --git a/docs/images/presets/save-preset-modal.png b/docs/images/presets/save-preset-modal.png new file mode 100644 index 0000000000..6e85c1498a Binary files /dev/null and b/docs/images/presets/save-preset-modal.png differ diff --git a/docs/images/presets/save-preset.png b/docs/images/presets/save-preset.png new file mode 100644 index 
0000000000..6a83825a19 Binary files /dev/null and b/docs/images/presets/save-preset.png differ diff --git a/docs/images/presets/valid-sentry-cel.png b/docs/images/presets/valid-sentry-cel.png new file mode 100644 index 0000000000..6e0e9a9a9b Binary files /dev/null and b/docs/images/presets/valid-sentry-cel.png differ diff --git a/docs/images/providerdelete.png b/docs/images/providerdelete.png new file mode 100644 index 0000000000..6c31912313 Binary files /dev/null and b/docs/images/providerdelete.png differ diff --git a/docs/images/providerinstall.png b/docs/images/providerinstall.png new file mode 100644 index 0000000000..28cb922a8f Binary files /dev/null and b/docs/images/providerinstall.png differ diff --git a/docs/images/providermain.png b/docs/images/providermain.png new file mode 100644 index 0000000000..c82f800e36 Binary files /dev/null and b/docs/images/providermain.png differ diff --git a/docs/images/providers.png b/docs/images/providers.png new file mode 100644 index 0000000000..9fc19913cf Binary files /dev/null and b/docs/images/providers.png differ diff --git a/docs/images/providerwebhook.png b/docs/images/providerwebhook.png new file mode 100644 index 0000000000..aab18cd17a Binary files /dev/null and b/docs/images/providerwebhook.png differ diff --git a/docs/images/rule-creation.png b/docs/images/rule-creation.png new file mode 100644 index 0000000000..5b84df6374 Binary files /dev/null and b/docs/images/rule-creation.png differ diff --git a/docs/images/rule-table.png b/docs/images/rule-table.png new file mode 100644 index 0000000000..f2e8400a95 Binary files /dev/null and b/docs/images/rule-table.png differ diff --git a/docs/images/runworkflow.png b/docs/images/runworkflow.png new file mode 100644 index 0000000000..8eabf31b0c Binary files /dev/null and b/docs/images/runworkflow.png differ diff --git a/docs/images/settings_apikey.png b/docs/images/settings_apikey.png new file mode 100644 index 0000000000..a1060981d5 Binary files /dev/null and b/docs/images/settings_apikey.png differ diff --git a/docs/images/settings_smtp.png b/docs/images/settings_smtp.png new file mode 100644 index 0000000000..e609a59e60 Binary files /dev/null and b/docs/images/settings_smtp.png differ diff --git a/docs/images/settings_users.png b/docs/images/settings_users.png new file mode 100644 index 0000000000..0e4bc7fd7c Binary files /dev/null and b/docs/images/settings_users.png differ diff --git a/docs/images/settings_webhook.png b/docs/images/settings_webhook.png new file mode 100644 index 0000000000..1959def2c0 Binary files /dev/null and b/docs/images/settings_webhook.png differ diff --git a/docs/images/signalfx_accesstoken.png b/docs/images/signalfx_accesstoken.png new file mode 100644 index 0000000000..e948aad1de Binary files /dev/null and b/docs/images/signalfx_accesstoken.png differ diff --git a/docs/images/signalfx_keeo.png b/docs/images/signalfx_keeo.png new file mode 100644 index 0000000000..84e5e93730 Binary files /dev/null and b/docs/images/signalfx_keeo.png differ diff --git a/docs/images/signalfx_keep.png b/docs/images/signalfx_keep.png new file mode 100644 index 0000000000..84e5e93730 Binary files /dev/null and b/docs/images/signalfx_keep.png differ diff --git a/docs/images/signalfx_keep2.png b/docs/images/signalfx_keep2.png new file mode 100644 index 0000000000..82fad9388f Binary files /dev/null and b/docs/images/signalfx_keep2.png differ diff --git a/docs/images/signalfx_limitation.png b/docs/images/signalfx_limitation.png new file mode 100644 index 0000000000..88c226a1e6 Binary files 
/dev/null and b/docs/images/signalfx_limitation.png differ diff --git a/docs/images/signalfx_manual_1.png b/docs/images/signalfx_manual_1.png new file mode 100644 index 0000000000..1cbdec2e24 Binary files /dev/null and b/docs/images/signalfx_manual_1.png differ diff --git a/docs/images/signalfx_manual_2.png b/docs/images/signalfx_manual_2.png new file mode 100644 index 0000000000..6c6fc447d9 Binary files /dev/null and b/docs/images/signalfx_manual_2.png differ diff --git a/docs/images/signalfx_manual_3.png b/docs/images/signalfx_manual_3.png new file mode 100644 index 0000000000..0416dff4c6 Binary files /dev/null and b/docs/images/signalfx_manual_3.png differ diff --git a/docs/images/signalfx_manual_4.png b/docs/images/signalfx_manual_4.png new file mode 100644 index 0000000000..d8061b5842 Binary files /dev/null and b/docs/images/signalfx_manual_4.png differ diff --git a/docs/images/signalfx_manual_5.png b/docs/images/signalfx_manual_5.png new file mode 100644 index 0000000000..f9d8cde60a Binary files /dev/null and b/docs/images/signalfx_manual_5.png differ diff --git a/docs/images/signalfx_manual_6.png b/docs/images/signalfx_manual_6.png new file mode 100644 index 0000000000..e7130de126 Binary files /dev/null and b/docs/images/signalfx_manual_6.png differ diff --git a/docs/images/signalfx_orgid.png b/docs/images/signalfx_orgid.png new file mode 100644 index 0000000000..c8bcf62e68 Binary files /dev/null and b/docs/images/signalfx_orgid.png differ diff --git a/docs/images/slack/slack-oauth.png b/docs/images/slack/slack-oauth.png new file mode 100644 index 0000000000..3341f8c788 Binary files /dev/null and b/docs/images/slack/slack-oauth.png differ diff --git a/docs/images/slack/slack-workflow.png b/docs/images/slack/slack-workflow.png new file mode 100644 index 0000000000..2eecd2eead Binary files /dev/null and b/docs/images/slack/slack-workflow.png differ diff --git a/docs/images/token.jpeg b/docs/images/token.jpeg new file mode 100644 index 0000000000..95048f7f72 Binary files /dev/null and b/docs/images/token.jpeg differ diff --git a/docs/images/user-key.jpeg b/docs/images/user-key.jpeg new file mode 100644 index 0000000000..3c320a2f55 Binary files /dev/null and b/docs/images/user-key.jpeg differ diff --git a/docs/images/viewworkflowhistory.png b/docs/images/viewworkflowhistory.png new file mode 100644 index 0000000000..a6dfbc1f0c Binary files /dev/null and b/docs/images/viewworkflowhistory.png differ diff --git a/docs/images/viewworkflowlogs.png b/docs/images/viewworkflowlogs.png new file mode 100644 index 0000000000..d52fb9a9d2 Binary files /dev/null and b/docs/images/viewworkflowlogs.png differ diff --git a/docs/images/zabbix_role.png b/docs/images/zabbix_role.png new file mode 100644 index 0000000000..18ad89bd7b Binary files /dev/null and b/docs/images/zabbix_role.png differ diff --git a/docs/images/zenduty.jpeg b/docs/images/zenduty.jpeg new file mode 100644 index 0000000000..5d34955c40 Binary files /dev/null and b/docs/images/zenduty.jpeg differ diff --git a/docs/logo/dark.png b/docs/logo/dark.png new file mode 100644 index 0000000000..2793f1d737 Binary files /dev/null and b/docs/logo/dark.png differ diff --git a/docs/logo/light.png b/docs/logo/light.png new file mode 100644 index 0000000000..70b46827a0 Binary files /dev/null and b/docs/logo/light.png differ diff --git a/docs/mint.json b/docs/mint.json new file mode 100644 index 0000000000..2504bd46e0 --- /dev/null +++ b/docs/mint.json @@ -0,0 +1,581 @@ +{ + "$schema": "https://mintlify.com/schema.json", + "name": "Keep", + "logo": { 
+ "light": "/logo/light.png", + "dark": "/logo/dark.png" + }, + "favicon": "/favicon.svg", + "colors": { + "primary": "#FA9E34", + "light": "#FA9E34", + "dark": "#FF9F36" + }, + "topbarCtaButton": { + "type": "github", + "url": "https://github.com/keephq/keep" + }, + "topbarLinks": [ + { + "name": "Platform", + "url": "https://platform.keephq.dev/" + } + ], + "analytics": { + "posthog": { + "apiKey": "phc_mYqciA4RO5g48K6KnmZtftn5xQa5625Aao7vsVC0gJ9" + } + }, + "anchors": [], + "navigation": [ + { + "group": "Overview", + "pages": [ + "overview/introduction", + "overview/usecases", + "overview/comparisons", + "overview/keyconcepts", + "overview/ruleengine", + "overview/presets", + { + "group": "Enrichments", + "pages": [ + "overview/enrichment/extraction", + "overview/enrichment/mapping" + ] + }, + "overview/maintenance-windows", + "overview/deduplication", + "overview/examples" + ] + }, + { + "group": "Development", + "pages": ["development/getting-started", "development/external-url"] + }, + { + "group": "Deployment", + "pages": [ + "deployment/configuration", + { + "group": "Authentication", + "pages": [ + "deployment/authentication/overview", + "deployment/authentication/no-auth", + "deployment/authentication/db-auth", + "deployment/authentication/auth0-auth", + "deployment/authentication/keycloak-auth", + "deployment/authentication/oauth2proxy-auth" + ] + }, + { + "group": "Provision", + "pages": [ + "deployment/provision/overview", + "deployment/provision/provider", + "deployment/provision/workflow", + "deployment/provision/dashboard" + ] + }, + "deployment/secret-manager", + "deployment/docker", + "deployment/kubernetes", + "deployment/openshift", + "deployment/ecs", + "deployment/gke", + "deployment/stress-testing" + ] + }, + { + "group": "Platform", + "pages": [ + "platform/overview", + "platform/alerts", + "platform/alertseverityandstatus", + "platform/settings", + "platform/support" + ] + }, + { + "group": "Providers", + "pages": [ + "providers/overview", + "providers/fingerprints", + "providers/adding-a-new-provider", + "providers/getting-started", + "providers/what-is-a-provider", + { + "group": "Supported Providers", + "pages": [ + "providers/documentation/aks-provider", + "providers/documentation/appdynamics-provider", + "providers/documentation/auth0-provider", + "providers/documentation/axiom-provider", + "providers/documentation/azuremonitoring-provider", + "providers/documentation/bash-provider", + "providers/documentation/bigquery-provider", + "providers/documentation/centreon-provider", + "providers/documentation/clickhouse-provider", + "providers/documentation/cloudwatch-provider", + "providers/documentation/console-provider", + "providers/documentation/coralogix-provider", + "providers/documentation/datadog-provider", + "providers/documentation/discord-provider", + "providers/documentation/dynatrace-provider", + "providers/documentation/elastic-provider", + "providers/documentation/gcpmonitoring-provider", + "providers/documentation/github-provider", + "providers/documentation/github_workflows_provider", + "providers/documentation/gitlab-provider", + "providers/documentation/gitlabpipelines-provider", + "providers/documentation/gke-provider", + "providers/documentation/google_chat-provider", + "providers/documentation/grafana-provider", + "providers/documentation/grafana_incident-provider", + "providers/documentation/grafana_oncall-provider", + "providers/documentation/http-provider", + "providers/documentation/ilert-provider", + 
"providers/documentation/incidentio-provider", + "providers/documentation/incidentmanager-provider", + "providers/documentation/jira-on-prem-provider", + "providers/documentation/jira-provider", + "providers/documentation/kafka-provider", + "providers/documentation/keep-provider", + "providers/documentation/kibana-provider", + "providers/documentation/kubernetes-provider", + "providers/documentation/linear_provider", + "providers/documentation/linearb-provider", + "providers/documentation/mailchimp-provider", + "providers/documentation/mailgun-provider", + "providers/documentation/mattermost-provider", + "providers/documentation/microsoft-planner-provider", + "providers/documentation/mock-provider", + "providers/documentation/mongodb-provider", + "providers/documentation/mysql-provider", + "providers/documentation/netdata-provider", + "providers/documentation/new-relic-provider", + "providers/documentation/ntfy-provider", + "providers/documentation/openobserve-provider", + "providers/documentation/openshift-provider", + "providers/documentation/opsgenie-provider", + "providers/documentation/pagerduty-provider", + "providers/documentation/pagertree-provider", + "providers/documentation/parseable-provider", + "providers/documentation/pingdom-provider", + "providers/documentation/planner-provider", + "providers/documentation/postgresql-provider", + "providers/documentation/prometheus-provider", + "providers/documentation/pushover-provider", + "providers/documentation/python-provider", + "providers/documentation/quickchart-provider", + "providers/documentation/redmine-provider", + "providers/documentation/resend-provider", + "providers/documentation/rollbar-provider", + "providers/documentation/sendgrid-provider", + "providers/documentation/sentry-provider", + "providers/documentation/service-now-provider", + "providers/documentation/signalfx-provider", + "providers/documentation/signl4-provider", + "providers/documentation/site24x7-provider", + "providers/documentation/slack-provider", + "providers/documentation/smtp-provider", + "providers/documentation/snowflake-provider", + "providers/documentation/splunk-provider", + "providers/documentation/squadcast-provider", + "providers/documentation/ssh-provider", + "providers/documentation/statuscake-provider", + "providers/documentation/sumologic-provider", + "providers/documentation/teams-provider", + "providers/documentation/telegram-provider", + "providers/documentation/template", + "providers/documentation/trello-provider", + "providers/documentation/twilio-provider", + "providers/documentation/uptimekuma-provider", + "providers/documentation/victoriametrics-provider", + "providers/documentation/webhook-provider", + "providers/documentation/websocket-provider", + "providers/documentation/zabbix-provider", + "providers/documentation/zenduty-provider" + ] + + } + ] + }, + { + "group": "Workflows", + "pages": [ + "workflows/overview", + { + "group": "Syntax", + "pages": [ + "workflows/syntax/basic-syntax", + "workflows/syntax/foreach-syntax", + "workflows/syntax/context-syntax" + ] + }, + { + "group": "Conditions", + "pages": [ + "workflows/conditions/what-is-a-condition", + "workflows/conditions/threshold", + "workflows/conditions/assert", + "workflows/conditions/stddev" + ] + }, + { + "group": "Functions", + "pages": [ + "workflows/functions/what-is-a-function", + "workflows/functions/all", + "workflows/functions/add-time-to-date", + "workflows/functions/diff", + "workflows/functions/len", + "workflows/functions/split", + 
"workflows/functions/first", + "workflows/functions/utcnow", + "workflows/functions/to-utc", + "workflows/functions/datetime-compare", + "workflows/functions/encode", + "workflows/functions/last", + "workflows/functions/lowercase", + "workflows/functions/uppercase" + ] + }, + { + "group": "Throttles", + "pages": [ + "workflows/throttles/what-is-a-throttle", + "workflows/throttles/one-until-resolved" + ] + }, + { + "group": "Examples", + "pages": [ + "workflows/examples/multi-step-alert", + "workflows/examples/reusable-action-alert" + ] + }, + "workflows/state" + ] + }, + { + "group": "Keep API", + "pages": [ + { + "group": "providers", + "pages": [ + "api-ref/providers/get-providers", + "api-ref/providers/get-installed-providers", + "api-ref/providers/get-alerts-configuration", + "api-ref/providers/get-logs", + "api-ref/providers/get-alerts-schema", + "api-ref/providers/get-alert-count", + "api-ref/providers/add-alert", + "api-ref/providers/test-provider", + "api-ref/providers/delete-provider", + "api-ref/providers/validate-provider-scopes", + "api-ref/providers/update-provider", + "api-ref/providers/install-provider", + "api-ref/providers/install-provider-oauth2", + "api-ref/providers/invoke-provider-method", + "api-ref/providers/install-provider-webhook", + "api-ref/providers/get-webhook-settings", + "api-ref/providers/export-providers" + ] + }, + { + "group": "actions", + "pages": [ + "api-ref/actions/get-actions", + "api-ref/actions/create-actions", + "api-ref/actions/put-action", + "api-ref/actions/delete-action", + "api-ref/actions/add-actions", + "api-ref/actions/update-action" + ] + }, + { + "group": "healthcheck", + "pages": ["api-ref/healthcheck/healthcheck"] + }, + { + "group": "topology", + "pages": [ + "api-ref/topology/get-topology-data", + "api-ref/topology/create-application", + "api-ref/topology/delete-application", + "api-ref/topology/get-applications", + "api-ref/topology/update-application" + ] + }, + { + "group": "alerts", + "pages": [ + "api-ref/alerts/get-all-alerts", + "api-ref/alerts/delete-alert", + "api-ref/alerts/get-alert-history", + "api-ref/alerts/assign-alert", + "api-ref/alerts/receive-generic-event", + "api-ref/alerts/webhook-challenge", + "api-ref/alerts/receive-event", + "api-ref/alerts/get-alert", + "api-ref/alerts/get-multiple-fingerprint-alert-audit", + "api-ref/alerts/enrich-alert", + "api-ref/alerts/unenrich-alert", + "api-ref/alerts/search-alerts", + "api-ref/alerts/get-alert-audit", + "api-ref/alerts/get-alerts" + ] + }, + { + "group": "deduplications", + "pages": [ + "api-ref/deduplications/create-deduplication-rule", + "api-ref/deduplications/delete-deduplication-rule", + "api-ref/deduplications/get-deduplication-fields", + "api-ref/deduplications/get-deduplications", + "api-ref/deduplications/update-deduplication-rule" + ] + }, + { + "group": "maintenance", + "pages": [ + "api-ref/maintenance/create-maintenance-rule", + "api-ref/maintenance/delete-maintenance-rule", + "api-ref/maintenance/get-maintenance-rules", + "api-ref/maintenance/update-maintenance-rule" + ] + }, + { + "group": "incidents", + "pages": [ + "api-ref/incidents/change-incident-status", + "api-ref/incidents/create-incident-endpoint", + "api-ref/incidents/get-all-incidents", + "api-ref/incidents/get-incident", + "api-ref/incidents/update-incident", + "api-ref/incidents/delete-incident", + "api-ref/incidents/update-incident-1", + "api-ref/incidents/get-incident-alerts", + "api-ref/incidents/add-alerts-to-incident", + "api-ref/incidents/delete-alerts-from-incident", + 
"api-ref/incidents/confirm-incident" + ] + }, + { + "group": "settings", + "pages": [ + "api-ref/settings/webhook-settings", + "api-ref/settings/get-users", + "api-ref/settings/create-user", + "api-ref/settings/delete-user", + "api-ref/settings/get-smtp-settings", + "api-ref/settings/update-smtp-settings", + "api-ref/settings/delete-smtp-settings", + "api-ref/settings/test-smtp-settings", + "api-ref/settings/update-api-key", + "api-ref/settings/create-key", + "api-ref/settings/get-keys", + "api-ref/settings/delete-api-key", + "api-ref/settings/get-sso-settings" + ] + }, + { + "group": "workflows", + "pages": [ + "api-ref/workflows/get-workflows", + "api-ref/workflows/create-workflow", + "api-ref/workflows/export-workflows", + "api-ref/workflows/run-workflow", + "api-ref/workflows/run-workflow-from-definition", + "api-ref/workflows/create-workflow-from-body", + "api-ref/workflows/get-random-workflow-templates", + "api-ref/workflows/get-workflow-by-id", + "api-ref/workflows/update-workflow-by-id", + "api-ref/workflows/delete-workflow-by-id", + "api-ref/workflows/get-raw-workflow-by-id", + "api-ref/workflows/get-workflow-executions-by-alert-fingerprint", + "api-ref/workflows/get-workflow-execution-status", + "api-ref/workflows/get-workflow-executions" + ] + }, + { + "group": "whoami", + "pages": ["api-ref/whoami/get-tenant-id"] + }, + { + "group": "pusher", + "pages": ["api-ref/pusher/pusher-authentication"] + }, + { + "group": "status", + "pages": ["api-ref/status/status"] + }, + { + "group": "rules", + "pages": [ + "api-ref/rules/get-rules", + "api-ref/rules/create-rule", + "api-ref/rules/update-rule", + "api-ref/rules/delete-rule" + ] + }, + { + "group": "preset", + "pages": [ + "api-ref/preset/get-presets", + "api-ref/preset/create-preset", + "api-ref/preset/update-preset", + "api-ref/preset/delete-preset", + "api-ref/preset/get-preset-alerts", + "api-ref/preset/create-preset-tab", + "api-ref/preset/delete-tab" + ] + }, + { + "group": "enrichment", + "pages": [ + "api-ref/enrichment/get-rules", + "api-ref/enrichment/update-rule", + "api-ref/enrichment/create-rule", + "api-ref/enrichment/delete-rule", + "api-ref/enrichment/get-extraction-rules", + "api-ref/enrichment/create-extraction-rule", + "api-ref/enrichment/update-extraction-rule", + "api-ref/enrichment/delete-extraction-rule" + ] + }, + { + "group": "auth", + "pages": [ + "api-ref/auth/get-groups", + "api-ref/auth/create-group", + "api-ref/auth/update-group", + "api-ref/auth/delete-group", + "api-ref/auth/get-permissions", + "api-ref/auth/create-permissions", + "api-ref/auth/get-scopes", + "api-ref/auth/get-roles", + "api-ref/auth/create-role", + "api-ref/auth/update-role", + "api-ref/auth/delete-role", + "api-ref/auth/get-users", + "api-ref/auth/create-user", + "api-ref/auth/update-user", + "api-ref/auth/delete-user" + ] + }, + { + "group": "metrics", + "pages": ["api-ref/metrics/get-metrics"] + }, + { + "group": "users", + "pages": [ + "api-ref/users/create-user", + "api-ref/users/delete-user", + "api-ref/users/get-users", + "api-ref/users/update-user" + ] + }, + { + "group": "groups", + "pages": ["api-ref/groups/get-groups"] + }, + { + "group": "mappings", + "pages": [ + "api-ref/mapping/create-mapping", + "api-ref/mapping/delete-mapping-by-id", + "api-ref/mapping/get-mappings" + ] + }, + { + "group": "dashboard", + "pages": [ + "api-ref/dashboard/read-dashboards", + "api-ref/dashboard/create-dashboard", + "api-ref/dashboard/update-dashboard", + "api-ref/dashboard/delete-dashboard" + ] + }, + { + "group": "tags", + "pages": 
["api-ref/tags/get-tags"] + } + ] + }, + { + "group": "Keep CLI", + "pages": [ + "cli/overview", + "cli/installation", + "cli/github-actions", + { + "group": "Commands", + "pages": [ + { + "group": "keep alert", + "pages": [ + "cli/commands/cli-alert", + "cli/commands/alert-enrich", + "cli/commands/alert-get", + "cli/commands/alert-list" + ] + }, + { + "group": "keep provider", + "pages": [ + "cli/commands/cli-provider", + "cli/commands/provider-connect", + "cli/commands/provider-delete", + "cli/commands/provider-list" + ] + }, + { + "group": "keep workflow", + "pages": [ + "cli/commands/cli-workflow", + "cli/commands/workflow-apply", + "cli/commands/workflow-list", + "cli/commands/workflow-run", + "cli/commands/workflow-runs", + { + "group": "keep workflow runs", + "pages": ["cli/commands/runs-logs", "cli/commands/runs-list"] + } + ] + }, + { + "group": "keep mappings", + "pages": [ + "cli/commands/mappings-list", + "cli/commands/mappings-create", + "cli/commands/mappings-delete" + ] + }, + { + "group": "keep extractions", + "pages": [ + "cli/commands/extraction-create", + "cli/commands/extraction-delete", + "cli/commands/extractions-list" + ] + }, + "cli/commands/cli", + "cli/commands/cli-api", + "cli/commands/cli-config-new", + "cli/commands/cli-config-show", + "cli/commands/cli-run", + "cli/commands/cli-config", + "cli/commands/cli-version", + "cli/commands/cli-whoami" + ] + } + ] + } + ], + "footerSocials": { + "github": "https://github.com/keephq/keep" + } +} diff --git a/docs/openapi.json b/docs/openapi.json new file mode 100644 index 0000000000..5df0d0f5f0 --- /dev/null +++ b/docs/openapi.json @@ -0,0 +1 @@ +{"openapi": "3.0.2", "info": {"title": "Keep API", "description": "Rest API powering https://platform.keephq.dev and friends \ud83c\udfc4\u200d\u2640\ufe0f", "version": "0.1.0"}, "paths": {"/providers": {"get": {"tags": ["providers"], "summary": "Get Providers", "operationId": "get_providers_providers_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/export": {"get": {"tags": ["providers"], "summary": "Get Installed Providers", "description": "export all installed providers", "operationId": "get_installed_providers_providers_export_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_type}/{provider_id}/configured-alerts": {"get": {"tags": ["providers"], "summary": "Get Alerts Configuration", "description": "Get alerts configuration from a provider", "operationId": "get_alerts_configuration_providers__provider_type___provider_id__configured_alerts_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {}, "type": "array", "title": "Response Get Alerts Configuration Providers Provider Type Provider Id Configured Alerts Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, 
"/providers/{provider_type}/{provider_id}/logs": {"get": {"tags": ["providers"], "summary": "Get Logs", "description": "Get logs from a provider", "operationId": "get_logs_providers__provider_type___provider_id__logs_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}, {"required": false, "schema": {"type": "integer", "title": "Limit", "default": 5}, "name": "limit", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {}, "type": "array", "title": "Response Get Logs Providers Provider Type Provider Id Logs Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_type}/schema": {"get": {"tags": ["providers"], "summary": "Get Alerts Schema", "description": "Get the provider's API schema used to push alerts configuration", "operationId": "get_alerts_schema_providers__provider_type__schema_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Get Alerts Schema Providers Provider Type Schema Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}}}, "/providers/{provider_type}/{provider_id}/alerts/count": {"get": {"tags": ["providers"], "summary": "Get Alert Count", "description": "Get number of alerts a specific provider has received (in a specific time time period or ever)", "operationId": "get_alert_count_providers__provider_type___provider_id__alerts_count_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}, {"required": true, "schema": {"type": "boolean", "title": "Ever"}, "name": "ever", "in": "query"}, {"required": false, "schema": {"type": "string", "format": "date-time", "title": "Start Time"}, "name": "start_time", "in": "query"}, {"required": false, "schema": {"type": "string", "format": "date-time", "title": "End Time"}, "name": "end_time", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_type}/{provider_id}/alerts": {"post": {"tags": ["providers"], "summary": "Add Alert", "description": "Push new alerts to the provider", "operationId": "add_alert_providers__provider_type___provider_id__alerts_post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}, {"required": false, "schema": {"type": "string", "title": 
"Alert Id"}, "name": "alert_id", "in": "query"}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "title": "Alert"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/test": {"post": {"tags": ["providers"], "summary": "Test Provider", "description": "Test a provider's alert retrieval", "operationId": "test_provider_providers_test_post", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "title": "Provider Info"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_type}/{provider_id}": {"delete": {"tags": ["providers"], "summary": "Delete Provider", "operationId": "delete_provider_providers__provider_type___provider_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_id}/scopes": {"post": {"tags": ["providers"], "summary": "Validate Provider Scopes", "description": "Validate provider scopes", "operationId": "validate_provider_scopes_providers__provider_id__scopes_post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"anyOf": [{"type": "boolean"}, {"type": "string"}]}, "type": "object", "title": "Response Validate Provider Scopes Providers Provider Id Scopes Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_id}": {"put": {"tags": ["providers"], "summary": "Update Provider", "description": "Update provider", "operationId": "update_provider_providers__provider_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/install": {"post": {"tags": ["providers"], "summary": "Install Provider", "operationId": 
"install_provider_providers_install_post", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/install/oauth2/{provider_type}": {"post": {"tags": ["providers"], "summary": "Install Provider Oauth2", "operationId": "install_provider_oauth2_providers_install_oauth2__provider_type__post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "title": "Provider Info"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_id}/invoke/{method}": {"post": {"tags": ["providers"], "summary": "Invoke Provider Method", "description": "Invoke provider special method", "operationId": "invoke_provider_method_providers__provider_id__invoke__method__post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Method"}, "name": "method", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "title": "Method Params"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/install/webhook/{provider_type}/{provider_id}": {"post": {"tags": ["providers"], "summary": "Install Provider Webhook", "operationId": "install_provider_webhook_providers_install_webhook__provider_type___provider_id__post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/providers/{provider_type}/webhook": {"get": {"tags": ["providers"], "summary": "Get Webhook Settings", "operationId": "get_webhook_settings_providers__provider_type__webhook_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/ProviderWebhookSettings"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/actions": {"get": {"tags": ["actions"], "summary": "Get 
Actions", "description": "Get all actions", "operationId": "get_actions_actions_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["actions"], "summary": "Create Actions", "description": "Create new actions by uploading a file", "operationId": "create_actions_actions_post", "requestBody": {"content": {"multipart/form-data": {"schema": {"$ref": "#/components/schemas/Body_create_actions_actions_post"}}}}, "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/actions/{action_id}": {"put": {"tags": ["actions"], "summary": "Put Action", "description": "Update an action", "operationId": "put_action_actions__action_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Action Id"}, "name": "action_id", "in": "path"}], "requestBody": {"content": {"multipart/form-data": {"schema": {"$ref": "#/components/schemas/Body_put_action_actions__action_id__put"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["actions"], "summary": "Delete Action", "description": "Delete an action", "operationId": "delete_action_actions__action_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Action Id"}, "name": "action_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/healthcheck": {"get": {"tags": ["healthcheck"], "summary": "Healthcheck", "description": "simple healthcheck endpoint", "operationId": "healthcheck_healthcheck_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Healthcheck Healthcheck Get"}}}}}}}, "/alerts": {"get": {"tags": ["alerts"], "summary": "Get All Alerts", "description": "Get last alerts occurrence", "operationId": "get_all_alerts_alerts_get", "parameters": [{"required": false, "schema": {"type": "integer", "title": "Limit", "default": 1000}, "name": "limit", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array", "title": "Response Get All Alerts Alerts Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["alerts"], "summary": "Delete Alert", "description": "Delete alert by finerprint and last received time", "operationId": 
"delete_alert_alerts_delete", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/DeleteRequestBody"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Response Delete Alert Alerts Delete"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/{fingerprint}/history": {"get": {"tags": ["alerts"], "summary": "Get Alert History", "description": "Get alert history", "operationId": "get_alert_history_alerts__fingerprint__history_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Fingerprint"}, "name": "fingerprint", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array", "title": "Response Get Alert History Alerts Fingerprint History Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/{fingerprint}/assign/{last_received}": {"post": {"tags": ["alerts"], "summary": "Assign Alert", "description": "Assign alert to user", "operationId": "assign_alert_alerts__fingerprint__assign__last_received__post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Fingerprint"}, "name": "fingerprint", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Last Received"}, "name": "last_received", "in": "path"}, {"required": false, "schema": {"type": "boolean", "title": "Unassign", "default": false}, "name": "unassign", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Response Assign Alert Alerts Fingerprint Assign Last Received Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/event": {"post": {"tags": ["alerts"], "summary": "Receive Generic Event", "description": "Receive a generic alert event", "operationId": "receive_generic_event_alerts_event_post", "parameters": [{"required": false, "schema": {"type": "string", "title": "Fingerprint"}, "name": "fingerprint", "in": "query"}], "requestBody": {"content": {"application/json": {"schema": {"anyOf": [{"$ref": "#/components/schemas/AlertDto"}, {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array"}, {"type": "object"}], "title": "Event"}}}, "required": true}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"anyOf": [{"$ref": "#/components/schemas/AlertDto"}, {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array"}], "title": "Response Receive Generic Event Alerts Event Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, 
"/alerts/event/netdata": {"get": {"tags": ["alerts"], "summary": "Webhook Challenge", "description": "Helper function to complete Netdata webhook challenge", "operationId": "webhook_challenge_alerts_event_netdata_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}}}, "/alerts/event/{provider_type}": {"post": {"tags": ["alerts"], "summary": "Receive Event", "description": "Receive an alert event from a provider", "operationId": "receive_event_alerts_event__provider_type__post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Provider Type"}, "name": "provider_type", "in": "path"}, {"required": false, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "query"}, {"required": false, "schema": {"type": "string", "title": "Fingerprint"}, "name": "fingerprint", "in": "query"}], "requestBody": {"content": {"application/json": {"schema": {"anyOf": [{"type": "object"}, {"type": "string", "format": "binary"}], "title": "Event"}}}, "required": true}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Response Receive Event Alerts Event Provider Type Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/{fingerprint}": {"get": {"tags": ["alerts"], "summary": "Get Alert", "description": "Get alert by fingerprint", "operationId": "get_alert_alerts__fingerprint__get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Fingerprint"}, "name": "fingerprint", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/AlertDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/enrich": {"post": {"tags": ["alerts"], "summary": "Enrich Alert", "description": "Enrich an alert", "operationId": "enrich_alert_alerts_enrich_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/EnrichAlertRequestBody"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Response Enrich Alert Alerts Enrich Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/unenrich": {"post": {"tags": ["alerts"], "summary": "Unenrich Alert", "description": "Un-Enrich an alert", "operationId": "unenrich_alert_alerts_unenrich_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/UnEnrichAlertRequestBody"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Response Unenrich Alert Alerts Unenrich Post"}}}}, "422": {"description": "Validation Error", "content": 
{"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/search": {"post": {"tags": ["alerts"], "summary": "Search Alerts", "description": "Search alerts", "operationId": "search_alerts_alerts_search_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/SearchAlertsRequest"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array", "title": "Response Search Alerts Alerts Search Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/audit": {"post": {"tags": ["alerts"], "summary": "Get Multiple Fingerprint Alert Audit", "description": "Get alert timeline audit trail for multiple fingerprints", "operationId": "get_multiple_fingerprint_alert_audit_alerts_audit_post", "requestBody": {"content": {"application/json": {"schema": {"items": {"type": "string"}, "type": "array", "title": "Fingerprints"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertAuditDto"}, "type": "array", "title": "Response Get Multiple Fingerprint Alert Audit Alerts Audit Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/alerts/{fingerprint}/audit": {"get": {"tags": ["alerts"], "summary": "Get Alert Audit", "description": "Get alert timeline audit trail", "operationId": "get_alert_audit_alerts__fingerprint__audit_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Fingerprint"}, "name": "fingerprint", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertAuditDto"}, "type": "array", "title": "Response Get Alert Audit Alerts Fingerprint Audit Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents": {"get": {"tags": ["incidents"], "summary": "Get All Incidents", "description": "Get last incidents", "operationId": "get_all_incidents_incidents_get", "parameters": [{"required": false, "schema": {"type": "boolean", "title": "Confirmed", "default": true}, "name": "confirmed", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Limit", "default": 25}, "name": "limit", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Offset", "default": 0}, "name": "offset", "in": "query"}, {"required": false, "schema": {"allOf": [{"$ref": "#/components/schemas/IncidentSorting"}], "default": "creation_time"}, "name": "sorting", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentsPaginatedResultsDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": 
{"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["incidents"], "summary": "Create Incident Endpoint", "description": "Create new incident", "operationId": "create_incident_endpoint_incidents_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDtoIn"}}}, "required": true}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/{incident_id}": {"get": {"tags": ["incidents"], "summary": "Get Incident", "description": "Get incident by id", "operationId": "get_incident_incidents__incident_id__get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "put": {"tags": ["incidents"], "summary": "Update Incident", "description": "Update incident by id", "operationId": "update_incident_incidents__incident_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDtoIn"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["incidents"], "summary": "Delete Incident", "description": "Delete incident by incident id", "operationId": "delete_incident_incidents__incident_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/{incident_id}/alerts": {"get": {"tags": ["incidents"], "summary": "Get Incident Alerts", "description": "Get incident alerts by incident incident id", "operationId": "get_incident_alerts_incidents__incident_id__alerts_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Incident Id"}, "name": "incident_id", "in": "path"}, {"required": false, "schema": {"type": "integer", "title": "Limit", "default": 25}, "name": "limit", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Offset", "default": 0}, "name": "offset", "in": "query"}], 
"responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/AlertPaginatedResultsDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["incidents"], "summary": "Add Alerts To Incident", "description": "Add alerts to incident", "operationId": "add_alerts_to_incident_incidents__incident_id__alerts_post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"items": {"type": "string", "format": "uuid"}, "type": "array", "title": "Alert Ids"}}}, "required": true}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array", "title": "Response Add Alerts To Incident Incidents Incident Id Alerts Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["incidents"], "summary": "Delete Alerts From Incident", "description": "Delete alerts from incident", "operationId": "delete_alerts_from_incident_incidents__incident_id__alerts_delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"items": {"type": "string", "format": "uuid"}, "type": "array", "title": "Alert Ids"}}}, "required": true}, "responses": {"202": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array", "title": "Response Delete Alerts From Incident Incidents Incident Id Alerts Delete"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/{incident_id}/confirm": {"post": {"tags": ["incidents"], "summary": "Confirm Incident", "description": "Confirm predicted incident by id", "operationId": "confirm_incident_incidents__incident_id__confirm_post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/incidents/{incident_id}/status": {"post": {"tags": ["incidents"], "summary": "Change Incident Status", "description": "Change incident status", "operationId": "change_incident_status_incidents__incident_id__status_post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Incident Id"}, "name": "incident_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentStatusChangeDto"}}}, "required": 
true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/IncidentDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/webhook": {"get": {"tags": ["settings"], "summary": "Webhook Settings", "description": "Get details about the webhook endpoint (e.g. the API url and an API key)", "operationId": "webhook_settings_settings_webhook_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WebhookSettings"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/users": {"get": {"tags": ["settings"], "summary": "Get Users", "description": "Get all users", "operationId": "get_users_settings_users_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/User"}, "type": "array", "title": "Response Get Users Settings Users Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["settings"], "summary": "Create User", "description": "Create a user", "operationId": "create_user_settings_users_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/keep__api__routes__settings__CreateUserRequest"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/users/{user_email}": {"delete": {"tags": ["settings"], "summary": "Delete User", "description": "Delete a user", "operationId": "delete_user_settings_users__user_email__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "User Email"}, "name": "user_email", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/smtp": {"get": {"tags": ["settings"], "summary": "Get Smtp Settings", "description": "Get SMTP settings", "operationId": "get_smtp_settings_settings_smtp_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["settings"], "summary": "Update Smtp Settings", "description": "Install or update SMTP settings", "operationId": "update_smtp_settings_settings_smtp_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/SMTPSettings"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, 
{"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["settings"], "summary": "Delete Smtp Settings", "description": "Delete SMTP settings", "operationId": "delete_smtp_settings_settings_smtp_delete", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/smtp/test": {"post": {"tags": ["settings"], "summary": "Test Smtp Settings", "description": "Test SMTP settings", "operationId": "test_smtp_settings_settings_smtp_test_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/SMTPSettings"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/apikey": {"put": {"tags": ["settings"], "summary": "Update Api Key", "description": "Update API key secret", "operationId": "update_api_key_settings_apikey_put", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["settings"], "summary": "Create Key", "description": "Create API key", "operationId": "create_key_settings_apikey_post", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/apikeys": {"get": {"tags": ["settings"], "summary": "Get Keys", "description": "Get API keys", "operationId": "get_keys_settings_apikeys_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/apikey/{keyId}": {"delete": {"tags": ["settings"], "summary": "Delete Api Key", "description": "Delete API key", "operationId": "delete_api_key_settings_apikey__keyId__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Keyid"}, "name": "keyId", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/settings/sso": {"get": {"tags": ["settings"], "summary": "Get Sso Settings", "operationId": "get_sso_settings_settings_sso_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Workflows", "description": "Get workflows", "operationId": "get_workflows_workflows_get", "parameters": [{"required": false, "schema": {"type": "boolean", "title": "Is V2", "default": false}, "name": "is_v2", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"anyOf": [{"items": {"$ref": "#/components/schemas/WorkflowDTO"}, "type": "array"}, 
{"items": {"type": "object"}, "type": "array"}], "title": "Response Get Workflows Workflows Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["workflows", "alerts"], "summary": "Create Workflow", "description": "Create or update a workflow", "operationId": "create_workflow_workflows_post", "requestBody": {"content": {"multipart/form-data": {"schema": {"$ref": "#/components/schemas/Body_create_workflow_workflows_post"}}}, "required": true}, "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WorkflowCreateOrUpdateDTO"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/export": {"get": {"tags": ["workflows", "alerts"], "summary": "Export Workflows", "description": "export all workflow Yamls", "operationId": "export_workflows_workflows_export_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"type": "string"}, "type": "array", "title": "Response Export Workflows Workflows Export Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/{workflow_id}/run": {"post": {"tags": ["workflows", "alerts"], "summary": "Run Workflow", "description": "Run a workflow", "operationId": "run_workflow_workflows__workflow_id__run_post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Id"}, "name": "workflow_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "title": "Body"}}}}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Run Workflow Workflows Workflow Id Run Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/test": {"post": {"tags": ["workflows", "alerts"], "summary": "Run Workflow From Definition", "description": "Test run a workflow from a definition", "operationId": "run_workflow_from_definition_workflows_test_post", "requestBody": {"content": {"multipart/form-data": {"schema": {"$ref": "#/components/schemas/Body_run_workflow_from_definition_workflows_test_post"}}}}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Run Workflow From Definition Workflows Test Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/json": {"post": {"tags": ["workflows", "alerts"], "summary": "Create Workflow From Body", "description": "Create or update a workflow", "operationId": "create_workflow_from_body_workflows_json_post", "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WorkflowCreateOrUpdateDTO"}}}}}, 
"security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/random-templates": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Random Workflow Templates", "description": "Get random workflow templates", "operationId": "get_random_workflow_templates_workflows_random_templates_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"type": "object"}, "type": "array", "title": "Response Get Random Workflow Templates Workflows Random Templates Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/{workflow_id}": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Workflow By Id", "description": "Get workflow executions by ID", "operationId": "get_workflow_by_id_workflows__workflow_id__get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Id"}, "name": "workflow_id", "in": "path"}, {"required": false, "schema": {"type": "integer", "title": "Tab", "default": 1}, "name": "tab", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Limit", "default": 25}, "name": "limit", "in": "query"}, {"required": false, "schema": {"type": "integer", "title": "Offset", "default": 0}, "name": "offset", "in": "query"}, {"required": false, "schema": {"items": {"type": "string"}, "type": "array", "title": "Status"}, "name": "status", "in": "query"}, {"required": false, "schema": {"items": {"type": "string"}, "type": "array", "title": "Trigger"}, "name": "trigger", "in": "query"}, {"required": false, "schema": {"type": "string", "title": "Execution Id"}, "name": "execution_id", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WorkflowExecutionsPaginatedResultsDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "put": {"tags": ["workflows", "alerts"], "summary": "Update Workflow By Id", "description": "Update a workflow", "operationId": "update_workflow_by_id_workflows__workflow_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Id"}, "name": "workflow_id", "in": "path"}], "responses": {"201": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WorkflowCreateOrUpdateDTO"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["workflows", "alerts"], "summary": "Delete Workflow By Id", "description": "Delete workflow", "operationId": "delete_workflow_by_id_workflows__workflow_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Id"}, "name": "workflow_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/{workflow_id}/raw": {"get": {"tags": ["workflows", "alerts"], "summary": "Get 
Raw Workflow By Id", "description": "Get workflow executions by ID", "operationId": "get_raw_workflow_by_id_workflows__workflow_id__raw_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Id"}, "name": "workflow_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "string", "title": "Response Get Raw Workflow By Id Workflows Workflow Id Raw Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/executions": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Workflow Executions By Alert Fingerprint", "description": "Get workflow executions by alert fingerprint", "operationId": "get_workflow_executions_by_alert_fingerprint_workflows_executions_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/WorkflowToAlertExecutionDTO"}, "type": "array", "title": "Response Get Workflow Executions By Alert Fingerprint Workflows Executions Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/{workflow_id}/runs/{workflow_execution_id}": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Workflow Execution Status", "description": "Get a workflow execution status", "operationId": "get_workflow_execution_status_workflows__workflow_id__runs__workflow_execution_id__get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Workflow Execution Id"}, "name": "workflow_execution_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/WorkflowExecutionDTO"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/workflows/executions/list": {"get": {"tags": ["workflows", "alerts"], "summary": "Get Workflow Executions", "description": "List last workflow executions", "operationId": "get_workflow_executions_workflows_executions_list_get", "parameters": [{"description": "Workflow execution ID", "required": false, "schema": {"type": "string", "title": "Workflow Execution Id", "description": "Workflow execution ID"}, "name": "workflow_execution_id", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/WorkflowExecutionDTO"}, "type": "array", "title": "Response Get Workflow Executions Workflows Executions List Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/whoami": {"get": {"tags": ["whoami"], "summary": "Get Tenant Id", "description": "Get tenant id", "operationId": "get_tenant_id_whoami_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Get Tenant Id Whoami Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/pusher/auth": {"post": {"tags": 
["pusher"], "summary": "Pusher Authentication", "description": "Authenticate a user to a private channel\n\nArgs:\n request (Request): The request object\n tenant_id (str, optional): The tenant ID. Defaults to Depends(verify_bearer_token).\n pusher_client (Pusher, optional): Pusher client. Defaults to Depends(get_pusher_client).\n\nRaises:\n HTTPException: 403 if the user is not allowed to access the channel.\n\nReturns:\n dict: The authentication response.", "operationId": "pusher_authentication_pusher_auth_post", "requestBody": {"content": {"application/x-www-form-urlencoded": {"schema": {"$ref": "#/components/schemas/Body_pusher_authentication_pusher_auth_post"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Pusher Authentication Pusher Auth Post"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/status": {"get": {"tags": ["status"], "summary": "Status", "description": "simple status endpoint", "operationId": "status_status_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"type": "object", "title": "Response Status Status Get"}}}}}}}, "/rules": {"get": {"tags": ["rules"], "summary": "Get Rules", "description": "Get Rules", "operationId": "get_rules_rules_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["rules"], "summary": "Create Rule", "description": "Create Rule", "operationId": "create_rule_rules_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/RuleCreateDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/rules/{rule_id}": {"put": {"tags": ["rules"], "summary": "Update Rule", "description": "Update Rule", "operationId": "update_rule_rules__rule_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["rules"], "summary": "Delete Rule", "description": "Delete Rule", "operationId": "delete_rule_rules__rule_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/preset": {"get": {"tags": 
["preset"], "summary": "Get Presets", "description": "Get all presets for tenant", "operationId": "get_presets_preset_get", "parameters": [{"required": false, "schema": {"type": "string", "title": "Time Stamp"}, "name": "time_stamp", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/PresetDto"}, "type": "array", "title": "Response Get Presets Preset Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["preset"], "summary": "Create Preset", "description": "Create a preset for tenant", "operationId": "create_preset_preset_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreateOrUpdatePresetDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/PresetDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/preset/{uuid}": {"put": {"tags": ["preset"], "summary": "Update Preset", "description": "Update a preset for tenant", "operationId": "update_preset_preset__uuid__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Uuid"}, "name": "uuid", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreateOrUpdatePresetDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/PresetDto"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["preset"], "summary": "Delete Preset", "description": "Delete a preset for tenant", "operationId": "delete_preset_preset__uuid__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Uuid"}, "name": "uuid", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/preset/{preset_name}/alerts": {"get": {"tags": ["preset"], "summary": "Get Preset Alerts", "description": "Get a preset for tenant", "operationId": "get_preset_alerts_preset__preset_name__alerts_get", "parameters": [{"required": true, "schema": {"type": "string", "title": "Preset Name"}, "name": "preset_name", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {}, "type": "array", "title": "Response Get Preset Alerts Preset Preset Name Alerts Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/preset/{preset_id}/tab": 
{"post": {"tags": ["preset"], "summary": "Create Preset Tab", "description": "Create a tab for a preset", "operationId": "create_preset_tab_preset__preset_id__tab_post", "parameters": [{"required": true, "schema": {"type": "string", "title": "Preset Id"}, "name": "preset_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreatePresetTab"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/preset/{preset_id}/tab/{tab_id}": {"delete": {"tags": ["preset"], "summary": "Delete Tab", "description": "Delete a tab from a preset", "operationId": "delete_tab_preset__preset_id__tab__tab_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Preset Id"}, "name": "preset_id", "in": "path"}, {"required": true, "schema": {"type": "string", "title": "Tab Id"}, "name": "tab_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/mapping": {"get": {"tags": ["enrichment", "mapping"], "summary": "Get Rules", "description": "Get all mapping rules", "operationId": "get_rules_mapping_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/MappingRuleDtoOut"}, "type": "array", "title": "Response Get Rules Mapping Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["enrichment", "mapping"], "summary": "Create Rule", "description": "Create a new mapping rule", "operationId": "create_rule_mapping_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/MappingRuleDtoIn"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/MappingRule"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/mapping/{rule_id}": {"put": {"tags": ["enrichment", "mapping"], "summary": "Update Rule", "description": "Update an existing rule", "operationId": "update_rule_mapping__rule_id__put", "parameters": [{"required": true, "schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/MappingRuleDtoIn"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/MappingRuleDtoOut"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["enrichment", "mapping"], "summary": "Delete Rule", 
"description": "Delete a mapping rule", "operationId": "delete_rule_mapping__rule_id__delete", "parameters": [{"required": true, "schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/groups": {"get": {"tags": ["auth", "groups"], "summary": "Get Groups", "description": "Get all groups", "operationId": "get_groups_auth_groups_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/Group"}, "type": "array", "title": "Response Get Groups Auth Groups Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["auth", "groups"], "summary": "Create Group", "description": "Create a group", "operationId": "create_group_auth_groups_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreateOrUpdateGroupRequest"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/groups/{group_name}": {"put": {"tags": ["auth", "groups"], "summary": "Update Group", "description": "Update a group", "operationId": "update_group_auth_groups__group_name__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Group Name"}, "name": "group_name", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/CreateOrUpdateGroupRequest"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["auth", "groups"], "summary": "Delete Group", "description": "Delete a group", "operationId": "delete_group_auth_groups__group_name__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Group Name"}, "name": "group_name", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/permissions": {"get": {"tags": ["auth", "permissions"], "summary": "Get Permissions", "description": "Get resources permissions", "operationId": "get_permissions_auth_permissions_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/ResourcePermission"}, "type": "array", "title": "Response Get Permissions Auth Permissions Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": 
[]}]}, "post": {"tags": ["auth", "permissions"], "summary": "Create Permissions", "description": "Create permissions for resources", "operationId": "create_permissions_auth_permissions_post", "requestBody": {"content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/ResourcePermission"}, "type": "array", "title": "Resource Permissions", "description": "List of resource permissions"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/permissions/scopes": {"get": {"tags": ["auth", "permissions"], "summary": "Get Scopes", "description": "Get all resources types", "operationId": "get_scopes_auth_permissions_scopes_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"type": "string"}, "type": "array", "title": "Response Get Scopes Auth Permissions Scopes Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/roles": {"get": {"tags": ["auth", "roles"], "summary": "Get Roles", "description": "Get roles", "operationId": "get_roles_auth_roles_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/Role"}, "type": "array", "title": "Response Get Roles Auth Roles Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["auth", "roles"], "summary": "Create Role", "description": "Create role", "operationId": "create_role_auth_roles_post", "requestBody": {"content": {"application/json": {"schema": {"allOf": [{"$ref": "#/components/schemas/CreateOrUpdateRole"}], "title": "Role", "description": "Role"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/roles/{role_id}": {"put": {"tags": ["auth", "roles"], "summary": "Update Role", "description": "Update role", "operationId": "update_role_auth_roles__role_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Role Id"}, "name": "role_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"allOf": [{"$ref": "#/components/schemas/CreateOrUpdateRole"}], "title": "Role", "description": "Role"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["auth", "roles"], "summary": "Delete Role", "description": "Delete role", "operationId": "delete_role_auth_roles__role_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Role Id"}, "name": "role_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": 
{}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/users": {"get": {"tags": ["auth", "users"], "summary": "Get Users", "description": "Get all users", "operationId": "get_users_auth_users_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/User"}, "type": "array", "title": "Response Get Users Auth Users Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["auth", "users"], "summary": "Create User", "description": "Create a user", "operationId": "create_user_auth_users_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/keep__api__routes__auth__users__CreateUserRequest"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/auth/users/{user_email}": {"put": {"tags": ["auth", "users"], "summary": "Update User", "description": "Update a user", "operationId": "update_user_auth_users__user_email__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "User Email"}, "name": "user_email", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/UpdateUserRequest"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["auth", "users"], "summary": "Delete User", "description": "Delete a user", "operationId": "delete_user_auth_users__user_email__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "User Email"}, "name": "user_email", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/metrics": {"get": {"tags": ["metrics"], "summary": "Get Metrics", "description": "This endpoint is used by Prometheus to scrape such metrics from the application:\n- alerts_total {incident_name, incident_id} - The total number of alerts per incident.\n- open_incidents_total - The total number of open incidents\n\nPlease note that those metrics are per-tenant and are not designed to be used for the monitoring of the application itself.\n\nExample prometheus configuration:\n```\nscrape_configs:\n- job_name: \"scrape_keep\"\n scrape_interval: 5m # It's important to scrape not too often to avoid rate limiting.\n static_configs:\n - targets: [\"https://api.keephq.dev\"] # Or your own domain.\n authorization:\n type: Bearer\n credentials: \"{Your API Key}\"\n\n # Optional, you can add labels to exported incidents. 
\n # Label values will be equal to the last incident's alert payload value matching the label.\n # Attention! Don't add \"flaky\" labels which could change from alert to alert within the same incident.\n # Good labels: ['labels.department', 'labels.team'], bad labels: ['labels.severity', 'labels.pod_id']\n # Check Keep -> Feed -> \"extraPayload\" column; it will help in writing labels.\n\n params:\n labels: ['labels.service', 'labels.queue']\n # Will result in: \"labels_service\" and \"labels_queue\".\n```", "operationId": "get_metrics_metrics_get", "parameters": [{"required": false, "schema": {"items": {"type": "string"}, "type": "array", "title": "Labels"}, "name": "labels", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/extraction": {"get": {"tags": ["enrichment", "extraction"], "summary": "Get Extraction Rules", "description": "Get all extraction rules", "operationId": "get_extraction_rules_extraction_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/ExtractionRuleDtoOut"}, "type": "array", "title": "Response Get Extraction Rules Extraction Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["enrichment", "extraction"], "summary": "Create Extraction Rule", "description": "Create a new extraction rule", "operationId": "create_extraction_rule_extraction_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/ExtractionRuleDtoBase"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/ExtractionRuleDtoOut"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/extraction/{rule_id}": {"put": {"tags": ["enrichment", "extraction"], "summary": "Update Extraction Rule", "description": "Update an existing extraction rule", "operationId": "update_extraction_rule_extraction__rule_id__put", "parameters": [{"required": true, "schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/ExtractionRuleDtoBase"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/ExtractionRuleDtoOut"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["enrichment", "extraction"], "summary": "Delete Extraction Rule", "description": "Delete an extraction rule", "operationId": "delete_extraction_rule_extraction__rule_id__delete", "parameters": [{"required": true, "schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": 
{"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/dashboard": {"get": {"tags": ["dashboard"], "summary": "Read Dashboards", "operationId": "read_dashboards_dashboard_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/DashboardResponseDTO"}, "type": "array", "title": "Response Read Dashboards Dashboard Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["dashboard"], "summary": "Create Dashboard", "operationId": "create_dashboard_dashboard_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/DashboardCreateDTO"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/DashboardResponseDTO"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/dashboard/{dashboard_id}": {"put": {"tags": ["dashboard"], "summary": "Update Dashboard", "operationId": "update_dashboard_dashboard__dashboard_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Dashboard Id"}, "name": "dashboard_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/DashboardUpdateDTO"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/DashboardResponseDTO"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["dashboard"], "summary": "Delete Dashboard", "operationId": "delete_dashboard_dashboard__dashboard_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Dashboard Id"}, "name": "dashboard_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/tags": {"get": {"tags": ["tags"], "summary": "Get Tags", "description": "get tags", "operationId": "get_tags_tags_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"type": "object"}, "type": "array", "title": "Response Get Tags Tags Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/maintenance": {"get": {"tags": ["maintenance"], "summary": "Get Maintenance Rules", "description": "Get all maintenance rules", "operationId": "get_maintenance_rules_maintenance_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/MaintenanceRuleRead"}, "type": "array", "title": "Response Get Maintenance Rules Maintenance 
Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["maintenance"], "summary": "Create Maintenance Rule", "description": "Create a new maintenance rule", "operationId": "create_maintenance_rule_maintenance_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/MaintenanceRuleCreate"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/MaintenanceRuleRead"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/maintenance/{rule_id}": {"put": {"tags": ["maintenance"], "summary": "Update Maintenance Rule", "description": "Update an existing maintenance rule", "operationId": "update_maintenance_rule_maintenance__rule_id__put", "parameters": [{"required": true, "schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/MaintenanceRuleCreate"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/MaintenanceRuleRead"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["maintenance"], "summary": "Delete Maintenance Rule", "description": "Delete a maintenance rule", "operationId": "delete_maintenance_rule_maintenance__rule_id__delete", "parameters": [{"required": true, "schema": {"type": "integer", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/topology": {"get": {"tags": ["topology"], "summary": "Get Topology Data", "description": "Get all topology data", "operationId": "get_topology_data_topology_get", "parameters": [{"required": false, "schema": {"type": "string", "title": "Provider Id"}, "name": "provider_id", "in": "query"}, {"required": false, "schema": {"type": "string", "title": "Service Id"}, "name": "service_id", "in": "query"}, {"required": false, "schema": {"type": "string", "title": "Environment"}, "name": "environment", "in": "query"}, {"required": false, "schema": {"type": "boolean", "title": "Include Empty Deps", "default": false}, "name": "include_empty_deps", "in": "query"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/TopologyServiceDtoOut"}, "type": "array", "title": "Response Get Topology Data Topology Get"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/topology/applications": {"get": {"tags": ["topology"], "summary": "Get Applications", "description": "Get all 
applications", "operationId": "get_applications_topology_applications_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/TopologyApplicationDtoOut"}, "type": "array", "title": "Response Get Applications Topology Applications Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["topology"], "summary": "Create Application", "description": "Create a new application", "operationId": "create_application_topology_applications_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/TopologyApplicationDtoIn"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/TopologyApplicationDtoOut"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/topology/applications/{application_id}": {"put": {"tags": ["topology"], "summary": "Update Application", "description": "Update an application", "operationId": "update_application_topology_applications__application_id__put", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Application Id"}, "name": "application_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/TopologyApplicationDtoIn"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/TopologyApplicationDtoOut"}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["topology"], "summary": "Delete Application", "description": "Delete an application", "operationId": "delete_application_topology_applications__application_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "format": "uuid", "title": "Application Id"}, "name": "application_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/deduplications": {"get": {"tags": ["deduplications"], "summary": "Get Deduplications", "description": "Get Deduplications", "operationId": "get_deduplications_deduplications_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "post": {"tags": ["deduplications"], "summary": "Create Deduplication Rule", "description": "Create Deduplication Rule", "operationId": "create_deduplication_rule_deduplications_post", "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/DeduplicationRuleRequestDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation 
Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/deduplications/fields": {"get": {"tags": ["deduplications"], "summary": "Get Deduplication Fields", "description": "Get Optional Fields For Deduplications", "operationId": "get_deduplication_fields_deduplications_fields_get", "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {"additionalProperties": {"items": {"type": "string"}, "type": "array"}, "type": "object", "title": "Response Get Deduplication Fields Deduplications Fields Get"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}, "/deduplications/{rule_id}": {"put": {"tags": ["deduplications"], "summary": "Update Deduplication Rule", "description": "Update Deduplication Rule", "operationId": "update_deduplication_rule_deduplications__rule_id__put", "parameters": [{"required": true, "schema": {"type": "string", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "requestBody": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/DeduplicationRuleRequestDto"}}}, "required": true}, "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}, "delete": {"tags": ["deduplications"], "summary": "Delete Deduplication Rule", "description": "Delete Deduplication Rule", "operationId": "delete_deduplication_rule_deduplications__rule_id__delete", "parameters": [{"required": true, "schema": {"type": "string", "title": "Rule Id"}, "name": "rule_id", "in": "path"}], "responses": {"200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}}, "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}}}, "security": [{"API Key": []}, {"HTTPBasic": []}, {"OAuth2PasswordBearer": []}]}}}, "components": {"schemas": {"AlertActionType": {"enum": ["alert was triggered", "alert acknowledged", "alert automatically resolved", "alert automatically resolved by API", "alert manually resolved", "alert status manually changed", "alert status changed by API", "alert status undone", "alert enriched by workflow", "alert enriched by mapping rule", "alert was deduplicated", "alert was assigned with ticket", "alert was unassigned from ticket", "alert ticket was updated", "alert enrichments disposed", "alert deleted", "alert enriched", "alert un-enriched", "a comment was added to the alert", "a comment was removed from the alert", "Alert is in maintenance window"], "title": "AlertActionType", "description": "An enumeration."}, "AlertAuditDto": {"properties": {"id": {"type": "string", "title": "Id"}, "timestamp": {"type": "string", "format": "date-time", "title": "Timestamp"}, "fingerprint": {"type": "string", "title": "Fingerprint"}, "action": {"$ref": "#/components/schemas/AlertActionType"}, "user_id": {"type": "string", "title": "User Id"}, "description": {"type": "string", "title": "Description"}}, "type": "object", "required": ["id", "timestamp", "fingerprint", "action", "user_id", "description"], "title": "AlertAuditDto"}, "AlertDto": {"properties": {"id": {"type": "string", "title": 
"Id"}, "name": {"type": "string", "title": "Name"}, "status": {"$ref": "#/components/schemas/AlertStatus"}, "severity": {"$ref": "#/components/schemas/AlertSeverity"}, "lastReceived": {"type": "string", "title": "Lastreceived"}, "firingStartTime": {"type": "string", "title": "Firingstarttime"}, "environment": {"type": "string", "title": "Environment", "default": "undefined"}, "isFullDuplicate": {"type": "boolean", "title": "Isfullduplicate", "default": false}, "isPartialDuplicate": {"type": "boolean", "title": "Ispartialduplicate", "default": false}, "duplicateReason": {"type": "string", "title": "Duplicatereason"}, "service": {"type": "string", "title": "Service"}, "source": {"items": {"type": "string"}, "type": "array", "title": "Source", "default": []}, "apiKeyRef": {"type": "string", "title": "Apikeyref"}, "message": {"type": "string", "title": "Message"}, "description": {"type": "string", "title": "Description"}, "pushed": {"type": "boolean", "title": "Pushed", "default": false}, "event_id": {"type": "string", "title": "Event Id"}, "url": {"type": "string", "maxLength": 65536, "minLength": 1, "format": "uri", "title": "Url"}, "labels": {"type": "object", "title": "Labels", "default": {}}, "fingerprint": {"type": "string", "title": "Fingerprint"}, "deleted": {"type": "boolean", "title": "Deleted", "default": false}, "dismissUntil": {"type": "string", "title": "Dismissuntil"}, "dismissed": {"type": "boolean", "title": "Dismissed", "default": false}, "assignee": {"type": "string", "title": "Assignee"}, "providerId": {"type": "string", "title": "Providerid"}, "providerType": {"type": "string", "title": "Providertype"}, "note": {"type": "string", "title": "Note"}, "startedAt": {"type": "string", "title": "Startedat"}, "isNoisy": {"type": "boolean", "title": "Isnoisy", "default": false}, "enriched_fields": {"items": {}, "type": "array", "title": "Enriched Fields", "default": []}}, "type": "object", "required": ["name", "status", "severity", "lastReceived"], "title": "AlertDto", "example": {"id": "1234", "name": "Alert name", "status": "firing", "lastReceived": "2021-01-01T00:00:00.000Z", "environment": "production", "service": "backend", "source": ["keep"], "message": "Keep: Alert message", "description": "Keep: Alert description", "severity": "critical", "pushed": true, "event_id": "1234", "url": "https://www.keephq.dev?alertId=1234", "labels": {"key": "value"}, "ticket_url": "https://www.keephq.dev?enrichedTicketId=456", "fingerprint": "1234"}}, "AlertPaginatedResultsDto": {"properties": {"limit": {"type": "integer", "title": "Limit", "default": 25}, "offset": {"type": "integer", "title": "Offset", "default": 0}, "count": {"type": "integer", "title": "Count"}, "items": {"items": {"$ref": "#/components/schemas/AlertDto"}, "type": "array", "title": "Items"}}, "type": "object", "required": ["count", "items"], "title": "AlertPaginatedResultsDto"}, "AlertSeverity": {"enum": ["critical", "high", "warning", "info", "low"], "title": "AlertSeverity", "description": "An enumeration."}, "AlertStatus": {"enum": ["firing", "resolved", "acknowledged", "suppressed", "pending"], "title": "AlertStatus", "description": "An enumeration."}, "Body_create_actions_actions_post": {"properties": {"file": {"type": "string", "format": "binary", "title": "File"}}, "type": "object", "title": "Body_create_actions_actions_post"}, "Body_create_workflow_workflows_post": {"properties": {"file": {"type": "string", "format": "binary", "title": "File"}}, "type": "object", "required": ["file"], "title": 
"Body_create_workflow_workflows_post"}, "Body_pusher_authentication_pusher_auth_post": {"properties": {"channel_name": {"title": "Channel Name"}, "socket_id": {"title": "Socket Id"}}, "type": "object", "required": ["channel_name", "socket_id"], "title": "Body_pusher_authentication_pusher_auth_post"}, "Body_put_action_actions__action_id__put": {"properties": {"file": {"type": "string", "format": "binary", "title": "File"}}, "type": "object", "required": ["file"], "title": "Body_put_action_actions__action_id__put"}, "Body_run_workflow_from_definition_workflows_test_post": {"properties": {"file": {"type": "string", "format": "binary", "title": "File"}}, "type": "object", "title": "Body_run_workflow_from_definition_workflows_test_post"}, "CreateOrUpdateGroupRequest": {"properties": {"name": {"type": "string", "title": "Name"}, "roles": {"items": {"type": "string"}, "type": "array", "title": "Roles"}, "members": {"items": {"type": "string"}, "type": "array", "title": "Members"}}, "type": "object", "required": ["name", "roles", "members"], "title": "CreateOrUpdateGroupRequest"}, "CreateOrUpdatePresetDto": {"properties": {"name": {"type": "string", "title": "Name"}, "options": {"items": {"$ref": "#/components/schemas/PresetOption"}, "type": "array", "title": "Options"}, "is_private": {"type": "boolean", "title": "Is Private", "default": false}, "is_noisy": {"type": "boolean", "title": "Is Noisy", "default": false}, "tags": {"items": {"$ref": "#/components/schemas/TagDto"}, "type": "array", "title": "Tags", "default": []}}, "type": "object", "required": ["options"], "title": "CreateOrUpdatePresetDto"}, "CreateOrUpdateRole": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "scopes": {"items": {"type": "string"}, "type": "array", "uniqueItems": true, "title": "Scopes"}}, "type": "object", "title": "CreateOrUpdateRole"}, "CreatePresetTab": {"properties": {"name": {"type": "string", "title": "Name"}, "filter": {"type": "string", "title": "Filter"}}, "type": "object", "required": ["name", "filter"], "title": "CreatePresetTab"}, "DashboardCreateDTO": {"properties": {"dashboard_name": {"type": "string", "title": "Dashboard Name"}, "dashboard_config": {"type": "object", "title": "Dashboard Config"}}, "type": "object", "required": ["dashboard_name", "dashboard_config"], "title": "DashboardCreateDTO"}, "DashboardResponseDTO": {"properties": {"id": {"type": "string", "title": "Id"}, "dashboard_name": {"type": "string", "title": "Dashboard Name"}, "dashboard_config": {"type": "object", "title": "Dashboard Config"}, "created_at": {"type": "string", "format": "date-time", "title": "Created At"}, "updated_at": {"type": "string", "format": "date-time", "title": "Updated At"}}, "type": "object", "required": ["id", "dashboard_name", "dashboard_config", "created_at", "updated_at"], "title": "DashboardResponseDTO"}, "DashboardUpdateDTO": {"properties": {"dashboard_config": {"type": "object", "title": "Dashboard Config"}, "dashboard_name": {"type": "string", "title": "Dashboard Name"}}, "type": "object", "title": "DashboardUpdateDTO"}, "DeduplicationRuleRequestDto": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "provider_type": {"type": "string", "title": "Provider Type"}, "provider_id": {"type": "string", "title": "Provider Id"}, "fingerprint_fields": {"items": {"type": "string"}, "type": "array", "title": "Fingerprint Fields"}, "full_deduplication": {"type": "boolean", 
"title": "Full Deduplication", "default": false}, "ignore_fields": {"items": {"type": "string"}, "type": "array", "title": "Ignore Fields"}}, "type": "object", "required": ["name", "provider_type", "fingerprint_fields"], "title": "DeduplicationRuleRequestDto"}, "DeleteRequestBody": {"properties": {"fingerprint": {"type": "string", "title": "Fingerprint"}, "lastReceived": {"type": "string", "title": "Lastreceived"}, "restore": {"type": "boolean", "title": "Restore", "default": false}}, "type": "object", "required": ["fingerprint", "lastReceived"], "title": "DeleteRequestBody"}, "EnrichAlertRequestBody": {"properties": {"enrichments": {"additionalProperties": {"type": "string"}, "type": "object", "title": "Enrichments"}, "fingerprint": {"type": "string", "title": "Fingerprint"}}, "type": "object", "required": ["enrichments", "fingerprint"], "title": "EnrichAlertRequestBody"}, "ExtractionRuleDtoBase": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "priority": {"type": "integer", "title": "Priority", "default": 0}, "attribute": {"type": "string", "title": "Attribute"}, "condition": {"type": "string", "title": "Condition"}, "disabled": {"type": "boolean", "title": "Disabled", "default": false}, "regex": {"type": "string", "title": "Regex"}, "pre": {"type": "boolean", "title": "Pre", "default": false}}, "type": "object", "required": ["name", "regex"], "title": "ExtractionRuleDtoBase"}, "ExtractionRuleDtoOut": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "priority": {"type": "integer", "title": "Priority", "default": 0}, "attribute": {"type": "string", "title": "Attribute"}, "condition": {"type": "string", "title": "Condition"}, "disabled": {"type": "boolean", "title": "Disabled", "default": false}, "regex": {"type": "string", "title": "Regex"}, "pre": {"type": "boolean", "title": "Pre", "default": false}, "id": {"type": "integer", "title": "Id"}, "created_by": {"type": "string", "title": "Created By"}, "created_at": {"type": "string", "format": "date-time", "title": "Created At"}, "updated_by": {"type": "string", "title": "Updated By"}, "updated_at": {"type": "string", "format": "date-time", "title": "Updated At"}}, "type": "object", "required": ["name", "regex", "id", "created_at"], "title": "ExtractionRuleDtoOut"}, "Group": {"properties": {"id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "roles": {"items": {"type": "string"}, "type": "array", "title": "Roles", "default": []}, "members": {"items": {"type": "string"}, "type": "array", "title": "Members", "default": []}, "memberCount": {"type": "integer", "title": "Membercount", "default": 0}}, "type": "object", "required": ["id", "name"], "title": "Group"}, "HTTPValidationError": {"properties": {"detail": {"items": {"$ref": "#/components/schemas/ValidationError"}, "type": "array", "title": "Detail"}}, "type": "object", "title": "HTTPValidationError"}, "IncidentDto": {"properties": {"user_generated_name": {"type": "string", "title": "User Generated Name"}, "assignee": {"type": "string", "title": "Assignee"}, "user_summary": {"type": "string", "title": "User Summary"}, "id": {"type": "string", "format": "uuid", "title": "Id"}, "start_time": {"type": "string", "format": "date-time", "title": "Start Time"}, "last_seen_time": {"type": "string", "format": "date-time", "title": "Last Seen Time"}, "end_time": {"type": "string", "format": "date-time", "title": "End Time"}, 
"alerts_count": {"type": "integer", "title": "Alerts Count"}, "alert_sources": {"items": {"type": "string"}, "type": "array", "title": "Alert Sources"}, "severity": {"$ref": "#/components/schemas/IncidentSeverity"}, "status": {"allOf": [{"$ref": "#/components/schemas/IncidentStatus"}], "default": "firing"}, "services": {"items": {"type": "string"}, "type": "array", "title": "Services"}, "is_predicted": {"type": "boolean", "title": "Is Predicted"}, "is_confirmed": {"type": "boolean", "title": "Is Confirmed"}, "generated_summary": {"type": "string", "title": "Generated Summary"}, "ai_generated_name": {"type": "string", "title": "Ai Generated Name"}, "rule_fingerprint": {"type": "string", "title": "Rule Fingerprint"}}, "type": "object", "required": ["id", "alerts_count", "alert_sources", "severity", "services", "is_predicted", "is_confirmed"], "title": "IncidentDto", "example": {"id": "c2509cb3-6168-4347-b83b-a41da9df2d5b", "name": "Incident name", "user_summary": "Keep: Incident description", "status": "firing"}}, "IncidentDtoIn": {"properties": {"user_generated_name": {"type": "string", "title": "User Generated Name"}, "assignee": {"type": "string", "title": "Assignee"}, "user_summary": {"type": "string", "title": "User Summary"}}, "type": "object", "title": "IncidentDtoIn", "example": {"id": "c2509cb3-6168-4347-b83b-a41da9df2d5b", "name": "Incident name", "user_summary": "Keep: Incident description", "status": "firing"}}, "IncidentSeverity": {"enum": ["critical", "high", "warning", "info", "low"], "title": "IncidentSeverity", "description": "An enumeration."}, "IncidentSorting": {"enum": ["creation_time", "start_time", "last_seen_time", "severity", "status", "alerts_count", "-creation_time", "-start_time", "-last_seen_time", "-severity", "-status", "-alerts_count"], "title": "IncidentSorting", "description": "An enumeration."}, "IncidentStatus": {"enum": ["firing", "resolved", "acknowledged"], "title": "IncidentStatus", "description": "An enumeration."}, "IncidentStatusChangeDto": {"properties": {"status": {"$ref": "#/components/schemas/IncidentStatus"}, "comment": {"type": "string", "title": "Comment"}}, "type": "object", "required": ["status"], "title": "IncidentStatusChangeDto"}, "IncidentsPaginatedResultsDto": {"properties": {"limit": {"type": "integer", "title": "Limit", "default": 25}, "offset": {"type": "integer", "title": "Offset", "default": 0}, "count": {"type": "integer", "title": "Count"}, "items": {"items": {"$ref": "#/components/schemas/IncidentDto"}, "type": "array", "title": "Items"}}, "type": "object", "required": ["count", "items"], "title": "IncidentsPaginatedResultsDto"}, "MaintenanceRuleCreate": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "cel_query": {"type": "string", "title": "Cel Query"}, "start_time": {"type": "string", "format": "date-time", "title": "Start Time"}, "duration_seconds": {"type": "integer", "title": "Duration Seconds"}, "suppress": {"type": "boolean", "title": "Suppress", "default": false}, "enabled": {"type": "boolean", "title": "Enabled", "default": true}}, "type": "object", "required": ["name", "cel_query", "start_time"], "title": "MaintenanceRuleCreate"}, "MaintenanceRuleRead": {"properties": {"id": {"type": "integer", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "created_by": {"type": "string", "title": "Created By"}, "cel_query": {"type": "string", "title": "Cel Query"}, "start_time": {"type": 
"string", "format": "date-time", "title": "Start Time"}, "end_time": {"type": "string", "format": "date-time", "title": "End Time"}, "duration_seconds": {"type": "integer", "title": "Duration Seconds"}, "updated_at": {"type": "string", "format": "date-time", "title": "Updated At"}, "suppress": {"type": "boolean", "title": "Suppress", "default": false}, "enabled": {"type": "boolean", "title": "Enabled", "default": true}}, "type": "object", "required": ["id", "name", "created_by", "cel_query", "start_time", "end_time"], "title": "MaintenanceRuleRead"}, "MappingRule": {"properties": {"id": {"type": "integer", "title": "Id"}, "tenant_id": {"type": "string", "title": "Tenant Id"}, "priority": {"type": "integer", "title": "Priority", "default": 0}, "name": {"type": "string", "maxLength": 255, "title": "Name"}, "description": {"type": "string", "maxLength": 2048, "title": "Description"}, "file_name": {"type": "string", "maxLength": 255, "title": "File Name"}, "created_by": {"type": "string", "maxLength": 255, "title": "Created By"}, "created_at": {"type": "string", "format": "date-time", "title": "Created At"}, "disabled": {"type": "boolean", "title": "Disabled", "default": false}, "override": {"type": "boolean", "title": "Override", "default": true}, "condition": {"type": "string", "maxLength": 2000, "title": "Condition"}, "type": {"type": "string", "maxLength": 255, "title": "Type"}, "matchers": {"items": {"type": "string"}, "type": "array", "title": "Matchers"}, "rows": {"items": {"type": "object"}, "type": "array", "title": "Rows"}, "updated_by": {"type": "string", "maxLength": 255, "title": "Updated By"}, "last_updated_at": {"type": "string", "format": "date-time", "title": "Last Updated At"}}, "type": "object", "required": ["tenant_id", "name", "type", "matchers"], "title": "MappingRule"}, "MappingRuleDtoIn": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "file_name": {"type": "string", "title": "File Name"}, "priority": {"type": "integer", "title": "Priority", "default": 0}, "matchers": {"items": {"type": "string"}, "type": "array", "title": "Matchers"}, "type": {"type": "string", "enum": ["csv", "topology"], "title": "Type", "default": "csv"}, "rows": {"items": {"type": "object"}, "type": "array", "title": "Rows"}}, "type": "object", "required": ["name", "matchers"], "title": "MappingRuleDtoIn"}, "MappingRuleDtoOut": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "file_name": {"type": "string", "title": "File Name"}, "priority": {"type": "integer", "title": "Priority", "default": 0}, "matchers": {"items": {"type": "string"}, "type": "array", "title": "Matchers"}, "type": {"type": "string", "enum": ["csv", "topology"], "title": "Type", "default": "csv"}, "id": {"type": "integer", "title": "Id"}, "created_by": {"type": "string", "title": "Created By"}, "created_at": {"type": "string", "format": "date-time", "title": "Created At"}, "attributes": {"items": {"type": "string"}, "type": "array", "title": "Attributes", "default": []}, "updated_by": {"type": "string", "title": "Updated By"}, "last_updated_at": {"type": "string", "format": "date-time", "title": "Last Updated At"}}, "type": "object", "required": ["name", "matchers", "id", "created_at"], "title": "MappingRuleDtoOut"}, "PermissionEntity": {"properties": {"id": {"type": "string", "title": "Id"}, "type": {"type": "string", "title": "Type"}}, "type": "object", "required": ["id", "type"], 
"title": "PermissionEntity"}, "PresetDto": {"properties": {"id": {"type": "string", "format": "uuid", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "options": {"items": {}, "type": "array", "title": "Options", "default": []}, "created_by": {"type": "string", "title": "Created By"}, "is_private": {"type": "boolean", "title": "Is Private", "default": false}, "is_noisy": {"type": "boolean", "title": "Is Noisy", "default": false}, "should_do_noise_now": {"type": "boolean", "title": "Should Do Noise Now", "default": false}, "alerts_count": {"type": "integer", "title": "Alerts Count", "default": 0}, "static": {"type": "boolean", "title": "Static", "default": false}, "tags": {"items": {"$ref": "#/components/schemas/TagDto"}, "type": "array", "title": "Tags", "default": []}}, "type": "object", "required": ["id", "name"], "title": "PresetDto"}, "PresetOption": {"properties": {"label": {"type": "string", "title": "Label"}, "value": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Value"}}, "type": "object", "required": ["label", "value"], "title": "PresetOption"}, "PresetSearchQuery": {"properties": {"cel_query": {"type": "string", "minLength": 1, "title": "Cel Query"}, "sql_query": {"type": "object", "title": "Sql Query"}, "limit": {"type": "integer", "minimum": 0.0, "title": "Limit", "default": 1000}, "timeframe": {"type": "integer", "minimum": 0.0, "title": "Timeframe", "default": 0}}, "type": "object", "required": ["cel_query", "sql_query"], "title": "PresetSearchQuery"}, "ProviderDTO": {"properties": {"type": {"type": "string", "title": "Type"}, "id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "installed": {"type": "boolean", "title": "Installed"}}, "type": "object", "required": ["type", "name", "installed"], "title": "ProviderDTO"}, "ProviderWebhookSettings": {"properties": {"webhookDescription": {"type": "string", "title": "Webhookdescription"}, "webhookTemplate": {"type": "string", "title": "Webhooktemplate"}, "webhookMarkdown": {"type": "string", "title": "Webhookmarkdown"}}, "type": "object", "required": ["webhookTemplate"], "title": "ProviderWebhookSettings"}, "ResourcePermission": {"properties": {"resource_id": {"type": "string", "title": "Resource Id"}, "resource_name": {"type": "string", "title": "Resource Name"}, "resource_type": {"type": "string", "title": "Resource Type"}, "permissions": {"items": {"$ref": "#/components/schemas/PermissionEntity"}, "type": "array", "title": "Permissions"}}, "type": "object", "required": ["resource_id", "resource_name", "resource_type", "permissions"], "title": "ResourcePermission"}, "Role": {"properties": {"id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "scopes": {"items": {"type": "string"}, "type": "array", "uniqueItems": true, "title": "Scopes"}, "predefined": {"type": "boolean", "title": "Predefined", "default": true}}, "type": "object", "required": ["id", "name", "description", "scopes"], "title": "Role"}, "RuleCreateDto": {"properties": {"ruleName": {"type": "string", "title": "Rulename"}, "sqlQuery": {"type": "object", "title": "Sqlquery"}, "celQuery": {"type": "string", "title": "Celquery"}, "timeframeInSeconds": {"type": "integer", "title": "Timeframeinseconds"}, "timeUnit": {"type": "string", "title": "Timeunit"}, "groupingCriteria": {"items": {}, "type": "array", "title": "Groupingcriteria", "default": []}, "groupDescription": {"type": "string", "title": "Groupdescription"}, 
"requireApprove": {"type": "boolean", "title": "Requireapprove", "default": false}}, "type": "object", "required": ["ruleName", "sqlQuery", "celQuery", "timeframeInSeconds", "timeUnit"], "title": "RuleCreateDto"}, "SMTPSettings": {"properties": {"host": {"type": "string", "title": "Host"}, "port": {"type": "integer", "title": "Port"}, "from_email": {"type": "string", "title": "From Email"}, "username": {"type": "string", "title": "Username"}, "password": {"type": "string", "format": "password", "title": "Password", "writeOnly": true}, "secure": {"type": "boolean", "title": "Secure", "default": true}, "to_email": {"type": "string", "title": "To Email", "default": "keep@example.com"}}, "type": "object", "required": ["host", "port", "from_email"], "title": "SMTPSettings", "example": {"host": "smtp.example.com", "port": 587, "username": "user@example.com", "password": "password", "secure": true, "from_email": "noreply@example.com", "to_email": ""}}, "SearchAlertsRequest": {"properties": {"query": {"$ref": "#/components/schemas/PresetSearchQuery"}, "timeframe": {"type": "integer", "title": "Timeframe"}}, "type": "object", "required": ["query", "timeframe"], "title": "SearchAlertsRequest"}, "TagDto": {"properties": {"id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name"}}, "type": "object", "required": ["name"], "title": "TagDto"}, "TopologyApplicationDtoIn": {"properties": {"id": {"type": "string", "format": "uuid", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "services": {"items": {"$ref": "#/components/schemas/TopologyServiceDtoIn"}, "type": "array", "title": "Services", "default": []}}, "type": "object", "required": ["name"], "title": "TopologyApplicationDtoIn"}, "TopologyApplicationDtoOut": {"properties": {"id": {"type": "string", "format": "uuid", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "description": {"type": "string", "title": "Description"}, "services": {"items": {"$ref": "#/components/schemas/TopologyApplicationServiceDto"}, "type": "array", "title": "Services", "default": []}}, "type": "object", "required": ["id", "name"], "title": "TopologyApplicationDtoOut"}, "TopologyApplicationServiceDto": {"properties": {"id": {"type": "integer", "title": "Id"}, "name": {"type": "string", "title": "Name"}, "service": {"type": "string", "title": "Service"}}, "type": "object", "required": ["id", "name", "service"], "title": "TopologyApplicationServiceDto"}, "TopologyServiceDependencyDto": {"properties": {"serviceId": {"type": "integer", "title": "Serviceid"}, "serviceName": {"type": "string", "title": "Servicename"}, "protocol": {"type": "string", "title": "Protocol", "default": "unknown"}}, "type": "object", "required": ["serviceId", "serviceName"], "title": "TopologyServiceDependencyDto"}, "TopologyServiceDtoIn": {"properties": {"id": {"type": "integer", "title": "Id"}}, "type": "object", "required": ["id"], "title": "TopologyServiceDtoIn"}, "TopologyServiceDtoOut": {"properties": {"source_provider_id": {"type": "string", "title": "Source Provider Id"}, "repository": {"type": "string", "title": "Repository"}, "tags": {"items": {"type": "string"}, "type": "array", "title": "Tags"}, "service": {"type": "string", "title": "Service"}, "display_name": {"type": "string", "title": "Display Name"}, "environment": {"type": "string", "title": "Environment", "default": "unknown"}, "description": {"type": "string", "title": "Description"}, "team": {"type": "string", "title": "Team"}, 
"email": {"type": "string", "title": "Email"}, "slack": {"type": "string", "title": "Slack"}, "ip_address": {"type": "string", "title": "Ip Address"}, "mac_address": {"type": "string", "title": "Mac Address"}, "category": {"type": "string", "title": "Category"}, "manufacturer": {"type": "string", "title": "Manufacturer"}, "id": {"type": "integer", "title": "Id"}, "dependencies": {"items": {"$ref": "#/components/schemas/TopologyServiceDependencyDto"}, "type": "array", "title": "Dependencies"}, "application_ids": {"items": {"type": "string", "format": "uuid"}, "type": "array", "title": "Application Ids"}, "updated_at": {"type": "string", "format": "date-time", "title": "Updated At"}}, "type": "object", "required": ["service", "display_name", "id", "dependencies", "application_ids"], "title": "TopologyServiceDtoOut"}, "UnEnrichAlertRequestBody": {"properties": {"enrichments": {"items": {"type": "string"}, "type": "array", "title": "Enrichments"}, "fingerprint": {"type": "string", "title": "Fingerprint"}}, "type": "object", "required": ["enrichments", "fingerprint"], "title": "UnEnrichAlertRequestBody"}, "UpdateUserRequest": {"properties": {"username": {"type": "string", "title": "Username"}, "password": {"type": "string", "title": "Password"}, "role": {"type": "string", "title": "Role"}, "groups": {"items": {"type": "string"}, "type": "array", "title": "Groups"}}, "type": "object", "title": "UpdateUserRequest"}, "User": {"properties": {"email": {"type": "string", "title": "Email"}, "name": {"type": "string", "title": "Name"}, "role": {"type": "string", "title": "Role"}, "picture": {"type": "string", "title": "Picture"}, "created_at": {"type": "string", "title": "Created At"}, "last_login": {"type": "string", "title": "Last Login"}, "ldap": {"type": "boolean", "title": "Ldap", "default": false}, "groups": {"items": {"$ref": "#/components/schemas/Group"}, "type": "array", "title": "Groups", "default": []}}, "type": "object", "required": ["email", "name", "created_at"], "title": "User"}, "ValidationError": {"properties": {"loc": {"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}, "type": "array", "title": "Location"}, "msg": {"type": "string", "title": "Message"}, "type": {"type": "string", "title": "Error Type"}}, "type": "object", "required": ["loc", "msg", "type"], "title": "ValidationError"}, "WebhookSettings": {"properties": {"webhookApi": {"type": "string", "title": "Webhookapi"}, "apiKey": {"type": "string", "title": "Apikey"}, "modelSchema": {"type": "object", "title": "Modelschema"}}, "type": "object", "required": ["webhookApi", "apiKey", "modelSchema"], "title": "WebhookSettings"}, "WorkflowCreateOrUpdateDTO": {"properties": {"workflow_id": {"type": "string", "title": "Workflow Id"}, "status": {"type": "string", "enum": ["created", "updated"], "title": "Status"}, "revision": {"type": "integer", "title": "Revision", "default": 1}}, "type": "object", "required": ["workflow_id", "status"], "title": "WorkflowCreateOrUpdateDTO"}, "WorkflowDTO": {"properties": {"id": {"type": "string", "title": "Id"}, "name": {"type": "string", "title": "Name", "default": "Workflow file doesn't contain name"}, "description": {"type": "string", "title": "Description", "default": "Workflow file doesn't contain description"}, "created_by": {"type": "string", "title": "Created By"}, "creation_time": {"type": "string", "format": "date-time", "title": "Creation Time"}, "triggers": {"items": {"type": "object"}, "type": "array", "title": "Triggers"}, "interval": {"type": "integer", "title": "Interval"}, 
"disabled": {"type": "boolean", "title": "Disabled", "default": false}, "last_execution_time": {"type": "string", "format": "date-time", "title": "Last Execution Time"}, "last_execution_status": {"type": "string", "title": "Last Execution Status"}, "providers": {"items": {"$ref": "#/components/schemas/ProviderDTO"}, "type": "array", "title": "Providers"}, "workflow_raw": {"type": "string", "title": "Workflow Raw"}, "revision": {"type": "integer", "title": "Revision", "default": 1}, "last_updated": {"type": "string", "format": "date-time", "title": "Last Updated"}, "invalid": {"type": "boolean", "title": "Invalid", "default": false}, "last_executions": {"items": {"type": "object"}, "type": "array", "title": "Last Executions"}, "last_execution_started": {"type": "string", "format": "date-time", "title": "Last Execution Started"}, "provisioned": {"type": "boolean", "title": "Provisioned", "default": false}, "provisioned_file": {"type": "string", "title": "Provisioned File"}}, "type": "object", "required": ["id", "created_by", "creation_time", "providers", "workflow_raw"], "title": "WorkflowDTO"}, "WorkflowExecutionDTO": {"properties": {"id": {"type": "string", "title": "Id"}, "workflow_id": {"type": "string", "title": "Workflow Id"}, "started": {"type": "string", "format": "date-time", "title": "Started"}, "triggered_by": {"type": "string", "title": "Triggered By"}, "status": {"type": "string", "title": "Status"}, "logs": {"items": {"$ref": "#/components/schemas/WorkflowExecutionLogsDTO"}, "type": "array", "title": "Logs"}, "error": {"type": "string", "title": "Error"}, "execution_time": {"type": "number", "title": "Execution Time"}, "results": {"type": "object", "title": "Results"}}, "type": "object", "required": ["id", "workflow_id", "started", "triggered_by", "status"], "title": "WorkflowExecutionDTO"}, "WorkflowExecutionLogsDTO": {"properties": {"id": {"type": "integer", "title": "Id"}, "timestamp": {"type": "string", "format": "date-time", "title": "Timestamp"}, "message": {"type": "string", "title": "Message"}, "context": {"type": "object", "title": "Context"}}, "type": "object", "required": ["id", "timestamp", "message"], "title": "WorkflowExecutionLogsDTO"}, "WorkflowExecutionsPaginatedResultsDto": {"properties": {"limit": {"type": "integer", "title": "Limit", "default": 25}, "offset": {"type": "integer", "title": "Offset", "default": 0}, "count": {"type": "integer", "title": "Count"}, "items": {"items": {"$ref": "#/components/schemas/WorkflowExecutionDTO"}, "type": "array", "title": "Items"}, "passCount": {"type": "integer", "title": "Passcount", "default": 0}, "avgDuration": {"type": "number", "title": "Avgduration", "default": 0.0}, "workflow": {"$ref": "#/components/schemas/WorkflowDTO"}, "failCount": {"type": "integer", "title": "Failcount", "default": 0}}, "type": "object", "required": ["count", "items"], "title": "WorkflowExecutionsPaginatedResultsDto"}, "WorkflowToAlertExecutionDTO": {"properties": {"workflow_id": {"type": "string", "title": "Workflow Id"}, "workflow_execution_id": {"type": "string", "title": "Workflow Execution Id"}, "alert_fingerprint": {"type": "string", "title": "Alert Fingerprint"}, "workflow_status": {"type": "string", "title": "Workflow Status"}, "workflow_started": {"type": "string", "format": "date-time", "title": "Workflow Started"}}, "type": "object", "required": ["workflow_id", "workflow_execution_id", "alert_fingerprint", "workflow_status", "workflow_started"], "title": "WorkflowToAlertExecutionDTO"}, 
"keep__api__routes__auth__users__CreateUserRequest": {"properties": {"username": {"type": "string", "title": "Username"}, "name": {"type": "string", "title": "Name"}, "password": {"type": "string", "title": "Password"}, "role": {"type": "string", "title": "Role"}, "groups": {"items": {"type": "string"}, "type": "array", "title": "Groups"}}, "type": "object", "required": ["username"], "title": "CreateUserRequest"}, "keep__api__routes__settings__CreateUserRequest": {"properties": {"username": {"type": "string", "title": "Username"}, "password": {"type": "string", "title": "Password"}, "role": {"type": "string", "title": "Role"}}, "type": "object", "required": ["username", "role"], "title": "CreateUserRequest"}}, "securitySchemes": {"API Key": {"type": "apiKey", "in": "header", "name": "X-API-KEY"}, "HTTPBasic": {"type": "http", "scheme": "basic"}, "OAuth2PasswordBearer": {"type": "oauth2", "flows": {"password": {"scopes": {}, "tokenUrl": "token"}}}}}} \ No newline at end of file diff --git a/docs/overview/comparisons.mdx b/docs/overview/comparisons.mdx new file mode 100644 index 0000000000..2329579f33 --- /dev/null +++ b/docs/overview/comparisons.mdx @@ -0,0 +1,40 @@ +--- +title: "Comparison" +--- + +It's often easier to grasp a tool's features by comparing it to others in the same ecosystem. Here, we'll explain how Keep interacts with and compares to these tools. + +## Keep vs IRM (PagerDuty, OpsGenie, etc.) + +Incident management tools aim to notify the right person at the right time, simplify reporting, and set up efficient war rooms. + +"Keep" focuses on the alert lifecycle, noise reduction, and AI-driven alert-incident correlation. Essentially, Keep acts as an 'intelligent layer before the IRM,' managing millions of alerts before they reach your IRM tool. Keep offers high-quality integrations with PagerDuty, OpsGenie, Grafana OnCall, and more. + +## Keep vs AIOps in Observability (Elastic, Splunk, etc.) + +Keep is different because itโ€™s able to correlate alerts between different observability platforms. + +| | Keep | Alternative | +| ------------------------------------- | -------------------------------------------------------------- | ---------------------------- | +| Aggregates alerts from one platform | โœ… | โœ… | +| Aggregates alerts from mutliple platforms | โœ… | โŒ | +| Correlates alerts between multiple sources | โœ… | โŒ | +| Alerts enrichment | โœ… | โŒ | +| Open source | โœ… | โŒ | +| Workflow automation | โœ… | โŒ | + +## Keep vs AIOps platforms (BigPanda, Moogsoft, etc.) + +Keep is an alternative to platforms like BigPanda and Moogsoft. +Customers who have used both traditional platforms and Keep notice a significant improvement in alert correlation. Unlike the manual methods of other platforms, Keep uses advanced state-of-the-art AI models for easier and more effective alert correlation. 
+ +| | Keep | Alternative | +| --- | --- | --- | +| Aggregation of alerts | ✅ | ✅ | +| Integrations | ✅ (Bi-directional) | ✅ (Webhooks) | +| Alert enrichment | ✅ | ✅ | +| Open source | ✅ | ❌ | +| Workflow automation | ✅ (GitHub Actions-like, infrastructure as code) | ✅ | +| Managed version | ✅ | ✅ | +| On-Premises | ✅ | ❌ | +| Noise reduction & correlation | ✅ (AI) | ✅ (Rule-based in some cases) | diff --git a/docs/overview/deduplication.mdx b/docs/overview/deduplication.mdx new file mode 100644 index 0000000000..00d755ec57 --- /dev/null +++ b/docs/overview/deduplication.mdx @@ -0,0 +1,107 @@ +--- +title: "Alert Deduplication" +--- + +## Overview + +Alert deduplication is a crucial feature in Keep that helps reduce noise and streamline incident management by grouping similar alerts together. This process ensures that your team isn't overwhelmed by a flood of notifications for what is essentially the same issue, allowing for more efficient and focused incident response. + +## Glossary + +- **Deduplication Rule**: A set of criteria used to determine if alerts should be grouped together. +- **Partial Deduplication**: Correlates recurring instances of an alert into a single alert, covering the case of the same alert arriving with different statuses (e.g., firing and resolved). This is the default mode, where specified fields are used to identify and group related alerts. +- **Fingerprint Fields**: Specific alert attributes used to identify similar alerts. +- **Full Deduplication**: A mode where alerts are considered identical if all fields match exactly (except those explicitly ignored). This helps avoid system overload by discarding duplicate alerts. +- **Ignore Fields**: In full deduplication mode, these are fields that are not considered when comparing alerts. + +## Deduplication Types + +### Partial Deduplication +Partial deduplication allows you to specify certain fields (fingerprint fields) that are used to identify similar alerts. Alerts with matching values in these specified fields are considered duplicates and are grouped together. This method is flexible and allows for fine-tuned control over how alerts are deduplicated. + +Every provider integrated with Keep comes with a pre-built partial deduplication rule tailored to that provider's specific alert format and common use cases. +The default fingerprint fields are defined using the `FINGERPRINT_FIELDS` attribute in the provider code (e.g. [datadog provider](https://github.com/keephq/keep/blob/main/keep/providers/datadog_provider/datadog_provider.py#L188) or [gcp monitoring provider](https://github.com/keephq/keep/blob/main/keep/providers/gcpmonitoring_provider/gcpmonitoring_provider.py#L52)). + +### Full Deduplication +When full deduplication is enabled, Keep will also discard exact duplicates of the same event (excluding ignore fields). This mode considers all fields of an alert when determining duplicates, except for explicitly ignored fields. + +By default, events that are identical except for their lastReceived time are fully deduplicated and discarded. This helps prevent system overload from repeated identical alerts. + +## Real Examples of Alerts and Results + +### Example 1: Partial Deduplication + +**Rule**: Deduplicate based on the 'service' and 'error_message' fields.
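Conceptually, a partial deduplication rule reduces each alert to a fingerprint computed only from the declared fingerprint fields. The sketch below illustrates the idea with this example's fields; the hashing scheme is an assumption for illustration, not Keep's internal implementation:

```python
import hashlib

def fingerprint(alert: dict, fields: list[str]) -> str:
    # Build the fingerprint only from the declared fingerprint fields;
    # all other fields (severity, lastReceived, ...) play no role in grouping.
    material = "|".join(str(alert.get(field, "")) for field in fields)
    return hashlib.sha256(material.encode()).hexdigest()

rule_fields = ["service", "error_message"]
alert1 = {"service": "payment", "error_message": "Database connection failed", "severity": "high"}
alert2 = {"service": "payment", "error_message": "Database connection failed", "severity": "critical"}
alert3 = {"service": "auth", "error_message": "Invalid token", "severity": "medium"}

assert fingerprint(alert1, rule_fields) == fingerprint(alert2, rule_fields)  # grouped together
assert fingerprint(alert1, rule_fields) != fingerprint(alert3, rule_fields)  # stays separate
```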
+ +**Incoming Alerts**: + +```json +# alert 1 +{ + "service": "payment", + "error_message": "Database connection failed", + "severity": "high", + "lastReceived": "2023-05-01T10:00:00Z" +} +# alert 2 +{ + "service": "payment", + "error_message": "Database connection failed", + "severity": "critical", + "lastReceived": "2023-05-01T10:05:00Z" +} +# alert 3 +{ + "service": "auth", + "error_message": "Invalid token", + "severity": "medium", + "lastReceived": "2023-05-01T10:10:00Z" +} +``` + +**Result**: +- Alerts 1 and 2 are deduplicated into a single alert, and its fields are updated. +- Alert 3 remains separate, as it has a different service and error message. + +### Example 2: Full Deduplication + +**Rule**: Full deduplication with 'lastReceived' as an ignore field + +**Incoming Alerts**: + +```json + +# alert 1 +{ + "service": "api", + "error": "Rate limit exceeded", + "user_id": "12345", + "lastReceived": "2023-05-02T14:00:00Z" +} +# alert 2 (discarded as it's identical) +{ + "service": "api", + "error": "Rate limit exceeded", + "user_id": "12345", + "lastReceived": "2023-05-02T14:01:00Z" +} +# alert 3 +{ + "service": "api", + "error": "Rate limit exceeded", + "user_id": "67890", + "lastReceived": "2023-05-02T14:02:00Z" +} +``` + +**Result**: +- Alerts 1 and 2 are deduplicated, as they are identical except for the ignored lastReceived field. +- Alert 3 remains separate due to the different user_id. + +## How It Works + +Keep's deduplication process follows these steps: + +1. **Alert Ingestion**: Every alert received by Keep is first ingested into the system. + +2. **Enrichment**: After ingestion, each alert undergoes an enrichment process. This step adds additional context or information to the alert, enhancing its value and usefulness. + +3. **Deduplication**: Following enrichment, Keep's alert deduplicator comes into play. It applies the defined deduplication rules to the enriched alerts. diff --git a/docs/overview/enrichment/extraction.mdx b/docs/overview/enrichment/extraction.mdx new file mode 100644 index 0000000000..74e2840ac4 --- /dev/null +++ b/docs/overview/enrichment/extraction.mdx @@ -0,0 +1,47 @@ +--- +title: "Extraction" +--- + +# Alert Enrichment: Extraction + +Keep's Alert Extraction enrichment feature enables dynamic extraction of data from incoming alerts using regular expressions. This powerful tool allows users to define extraction rules that identify and extract data based on patterns, enriching alerts with additional structured data derived directly from alert content. + +## Introduction + +Handling a variety of alert formats and extracting relevant information can be challenging. Keep's Alert Extraction feature simplifies this process by allowing users to define regex-based rules that automatically extract key pieces of information from alerts. This capability is crucial for standardizing alert data and enhancing alert context, which facilitates more effective monitoring and response strategies. + +## How It Works + +1. **Rule Definition**: Users create extraction rules specifying the regex patterns to apply to certain alert attributes. +2. **Attribute Specification**: Each rule defines which attribute of the alert should be examined by the regex. +3. **Data Extraction**: When an alert is received, the system applies the regex to the specified attribute. If the pattern matches, named groups within the regex define new attributes to be extracted and added to the alert. +4. **First Match Enforcement**: The extraction process is designed to stop after the first successful match. Once a rule successfully applies and enriches the alert, no further rules are processed. This ensures efficiency and prevents overlapping or redundant data extraction. +5. **Alert Enrichment**: Extracted values are added to the alert, enhancing its data with additional attributes for improved analysis. + +## Practical Example + +Suppose you receive alerts with a message attribute formatted as "Error 404: Not Found - [UserID: 12345]". You can define an extraction rule with a regex such as `Error (?P<error_code>\d+): (?P<error_message>.+) - \[UserID: (?P<user_id>\d+)\]` to extract `error_code`, `error_message`, and `user_id` as separate attributes in the alert.
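Since extraction rules follow Python's regex syntax, you can validate a pattern with the standard `re` module before saving it in Keep. A small sketch of the example above (the alert dict is illustrative):

```python
import re

# Named groups become new alert attributes when the pattern matches.
PATTERN = re.compile(
    r"Error (?P<error_code>\d+): (?P<error_message>.+) - \[UserID: (?P<user_id>\d+)\]"
)

alert = {"message": "Error 404: Not Found - [UserID: 12345]"}

match = PATTERN.search(alert["message"])
if match:
    alert.update(match.groupdict())  # adds error_code, error_message, user_id

# alert now also contains:
# {'error_code': '404', 'error_message': 'Not Found', 'user_id': '12345'}
```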
+ +## Core Concepts + +- **Regex (Regular Expression)**: A powerful pattern-matching syntax used to identify specific patterns within text. In the context of extraction rules, regex is used to define how data should be extracted from alert attributes. It is crucial that regex patterns adhere to [Python's regex syntax](https://docs.python.org/3.11/library/re.html#match-objects), especially concerning group matching using named groups. +- **Attribute**: The part of the alert data (e.g., message, description) that the regex is applied to. +- **Named Groups**: Part of the regex pattern that specifies placeholders for extracting specific data points into new alert attributes. + +## Creating an Extraction Rule + +To create an alert extraction rule: + +1. **Select the Attribute**: Choose which attribute of the alert should be examined by the regex. +2. **Define the Regex**: Write a regex pattern with named groups that specify what information to extract. Ensure the regex is valid according to Python's regex standards, particularly for group matching. +3. **Configure Conditions**: Optionally, specify conditions under which this rule should apply, using CEL (Common Expression Language) for complex logic. + +## Best Practices + +- **Test Regex Patterns**: Before deploying a new extraction rule, thoroughly test the regex pattern to ensure it correctly matches and extracts data according to Python's regex standards. +- **Monitor Extraction Performance**: Keep track of how extraction rules are performing and whether they are enriching alerts as expected. Adjust patterns as necessary based on incoming alert data. +- **Use Specific Conditions**: When applicable, define conditions to limit when extraction rules apply, reducing unnecessary processing and focusing on relevant alerts. diff --git a/docs/overview/enrichment/mapping.mdx b/docs/overview/enrichment/mapping.mdx new file mode 100644 index 0000000000..df140a2f0c --- /dev/null +++ b/docs/overview/enrichment/mapping.mdx @@ -0,0 +1,62 @@ +--- +title: "Mapping" +--- + +# Alert Enrichment: Mapping + +Keep's Alert Mapping enrichment feature provides a powerful mechanism for dynamically enhancing alert data by leveraging external data sources, such as CSV files and topology data. This feature allows for the matching of incoming alerts to specific records in a CSV file or topology data based on predefined attributes (matchers) and enriching those alerts with additional information from the matched records. + +## Introduction + +In complex monitoring environments, the need to enrich alert data with additional context is critical for effective alert analysis and response. Keep's Alert Mapping and Enrichment enables users to define rules that match alerts to rows in a CSV file or topology data, appending or modifying alert attributes with the values from matching rows. This process adds significant value to each alert, providing deeper insights and enabling more precise and informed decision-making. + +## How It Works + +### Mapping with CSV Files + +1. **Rule Definition**: Users define mapping rules that specify which alert attributes (matchers) should be used for matching alerts to rows in a CSV file. +2. **CSV File Specification**: A CSV file is associated with each mapping rule. This file contains additional data that should be added to alerts matching the rule. +3. **Alert Matching**: When an alert is received, the system checks if it matches the conditions of any mapping rule based on the specified matchers. +4. **Data Enrichment**: If a match is found, the alert is enriched with additional data from the corresponding row in the CSV file. + +### Mapping with Topology Data + +1. **Rule Definition**: Users define mapping rules that specify which alert attributes (matchers) should be used for matching alerts to topology data. +2. **Topology Data Specification**: Topology data is associated with each mapping rule. This data contains additional information about the components and their relationships in your environment. +3. **Alert Matching**: When an alert is received, the system checks if it matches the conditions of any mapping rule based on the specified matchers. +4. **Data Enrichment**: If a match is found, the alert is enriched with additional data from the corresponding topology data. + +## Practical Example + +Imagine you have a CSV file with columns representing different aspects of your infrastructure, such as `region`, `responsible_team`, and `severity_override`. By creating a mapping rule that matches alerts based on `service` and `region`, you can automatically enrich alerts with the responsible team and adjust severity based on the matched row in the CSV file. + +Similarly, you can use topology data to enrich alerts. For example, if an alert is related to a specific service, you can use topology data to find related components and their statuses, providing a more comprehensive view of the issue.
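The matching-and-merge logic of a CSV mapping rule can be pictured in a few lines of Python. This is an illustration only; the file name and columns are the hypothetical ones from the example above, not a Keep API:

```python
import csv

MATCHERS = ["service", "region"]  # alert attributes used to find the matching row

def enrich_from_csv(alert: dict, csv_path: str) -> dict:
    """Return the alert merged with the first CSV row whose matcher columns equal the alert's."""
    with open(csv_path, newline="") as f:
        for row in csv.DictReader(f):
            if all(row.get(m) == alert.get(m) for m in MATCHERS):
                extras = {k: v for k, v in row.items() if k not in MATCHERS}
                return {**alert, **extras}  # e.g. adds responsible_team, severity_override
    return alert  # no matching row: the alert is left unchanged

alert = {"service": "checkout", "region": "eu-west-1", "severity": "high"}
enriched = enrich_from_csv(alert, "infrastructure.csv")  # hypothetical CSV file
```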
+ +## Core Concepts + +- **Matchers**: Attributes within the alert used to identify matching rows within the CSV file or topology data. Common matchers include identifiers like `service` or `region`. +- **CSV File**: A structured file containing rows of data. Each column represents a potential attribute that can be added to an alert. +- **Topology Data**: Information about the components and their relationships in your environment. This data can be used to enrich alerts with additional context. +- **Enrichment**: The process of adding new attributes or modifying existing ones in an alert based on the data from a matching CSV row or topology data. + +## Creating a Mapping Rule + +To create an alert mapping and enrichment rule: + +1. **Define the Matchers**: Specify which alert attributes will be used to match rows in the CSV file or topology data. +2. **Specify the Data Source**: Provide the CSV file or specify the topology data to be used for enrichment. +3. **Configure the Rule**: Set additional parameters, such as whether the rule should override existing alert attributes. + +## Best Practices + +- **Keep CSV Files and Topology Data Updated**: Regularly update the CSV files and topology data to reflect the current state of your infrastructure and operational data. +- **Use Specific Matchers**: Define matchers that are unique and relevant to ensure accurate matching.
+ +- **Monitor Rule Performance**: Review the application of mapping rules to ensure they are working as expected and adjust them as necessary. + diff --git a/docs/overview/examples.mdx b/docs/overview/examples.mdx new file mode 100644 index 0000000000..69ae876c10 --- /dev/null +++ b/docs/overview/examples.mdx @@ -0,0 +1,111 @@ +--- +title: "Examples" +--- + +Got an interesting example of how you would use Keep? Feel free to submit a new example issue and we'll credit you when we add it! + + +## Create an incident only if the customer is on Enterprise tier +In this example we will utilize: + +1. Datadog for monitoring +2. OpsGenie for incident management +3. A Postgres database that stores the customer tier. + +This example consists of two steps: +1. Connect your tools: Datadog, OpsGenie, and Postgres. +2. Create a workflow that is triggered by the alert, runs an SQL query, and decides whether to create an incident. Once the workflow is created, you can upload it via the [Workflows](https://docs.keephq.dev/workflows/overview) page. +```yaml +alert: + id: enterprise-tier-alerts + description: Create an incident only if the customer is enterprise. + triggers: + - type: alert + filters: + - key: source + value: datadog + - key: name + value: YourAlertName + steps: + - name: check-if-customer-is-enterprise + provider: + type: postgres + config: "{{ providers.postgres-prod }}" + with: + # Keep will replace {{ alert.customer_id }} with the customer id + query: "SELECT customer_tier, customer_name FROM customers_table WHERE customer_id = {{ alert.customer_id }} LIMIT 1" + actions: + - name: opsgenie-incident + # trigger only if the customer is enterprise + condition: + - name: verify-true + type: assert + assert: "{{ steps.check-if-customer-is-enterprise.results[0] }} == 'enterprise'" + provider: + type: opsgenie + config: " {{ providers.opsgenie-prod }} " + with: + message: "A new alert on enterprise customer ( {{ steps.check-if-customer-is-enterprise.results[1] }} )" +``` + +## Send a Slack message for every CloudWatch alarm +1. Connect your CloudWatch account(s) and Slack to Keep. +2. Create a simple Workflow that filters for CloudWatch events and sends a Slack message: +```yaml +workflow: + id: cloudwatch-slack + description: Send a Slack message when a CloudWatch alarm is triggered + triggers: + - type: alert + filters: + - key: source + value: cloudwatch + actions: + - name: trigger-slack + provider: + type: slack + config: " {{ providers.slack-prod }} " + with: + message: "Got an alarm from AWS CloudWatch! {{ alert.name }}" + +``` + + +## Monitor an HTTP service +Suppose you want to monitor an HTTP service.
+All you have to do is upload the following workflow:
+
+```yaml
+workflow:
+  id: monitor-http-service
+  description: Monitor an HTTP service every 10 seconds
+  triggers:
+    - type: interval
+      value: 10
+  steps:
+    - name: simple-http-request
+      provider:
+        type: http
+        with:
+          method: GET
+          url: 'https://YOUR_SERVICE_URL/'
+          timeout: 2
+          verify: true
+  actions:
+    - name: trigger-slack
+      condition:
+        - name: assert-condition
          type: assert
+          assert: '{{ steps.simple-http-request.results.status_code }} == 200'
+      provider:
+        type: slack
+        config: ' {{ providers.slack-prod }} '
+        with:
+          message: "HTTP Request Status: {{ steps.simple-http-request.results.status_code }}\nHTTP Request Body: {{ steps.simple-http-request.results.body }}"
+      on-failure:
+        # Just need a provider we can use to send the failure reason
+        provider:
+          type: slack
+          config: ' {{ providers.slack-prod }} '
+
+```
diff --git a/docs/overview/introduction.mdx b/docs/overview/introduction.mdx
new file mode 100644
index 0000000000..b52681e876
--- /dev/null
+++ b/docs/overview/introduction.mdx
@@ -0,0 +1,26 @@
+---
+title: "Introduction"
+description: "Keep is an open-source alert management and automation tool that provides everything you need to collect, enrich, and manage alerts effectively."
+---
+You can start using Keep by logging in to the [platform](https://platform.keephq.dev).
+
+## What's an alert?
+
+An alert is an event that is triggered when something undesirable occurs or is about to occur.
+It is usually triggered by monitoring tools such as Prometheus, Grafana, or CloudWatch, as well as some proprietary tools.
+
+Alerts are usually categorized into three different groups:
+- Infrastructure-related alerts - e.g., a virtual machine consumes more than 99% CPU.
+- Application-related alerts - e.g., an endpoint starts returning 5XX status codes.
+- Business-related alerts - e.g., a drop in the number of sign-ins or purchases.
+
+## What problem does Keep solve?
+Keep helps with every step of the alert lifecycle:
+1. Maintenance - Keep integrates with all of your monitoring tools, allowing you to manage all of your alerts within a single interface.
+2. Noise reduction - By integrating with monitoring tools, Keep can deduplicate and correlate alerts to reduce noise in your organization. There are two types of deduplication: rule-based (all distributions) and AI-based (Keep Enterprise only).
+3. Automation - [Keep Workflows](/workflows) enables automated alert enrichment and response.
+4. Incident Correlation - Automatically assigns alerts to incidents, performs triage, and conducts root cause analysis (Keep Enterprise only).
+5. Summarization - Keep summarizes incidents based on past incidents and a knowledge base (Keep Enterprise only).
+
+## How does Keep integrate with alerts?
+Alerts can either be [pulled](/platform/alerts#pulled-alerts) by Keep or [pushed](/platform/alerts#pushed-alerts) into it. Keep also offers zero-click alert instrumentation through [webhook installation](/providers/overview).
\ No newline at end of file
diff --git a/docs/overview/keyconcepts.mdx b/docs/overview/keyconcepts.mdx
new file mode 100644
index 0000000000..07c2193fe4
--- /dev/null
+++ b/docs/overview/keyconcepts.mdx
@@ -0,0 +1,50 @@
+---
+title: "Key concepts"
+---
+## Alert
+An alert is an event that is triggered when something bad happens or is about to happen.
+The term "alert" can sometimes be interchanged with "alarm" (e.g., in CloudWatch) or "monitor" (e.g., in Datadog).
+
+## Incident
+An incident is a group of alerts that are related to each other.
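+
+To make this concrete, below is a minimal sketch of a single alert as Keep might represent it; an incident would group several such alerts together. The field names follow Keep's alert model (see the severity and status documentation), but the values are purely hypothetical:
+
+```yaml
+# A hypothetical alert, expressed with Keep's standard fields
+name: "High CPU usage on prod-server-1"
+status: "firing"       # firing | resolved | acknowledged | suppressed | pending
+severity: "critical"   # critical | high | warning | info | low
+source: ["prometheus"]
+service: "api-gateway"
+description: "CPU usage above 99% for 5 minutes"
+lastReceived: "2024-01-01T00:00:00Z"
+```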
+
+## Provider
+A provider is a module that pulls alerts into Keep or pushes data out of Keep by interacting with external systems.
+
+### Provider as a data source
+Within the context of a Workflow, a Provider can:
+- Query data - query Datadog's API or run a SQL query against a database.
+- Push data - send a Slack message or create a PagerDuty incident.
+
+### Provider as an alert source
+When you connect a Provider, Keep begins to read and process alerts from that Provider. For example, after connecting your Prometheus instance, you'll start seeing your Prometheus alerts in Keep.
+A Provider can either push alerts into Keep, or Keep can pull alerts from the Provider.
+
+#### Push alerts to Keep (Manual)
+You can configure your alert source to push alerts into Keep.
+
+For example, consider Prometheus. If you want to push alerts from Prometheus to Keep, you'll need to configure Prometheus Alertmanager to send the alerts to
+`https://api.keephq.dev/alerts/event/prometheus` using API key authentication. Each Provider implements its own push mechanism, which is documented on the specific Provider page.
+
+#### Push alerts to Keep (Automatic)
+In compatible tools, Keep can automatically integrate with the alerting policy of the source tool and add itself as an alert destination. You can learn more about Webhook Integration [here](/providers/overview).
+Please note that this will slightly modify your monitors/notification policy.
+
+#### Pull alerts by Keep
+Keep also integrates with the alert APIs of various tools and can automatically pull alerts. While pulling is easier to set up (requiring only credentials), pushing is preferable when automation is involved.
+
+## Workflow
+Workflows consist of a list of [Steps](/workflows/overview#steps) and [Actions](/workflows/overview#actions).
+A workflow can be triggered in the following ways:
+- When an Alert is triggered.
+- At a predefined interval.
+- Manually.
+
+Workflows are commonly used to:
+1. Enrich your alerts with more context.
+2. Automate the response to alerts.
+3. Create multi-step alerts.
+
+## API first
+Keep is an API-first platform, meaning that anything you can do via the UI can also be accomplished through the [API](/api-ref).
+This gives you the flexibility to integrate Keep with your existing stack and to automate alert remediation and enrichment processes.
diff --git a/docs/overview/maintenance-windows.mdx b/docs/overview/maintenance-windows.mdx
new file mode 100644
index 0000000000..a9d05ac1fe
--- /dev/null
+++ b/docs/overview/maintenance-windows.mdx
@@ -0,0 +1,59 @@
+---
+title: "Maintenance Windows"
+---
+
+# Alert Management: Maintenance Windows
+
+Keep's Maintenance Windows feature provides a critical mechanism for managing alert noise during scheduled maintenance periods or other planned events. By defining Maintenance Window rules, users can suppress alerts that are irrelevant during these times, ensuring that only actionable alerts reach the operations team.
+
+## Introduction
+
+In dynamic IT environments, it's common to have periods where certain alerts are expected and should not trigger incident responses. Keep's Maintenance Windows feature allows users to define specific rules that temporarily suppress alerts based on various conditions, such as time windows or alert attributes. This helps prevent unnecessary alert fatigue and ensures that teams can focus on critical issues.
+
+## How It Works
+
+1. **Maintenance Window Rule Definition**: Users define Maintenance Window rules specifying the conditions under which alerts should be suppressed.
+2. **Condition Specification**: A CEL (Common Expression Language) query is associated with each Maintenance Window rule to define the conditions for suppression.
+3. **Time Window Configuration**: Maintenance Window rules can be set for specific start and end times, or based on a relative duration.
+4. **Alert Suppression**: During the active period of a Maintenance Window rule, any alerts matching the defined conditions are either suppressed and **not shown in the alerts feed**, or shown in the feed with a suppressed status (**this is configurable**).
+
+## Practical Example
+
+Suppose your team schedules a database upgrade that could trigger numerous non-critical alerts. You can create a Maintenance Window rule that suppresses alerts from the database service during the upgrade window. This ensures that your operations team isn't overwhelmed by non-actionable alerts, allowing them to focus on more critical issues.
+
+## Core Concepts
+
+- **Maintenance Window Rules**: Configurations that define when and which alerts should be suppressed, based on time windows and conditions.
+- **CEL Query**: A query language used to specify the conditions under which alerts should be suppressed. For example, a CEL query might suppress alerts where the source is a specific service during a maintenance window.
+- **Time Window**: The specific start and end times, or relative duration, during which the Maintenance Window rule is active.
+- **Alert Suppression**: The process of ignoring alerts that match the Maintenance Window rule's conditions during the specified time window.
+
+## Status-Based Filtering in Maintenance Windows
+
+In Keep, certain alert statuses are automatically ignored by Maintenance Window rules. Specifically, alerts with the statuses RESOLVED and ACKNOWLEDGED are not suppressed by Maintenance Window rules. This is intentional, to ensure that resolving alerts can still be processed and appropriately close or update active incidents.
+
+### Why Are Some Statuses Ignored?
+
+- **RESOLVED alerts**: These alerts indicate that an issue has been resolved. By allowing these alerts to bypass Maintenance Window rules, Keep ensures that any active incidents related to the alert can be properly closed, maintaining the integrity of the alert lifecycle.
+- **ACKNOWLEDGED alerts**: These alerts have been acknowledged by an operator, signaling that they are being addressed. Ignoring these alerts in Maintenance Windows ensures that operators can track the progress of incidents and take necessary actions without interference.
+
+By excluding these statuses from Maintenance Window suppression, Keep allows for the continuous and accurate management of alerts, even during Maintenance Window periods, ensuring that resolution processes are not disrupted.
+
+## Creating a Maintenance Window Rule
+
+To create a Maintenance Window rule:
+
+
+
+
+
+1. **Define the Maintenance Window Name and Description**: Provide a name and an optional description for the Maintenance Window rule, to easily identify its purpose.
+2. **Specify the CEL Query**: Use CEL to define the conditions under which alerts should be suppressed (e.g., `source == "database"`).
+3. **Set the Time Window**: Choose a specific start and end time, or define a relative duration for the Maintenance Window.
+4. **Enable the Rule**: Decide whether the rule should be active immediately or scheduled for future use.
+
+## Best Practices
+
+- **Plan Maintenance Windows in Advance**: Schedule Maintenance Windows ahead of known maintenance activities to prevent unnecessary alerts.
+- **Use Specific Conditions**: Define precise CEL queries to ensure only the intended alerts are suppressed.
+- **Review and Update Maintenance Windows**: Regularly review active Maintenance Window rules to ensure they are still relevant, and adjust them as necessary.
diff --git a/docs/overview/presets.mdx b/docs/overview/presets.mdx
new file mode 100644
index 0000000000..3480ef040c
--- /dev/null
+++ b/docs/overview/presets.mdx
@@ -0,0 +1,81 @@
+---
+description: "CEL-Based Alert Filtering"
+title: "Presets"
+---
+
+With Keep's introduction of CEL (Common Expression Language) for alert filtering, users gain the flexibility to define more complex and precise alert filtering logic. This feature allows the creation of customizable filters using CEL expressions to refine alert visibility based on specific criteria.
+
+## Introduction
+
+CEL-based filtering offers a powerful method for users to specify conditions under which alerts should be shown. Through a combination of logical, comparison, and string operations, alerts can be filtered to meet the exact needs of the user, improving the focus and efficiency of alert management.
+
+## How It Works
+
+1. **CEL Expression Creation**: Users craft CEL expressions that define the filtering criteria for alerts.
+2. **Preset Definition**: These expressions can be saved as presets for easy application to different alert streams.
+3. **Alert Filtering**: When applied, the CEL expressions evaluate each alert against the defined criteria, filtering the alert stream in real time.
+
+## Practical Example
+
+For instance, a user could create a CEL expression to filter alerts by severity and source, such as `severity == 'critical' && service.contains('database')`, ensuring only critical alerts from database services are displayed.
+
+## Core Concepts
+
+- **CEL Expressions**: The CEL language syntax used to define alert filtering logic.
+- **Presets**: Saved CEL expressions that can be reused across different alert streams.
+- **Real-Time Filtering**: The dynamic application of CEL expressions to incoming alerts.
+
+## Creating a CEL Expression
+
+There are generally two ways of creating a CEL expression in Keep:
+
+### Importing from an SQL query
+
+1. Click on the "Import from SQL" button.
+
+
+
+2. Write/paste your SQL query and hit the "Convert to CEL" button.
+
+
+
+This will generate and apply a valid CEL query:
+
+
+
+
+### Manually creating a CEL query
+
+Use the [CEL Language Definition](https://github.com/google/cel-spec/blob/master/doc/langdef.md) documentation to better understand the capabilities of the Common Expression Language.
+This is an example of how to query all the alerts that came from `Sentry`:
+
+
+
+If the CEL syntax you typed in is invalid, an error message will show up (in this case, we used invalid `''` instead of `""`):
+
+
+
+
+## Save Presets
+
+You can save your CEL queries into a `Preset` using the "Save current filter as a view" button.
+
+
+
+You can name your `Preset` and configure whether it is "Private" (only the creating user will see this Preset) or available account-wide.
+
+
+
+The `Preset` will then be created and available for you to quickly navigate to and use.
+
+
+
+
+## Best Practices
+
+- **Specificity in Expressions**: Craft expressions that precisely target the desired alerts to avoid filtering out relevant alerts.
+- **Presets Management**: Regularly review and update your presets to align with evolving alerting needs.
+- **Testing Expressions**: Before applying, test CEL expressions to ensure they correctly filter the desired alerts.
+
+## Useful Links
+- [Common Expression Language](https://github.com/google/cel-spec?tab=readme-ov-file)
+- [CEL Language Definition](https://github.com/google/cel-spec/blob/master/doc/langdef.md)
diff --git a/docs/overview/ruleengine.mdx b/docs/overview/ruleengine.mdx
new file mode 100644
index 0000000000..4992a1f1b0
--- /dev/null
+++ b/docs/overview/ruleengine.mdx
@@ -0,0 +1,28 @@
+---
+title: "Alert correlations"
+---
+
+The Keep Rule Engine is a versatile tool for grouping and consolidating alerts into incidents or incident-candidates.
+This guide explains the core concepts, usage, and best practices for effectively utilizing the rule engine.
+
+Access the Rule Engine UI through the Keep platform by navigating to the Rule Builder section.
+
+## Core Concepts
+- **Rule definition**: A rule in Keep is a set of conditions that, when met, creates an incident or incident-candidate.
+- **Alert attributes**: These are characteristics or data points of an alert, such as source, severity, or any other attribute an alert might have.
+- **Conditions and logic**: Rules are built by defining conditions based on alert attributes, using logical operators (like AND/OR) to combine multiple conditions.
+
+## Creating Rules
+Creating a rule involves defining the conditions under which alerts should be grouped into an incident.
+
+1. **Accessing the Rule Engine**: Navigate to the Rule Engine section in the Keep platform.
+2. **Defining rule criteria**:
+   - **Name the rule**: Assign a descriptive name that reflects its purpose.
+   - **Set conditions**: Use alert attributes to create conditions. For example, a rule might specify that an alert with a severity of 'critical' and a source of 'Prometheus' should be categorized as 'High Priority'.
+   - **Logical grouping**: Combine conditions using logical operators to form comprehensive rules.
+   - **Manual approval**: Choose whether the rule creates an incident-candidate (which requires manual approval) or a full-fledged incident.
+
+## Examples
+- **Metric-based alerts**: Construct a rule to pinpoint alerts associated with specific metrics, such as high CPU usage on servers. This can be achieved by grouping alerts that share a common attribute, like a 'CPU usage' tag, ensuring you quickly identify and address performance issues.
+- **Feature-related alerts**: Establish rules to create incidents for specific features or services. For instance, you can start an incident based on a 'service' or 'URL' tag. This approach is particularly useful for tracking and managing alerts related to distinct functionalities or components within your application.
+- **Team-based alert management**: Implement rules to create incidents according to team responsibilities. This might involve grouping based on the systems or services a particular team oversees. Such a strategy ensures that alerts are promptly directed to the appropriate team, enhancing response times and efficiency.
diff --git a/docs/overview/usecases.mdx b/docs/overview/usecases.mdx
new file mode 100644
index 0000000000..24389ce06b
--- /dev/null
+++ b/docs/overview/usecases.mdx
@@ -0,0 +1,27 @@
+---
+title: "Use cases"
+---
+
+## Central alert management
+No more navigating between multiple Prometheus instances and dealing with per-region, per-account CloudWatch settings.
+
+By linking your alert-triggering tools to Keep, you gain a centralized dashboard for managing all your alerts.
+
+
+With Keep, you can review, throttle, mute, and fine-tune all of your alerts from a single console.
+
+## Alerts enrichment
+You're no longer constrained by the alerting mechanisms implemented in your tools.
+
+Need alerts triggered exclusively for your enterprise customers? No problem. Want to add extra context that isn't available in your existing tools? Easy.
+
+Simply connect your observability tools, databases, ticketing systems, or any other tools that can provide additional context, and integrate them with your alerts.
+
+## Automate the alert response process
+There's a saying that goes, "If you can automate the response to an alert, it shouldn't be an alert," right?
+
+While that might be true in an ideal world, we understand that in practice the response to an alert can often be automated, whether by double-checking the data or by taking steps to verify that the alert is not a false positive.
+
+Consider a common scenario: you receive a 502 error on one of your endpoints. That's alert-worthy, isn't it?
+
+But what if you could confirm that it's a genuine error with an additional query? Or even determine that it's a free-trial user whose issue can wait until morning?
diff --git a/docs/package-lock.json b/docs/package-lock.json
new file mode 100644
index 0000000000..e7c2e35c87
--- /dev/null
+++ b/docs/package-lock.json
@@ -0,0 +1,7 @@
+{
+  "name": "docs",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {}
+}
\ No newline at end of file
diff --git a/docs/platform/alerts.mdx b/docs/platform/alerts.mdx
new file mode 100644
index 0000000000..0b729e9dd8
--- /dev/null
+++ b/docs/platform/alerts.mdx
@@ -0,0 +1,54 @@
+---
+title: "Alerts"
+sidebarTitle: Alerts
+---
+
+## Overview
+You can manage Alerts programmatically using the Alerts API.
+The alerts page lets you manage your alerts in a single pane of glass.
+
+
+## View your alerts
+
+By connecting Providers, you get a single pane of glass for your alerts:
+
+
+
+
+## Pushed alerts
+
+
+
+
+See all of the alerts that were pushed into Keep.
+
+## Pulled alerts
+
+
+
+
+See all of the alerts that were pulled by Keep.
+
+
+## Alert history
+To see an alert's history, just click on the history button:
+
+
+
+
+
+## Go to the original alert
+You can see your alert in the originating tool by clicking on "Open Alert":
+
+
+
diff --git a/docs/platform/alertseverityandstatus.mdx b/docs/platform/alertseverityandstatus.mdx
new file mode 100644
index 0000000000..d5859d1f4f
--- /dev/null
+++ b/docs/platform/alertseverityandstatus.mdx
@@ -0,0 +1,47 @@
+---
+title: "Alerts Severity and Status"
+---
+
+In Keep, alerts are treated as first-class citizens, with clearly defined severities and statuses to aid in quick and efficient response.
+
+## Alert Severity
+Alert severity in Keep is classified into five categories, helping teams prioritize their response based on the urgency and impact of the alert.
+
+| Severity Level | Description                                            | Expected Value |
+|----------------|--------------------------------------------------------|----------------|
+| CRITICAL       | Requires immediate action.                             | "critical"     |
+| HIGH           | Needs to be addressed soon.                            | "high"         |
+| WARNING        | Indicates a potential problem.                         | "warning"      |
+| INFO           | Provides information, no immediate action required.    | "info"         |
+| LOW            | Minor issues or lowest priority.                       | "low"          |
+
+## Alert Status
+The status of an alert in Keep reflects its current state in the alert lifecycle.
+
+| Status       | Description                                                                   | Expected Value |
+|--------------|-------------------------------------------------------------------------------|----------------|
+| FIRING       | Active alert indicating an ongoing issue.                                     | "firing"       |
+| RESOLVED     | The issue has been resolved, and the alert is no longer active.               | "resolved"     |
+| ACKNOWLEDGED | The alert has been acknowledged but not resolved.                             | "acknowledged" |
+| SUPPRESSED   | Alert is suppressed due to various reasons.                                   | "suppressed"   |
+| PENDING      | No data or insufficient data to determine the alert state.                    | "pending"      |
+
+
+## Provider Alert Mappings
+Different providers might have their own specific ways of defining and handling alert severity and status.
+Keep standardizes these variations by mapping them to the defined enums (AlertSeverity and AlertStatus).
+
+Here's how various providers align with Keep's alert system:
+
+| Provider      | Severity Mapping | Status Mapping |
+|---------------|------------------|----------------|
+| CloudWatch    | N/A | ALARM -> FIRING, OK -> RESOLVED, INSUFFICIENT_DATA -> PENDING |
+| Prometheus    | "critical" -> CRITICAL, "warning" -> WARNING, "info" -> INFO, "low" -> LOW | "firing" -> FIRING, "resolved" -> RESOLVED |
+| Datadog       | "P4" -> INFO, "P3" -> WARNING, "P2" -> HIGH, "P1" -> CRITICAL | "Triggered" -> FIRING, "Recovered" -> RESOLVED, "Muted" -> SUPPRESSED |
+| PagerDuty     | "P1" -> CRITICAL, "P2" -> HIGH, "P3" -> WARNING, "P4" -> INFO | "triggered" -> FIRING, "acknowledged" -> ACKNOWLEDGED, "resolved" -> RESOLVED |
+| Pingdom       | N/A | "down" -> FIRING, "up" -> RESOLVED, "paused" -> SUPPRESSED |
+| Dynatrace     | "critical" -> CRITICAL, "warning" -> WARNING, "info" -> INFO | "open" -> FIRING, "closed" -> RESOLVED, "acknowledged" -> ACKNOWLEDGED |
+| Grafana       | "critical" -> CRITICAL, "high" -> HIGH, "warning" -> WARNING, "info" -> INFO | "ok" -> RESOLVED, "paused" -> SUPPRESSED, "alerting" -> FIRING, "pending" -> PENDING, "no_data" -> PENDING |
+| New Relic     | "critical" -> CRITICAL, "warning" -> WARNING, "info" -> INFO | "open" -> FIRING, "closed" -> RESOLVED, "acknowledged" -> ACKNOWLEDGED |
+| Sentry        | "fatal" -> CRITICAL, "error" -> HIGH, "warning" -> WARNING, "info" -> INFO, "debug" -> LOW | "resolved" -> RESOLVED, "unresolved" -> FIRING, "ignored" -> SUPPRESSED |
+| Zabbix        | "not_classified" -> LOW, "information" -> INFO, "warning" -> WARNING, "average" -> WARNING, "high" -> HIGH, "disaster" -> CRITICAL | "problem" -> FIRING, "ok" -> RESOLVED, "acknowledged" -> ACKNOWLEDGED, "suppressed" -> SUPPRESSED |
diff --git a/docs/platform/overview.mdx b/docs/platform/overview.mdx
new file mode 100644
index 0000000000..bb3f3b809a
--- /dev/null
+++ b/docs/platform/overview.mdx
@@ -0,0 +1,15 @@
+---
+title: "Overview"
+sidebarTitle: Overview
+---
+Keep is fully open source. If you want to start Keep in your local environment, see the deployment section.
+Keep is API first. Everything you do in the UI can be done via the API.
+
+The platform is accessible at https://platform.keephq.dev and lets you start the journey of improving your alerts.
+
+The platform is currently built on top of:
+
+1. [Providers](/providers/overview) - connect your stack to Keep.
+2. [Alerts](/platform/alerts) - a single pane of glass for your alerts.
+3. [Workflows](/workflows/overview) - create automations on top of your alerts (or independently of them).
+4. [Settings](/platform/settings) - the settings page (add users, etc.).
diff --git a/docs/platform/settings.mdx b/docs/platform/settings.mdx
new file mode 100644
index 0000000000..6f472a9777
--- /dev/null
+++ b/docs/platform/settings.mdx
@@ -0,0 +1,41 @@
+---
+title: "Settings"
+sidebarTitle: Settings
+---
+
+# Overview
+Set up and configure Keep.
+
+## Users
+Add or remove users from your tenant.
+
+
+
+
+## Webhook
+View your tenant webhook settings.
+
+
+
+
+## SMTP
+Configure your SMTP server to send emails.
+
+
+
+
+## Get an API Key
+
+
+
diff --git a/docs/platform/support.mdx b/docs/platform/support.mdx
new file mode 100644
index 0000000000..b220d117fb
--- /dev/null
+++ b/docs/platform/support.mdx
@@ -0,0 +1,16 @@
+---
+title: "Support"
+sidebarTitle: Support
+---
+
+## Overview
+You can use the following methods to ask for support/help with anything related to Keep:
+
+
+
+  You can use the [Keep Slack community](https://slack.keephq.dev) to get support.
+
+
+  You can use support@keephq.dev to send inquiries.
+
+
diff --git a/docs/providers/adding-a-new-provider.mdx b/docs/providers/adding-a-new-provider.mdx
new file mode 100644
index 0000000000..d81520cda7
--- /dev/null
+++ b/docs/providers/adding-a-new-provider.mdx
@@ -0,0 +1,577 @@
+---
+title: "Adding a new Provider"
+sidebarTitle: "Adding a New Provider"
+---
+Under construction
+
+### Basics
+
+- BaseProvider is the base class every provider needs to inherit from
+- BaseProvider exposes 4 important functions:
+  - `query(self, **kwargs: dict)` which is used to query the provider in steps
+  - `notify(self, **kwargs: dict)` which is used to notify via the provider in actions
+  - `dispose(self)` which is used to dispose of the provider after usage (e.g. close the connection to the DB)
+  - `validate_config(self)` which is used to validate the configuration passed to the Provider
+- And 4 functions that are not required:
+  - `get_alerts(self)` which is used to fetch configured alerts (**not the currently active alerts**)
+  - `deploy_alert(self, alert: dict, alert_id: Optional[str])` which is used to deploy an alert to the provider
+  - `get_alert_schema(self)` which is used to describe the provider's API schema for deploying an alert
+  - `get_logs(self, limit)` which is used to fetch logs from the provider (currently used by the AI layer to generate more accurate results)
+- Providers must be located in the providers directory
+- A provider's directory name must be the provider's unique identifier followed by `_provider` (e.g. `slack_provider`)
+- A provider's file name must be the provider's unique identifier followed by `_provider.py` (e.g. `slack_provider.py`)
+
+### ProviderScope
+```python
+@dataclass
+class ProviderScope:
+    """
+    Provider scope model.
+
+    Args:
+        name (str): The name of the scope.
+        description (Optional[str]): The description of the scope.
+        mandatory (bool): Whether the scope is mandatory.
+        mandatory_for_webhook (bool): Whether the scope is mandatory for webhook auto installation.
+        documentation_url (Optional[str]): The documentation url of the scope.
+        alias (Optional[str]): Another alias of the scope.
+    """
+
+    name: str
+    description: Optional[str] = None
+    mandatory: bool = False
+    mandatory_for_webhook: bool = False
+    documentation_url: Optional[str] = None
+    alias: Optional[str] = None
+```
+
+### ProviderConfig
+
+```python
+@dataclass
+class ProviderConfig:
+    """
+    Provider configuration model.
+
+    Args:
+        description (Optional[str]): The description of the provider.
+ authentication (dict): The configuration for the provider. + """ + + authentication: Optional[dict] + name: Optional[str] = None + description: Optional[str] = None + + def __post_init__(self): + if not self.authentication: + return + for key, value in self.authentication.items(): + if ( + isinstance(value, str) + and value.startswith("{{") + and value.endswith("}}") + ): + self.authentication[key] = chevron.render(value, {"env": os.environ}) +``` + +### BaseProvider + +```python +""" +Base class for all providers. +""" +class BaseProvider(metaclass=abc.ABCMeta): + OAUTH2_URL = None + PROVIDER_SCOPES: list[ProviderScope] = [] + PROVIDER_METHODS: list[ProviderMethod] = [] + FINGERPRINT_FIELDS: list[str] = [] + PROVIDER_TAGS: list[ + Literal["alert", "ticketing", "messaging", "data", "queue"] + ] = [] + + def __init__( + self, + context_manager: ContextManager, + provider_id: str, + config: ProviderConfig, + webhooke_template: Optional[str] = None, + webhook_description: Optional[str] = None, + provider_description: Optional[str] = None, + ): + """ + Initialize a provider. + + Args: + provider_id (str): The provider id. + **kwargs: Provider configuration loaded from the provider yaml file. + """ + self.provider_id = provider_id + + self.config = config + self.webhooke_template = webhooke_template + self.webhook_description = webhook_description + self.provider_description = provider_description + self.context_manager = context_manager + self.logger = context_manager.get_logger() + self.validate_config() + self.logger.debug( + "Base provider initalized", extra={"provider": self.__class__.__name__} + ) + self.provider_type = self._extract_type() + self.results = [] + # tb: we can have this overriden by customer configuration, when initializing the provider + self.fingerprint_fields = self.FINGERPRINT_FIELDS + + def _extract_type(self): + """ + Extract the provider type from the provider class name. + + Returns: + str: The provider type. + """ + name = self.__class__.__name__ + name_without_provider = name.replace("Provider", "") + name_with_spaces = ( + re.sub("([A-Z])", r" \1", name_without_provider).lower().strip() + ) + return name_with_spaces.replace(" ", ".") + + @abc.abstractmethod + def dispose(self): + """ + Dispose of the provider. + """ + raise NotImplementedError("dispose() method not implemented") + + @abc.abstractmethod + def validate_config(): + """ + Validate provider configuration. + """ + raise NotImplementedError("validate_config() method not implemented") + + def validate_scopes(self) -> dict[str, bool | str]: + """ + Validate provider scopes. + + Returns: + dict: where key is the scope name and value is whether the scope is valid (True boolean) or string with error message. + """ + return {} + + def notify(self, **kwargs): + """ + Output alert message. + + Args: + **kwargs (dict): The provider context (with statement) + """ + # trigger the provider + results = self._notify(**kwargs) + self.results.append(results) + # if the alert should be enriched, enrich it + enrich_alert = kwargs.get("enrich_alert", []) + if not enrich_alert or not results: + return results if results else None + + self._enrich_alert(enrich_alert, results) + return results + + def _enrich_alert(self, enrichments, results): + """ + Enrich alert with provider specific data. 
+ + """ + self.logger.debug("Extracting the fingerprint from the alert") + if "fingerprint" in results: + fingerprint = results["fingerprint"] + elif self.context_manager.foreach_context.get("value", {}): + # TODO: if it's zipped, we need to extract the fingerprint from the zip (i.e. multiple foreach) + fingerprint = self.context_manager.foreach_context.get("value", {}).get( + "fingerprint" + ) + # else, if we are in an event context, use the event fingerprint + elif self.context_manager.event_context: + # TODO: map all casses event_context is dict and update them to the DTO + # and remove this if statement + if isinstance(self.context_manager.event_context, dict): + fingerprint = self.context_manager.event_context.get("fingerprint") + # Alert DTO + else: + fingerprint = self.context_manager.event_context.fingerprint + else: + fingerprint = None + + if not fingerprint: + self.logger.error( + "No fingerprint found for alert enrichment", + extra={"provider": self.provider_id}, + ) + raise Exception("No fingerprint found for alert enrichment") + self.logger.debug("Fingerprint extracted", extra={"fingerprint": fingerprint}) + + _enrichments = {} + # enrich only the requested fields + for enrichment in enrichments: + try: + if enrichment["value"].startswith("results."): + val = enrichment["value"].replace("results.", "") + parts = val.split(".") + r = copy.copy(results) + for part in parts: + r = r[part] + _enrichments[enrichment["key"]] = r + else: + _enrichments[enrichment["key"]] = enrichment["value"] + except Exception: + self.logger.error( + f"Failed to enrich alert - enrichment: {enrichment}", + extra={"fingerprint": fingerprint, "provider": self.provider_id}, + ) + continue + self.logger.info("Enriching alert", extra={"fingerprint": fingerprint}) + try: + enrich_alert(self.context_manager.tenant_id, fingerprint, _enrichments) + except Exception as e: + self.logger.error( + "Failed to enrich alert in db", + extra={"fingerprint": fingerprint, "provider": self.provider_id}, + ) + raise e + self.logger.info("Alert enriched", extra={"fingerprint": fingerprint}) + + def _notify(self, **kwargs): + """ + Output alert message. 
+ + Args: + **kwargs (dict): The provider context (with statement) + """ + raise NotImplementedError("notify() method not implemented") + + def _query(self, **kwargs: dict): + """ + Query the provider using the given query + + Args: + kwargs (dict): The provider context (with statement) + + Raises: + NotImplementedError: _description_ + """ + raise NotImplementedError("query() method not implemented") + + def query(self, **kwargs: dict): + # just run the query + results = self._query(**kwargs) + # now add the type of the results to the global context + if results and isinstance(results, list): + self.context_manager.dependencies.add(results[0].__class__) + elif results: + self.context_manager.dependencies.add(results.__class__) + + enrich_alert = kwargs.get("enrich_alert", []) + if enrich_alert: + self._enrich_alert(enrich_alert, results) + # and return the results + return results + + @staticmethod + def _format_alert( + event: dict, provider_instance: "BaseProvider" = None + ) -> AlertDto | list[AlertDto]: + raise NotImplementedError("format_alert() method not implemented") + + @classmethod + def format_alert(cls, event: dict) -> AlertDto | list[AlertDto]: + logger = logging.getLogger(__name__) + logger.debug("Formatting alert") + formatted_alert = cls._format_alert(event) + logger.debug("Alert formatted") + return formatted_alert + + @staticmethod + def get_alert_fingerprint(alert: AlertDto, fingerprint_fields: list = []) -> str: + """ + Get the fingerprint of an alert. + + Args: + event (AlertDto): The alert to get the fingerprint of. + fingerprint_fields (list, optional): The fields we calculate the fingerprint upon. Defaults to []. + + Returns: + str: hexdigest of the fingerprint or the event.name if no fingerprint_fields were given. + """ + if not fingerprint_fields: + return alert.name + fingerprint = hashlib.sha256() + event_dict = alert.dict() + for fingerprint_field in fingerprint_fields: + fingerprint_field_value = event_dict.get(fingerprint_field, None) + if isinstance(fingerprint_field_value, (list, dict)): + fingerprint_field_value = json.dumps(fingerprint_field_value) + if fingerprint_field_value: + fingerprint.update(str(fingerprint_field_value).encode()) + return fingerprint.hexdigest() + + def get_alerts_configuration(self, alert_id: Optional[str] = None): + """ + Get configuration of alerts from the provider. + + Args: + alert_id (Optional[str], optional): If given, gets a specific alert by id. Defaults to None. + """ + # todo: we'd want to have a common alert model for all providers (also for consistent output from GPT) + raise NotImplementedError("get_alerts() method not implemented") + + def deploy_alert(self, alert: dict, alert_id: Optional[str] = None): + """ + Deploy an alert to the provider. + + Args: + alert (dict): The alert to deploy. + alert_id (Optional[str], optional): If given, deploys a specific alert by id. Defaults to None. + """ + raise NotImplementedError("deploy_alert() method not implemented") + + def _get_alerts(self) -> list[AlertDto]: + """ + Get alerts from the provider. + """ + raise NotImplementedError("get_alerts() method not implemented") + + def get_alerts(self) -> list[AlertDto]: + """ + Get alerts from the provider. 
+ """ + with tracer.start_as_current_span(f"{self.__class__.__name__}-get_alerts"): + alerts = self._get_alerts() + # enrich alerts with provider id + for alert in alerts: + alert.providerId = self.provider_id + return alerts + + def get_alerts_by_fingerprint(self, tenant_id: str) -> dict[str, list[AlertDto]]: + """ + Get alerts from the provider grouped by fingerprint, sorted by lastReceived. + + Returns: + dict[str, list[AlertDto]]: A dict of alerts grouped by fingerprint, sorted by lastReceived. + """ + alerts = self.get_alerts() + + if not alerts: + return {} + + # get alerts, group by fingerprint and sort them by lastReceived + with tracer.start_as_current_span(f"{self.__class__.__name__}-get_last_alerts"): + get_attr = operator.attrgetter("fingerprint") + grouped_alerts = { + fingerprint: list(alerts) + for fingerprint, alerts in itertools.groupby( + sorted( + alerts, + key=get_attr, + ), + get_attr, + ) + } + + # enrich alerts + with tracer.start_as_current_span(f"{self.__class__.__name__}-enrich_alerts"): + pulled_alerts_enrichments = get_enrichments( + tenant_id=tenant_id, + fingerprints=grouped_alerts.keys(), + ) + for alert_enrichment in pulled_alerts_enrichments: + if alert_enrichment: + alerts_to_enrich = grouped_alerts.get( + alert_enrichment.alert_fingerprint + ) + for alert_to_enrich in alerts_to_enrich: + parse_and_enrich_deleted_and_assignees( + alert_to_enrich, alert_enrichment.enrichments + ) + for enrichment in alert_enrichment.enrichments: + # set the enrichment + setattr( + alert_to_enrich, + enrichment, + alert_enrichment.enrichments[enrichment], + ) + + return grouped_alerts + + def setup_webhook( + self, tenant_id: str, keep_api_url: str, api_key: str, setup_alerts: bool = True + ): + """ + Setup a webhook for the provider. + + Args: + tenant_id (str): _description_ + keep_api_url (str): _description_ + api_key (str): _description_ + setup_alerts (bool, optional): _description_. Defaults to True. + + Raises: + NotImplementedError: _description_ + """ + raise NotImplementedError("setup_webhook() method not implemented") + + @staticmethod + def get_alert_schema() -> dict: + """ + Get the alert schema description for the provider. + e.g. How to define an alert for the provider that can be pushed via the API. + + Returns: + str: The alert format description. + """ + raise NotImplementedError( + "get_alert_format_description() method not implemented" + ) + + @staticmethod + def oauth2_logic(**payload) -> dict: + """ + Logic for oauth2 authentication. + + For example, in Slack oauth2, we need to get the code from the payload and exchange it for a token. + + return: dict: The secrets to be saved as the provider configuration. (e.g. the Slack access token) + """ + raise NotImplementedError("oauth2_logic() method not implemented") + + @staticmethod + def parse_event_raw_body(raw_body: bytes) -> bytes: + """ + Parse the raw body of an event and create an ingestable dict from it. + + For instance, in parseable, the "event" is just a string + > b'Alert: Server side error triggered on teststream1\nMessage: server reporting status as 500\nFailing Condition: status column equal to abcd, 2 times' + and we want to return an object + > b"{'alert': 'Server side error triggered on teststream1', 'message': 'server reporting status as 500', 'failing_condition': 'status column equal to abcd, 2 times'}" + + If this method is not implemented for a provider, just return the raw body. 
+
+        Args:
+            raw_body (bytes): The raw body of the incoming event (/event endpoint in alerts.py)
+
+        Returns:
+            dict: Ingestable event
+        """
+        return raw_body
+
+    def get_logs(self, limit: int = 5) -> list:
+        """
+        Get logs from the provider.
+
+        Args:
+            limit (int): The number of logs to get.
+        """
+        raise NotImplementedError("get_logs() method not implemented")
+
+    def expose(self):
+        """Expose parameters that were calculated during query time.
+
+        Each provider can expose parameters that were calculated during query time.
+        E.g. parameters that were supplied by the user and were rendered by the provider.
+
+        A concrete example is the "_from" and "to" of the Datadog Provider which are calculated during execution.
+        """
+        # TODO - implement dynamically using decorators and
+        return {}
+
+    def start_consume(self):
+        """Get the consumer for the provider.
+
+        Should be implemented by the provider if it has a consumer.
+
+        For an example, see the Kafka Provider.
+
+        Returns:
+            Consumer: The consumer for the provider.
+        """
+        return
+
+    def status(self) -> dict:
+        """Return the status of the provider.
+
+        Returns:
+            dict: The status of the provider.
+        """
+        return {
+            "status": "should be implemented by the provider if it has a consumer",
+            "error": "",
+        }
+
+    @property
+    def is_consumer(self) -> bool:
+        """Return True if the inheriting class implements its own start_consume method.
+
+        Returns:
+            bool: Whether the provider implements a consumer.
+        """
+        return self.start_consume.__qualname__ != "BaseProvider.start_consume"
+
+    def _push_alert(self, alert: dict):
+        """
+        Push an alert to the provider.
+
+        Args:
+            alert (dict): The alert to push.
+        """
+        # if this is not a dict, try to convert it to a dict
+        if not isinstance(alert, dict):
+            try:
+                alert_data = json.loads(alert)
+            except Exception:
+                # fall back to the raw alert if it cannot be parsed as JSON
+                alert_data = alert
+        else:
+            alert_data = alert
+
+        # if this is still not a dict, we can't push it
+        if not isinstance(alert_data, dict):
+            self.logger.warning(
+                "We currently support only alert represented as a dict, dismissing alert",
+                extra={"alert": alert},
+            )
+            return
+        # now try to build the alert model
+        # we will have a lot of default values here to support all providers and all cases, the
+        # way to fine tune those would be to use the provider specific model or enforce that the event from the queue will be casted into the fields
+        alert_model = AlertDto(
+            id=alert_data.get("id", str(uuid.uuid4())),
+            name=alert_data.get("name", "alert-from-event-queue"),
+            status=alert_data.get("status", AlertStatus.FIRING),
+            lastReceived=alert_data.get("lastReceived", datetime.datetime.now()),
+            environment=alert_data.get("environment", "alert-from-event-queue"),
+            isDuplicate=alert_data.get("isDuplicate", False),
+            duplicateReason=alert_data.get("duplicateReason", None),
+            service=alert_data.get("service", "alert-from-event-queue"),
+            source=alert_data.get("source", [self.provider_type]),
+            message=alert_data.get("message", "alert-from-event-queue"),
+            description=alert_data.get("description", "alert-from-event-queue"),
+            severity=alert_data.get("severity", AlertSeverity.INFO),
+            pushed=alert_data.get("pushed", False),
+            event_id=alert_data.get("event_id", str(uuid.uuid4())),
+            url=alert_data.get("url", None),
+            fingerprint=alert_data.get("fingerprint", None),
+        )
+        # push the alert to the provider
+        url = f'{os.environ["KEEP_API_URL"]}/alerts/event'
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+            "X-API-KEY": self.context_manager.api_key,
+        }
+        response = requests.post(url, json=alert_model.dict(),
headers=headers)
+        try:
+            response.raise_for_status()
+            self.logger.info("Alert pushed successfully")
+        except Exception:
+            self.logger.error(
+                f"Failed to push alert to {self.provider_id}: {response.content}"
+            )
+```
diff --git a/docs/providers/documentation/aks-provider.mdx b/docs/providers/documentation/aks-provider.mdx
new file mode 100644
index 0000000000..e92cae3449
--- /dev/null
+++ b/docs/providers/documentation/aks-provider.mdx
@@ -0,0 +1,49 @@
+---
+title: "Azure AKS"
+description: "Azure AKS provider to view Kubernetes resources."
+---
+
+## Inputs
+
+- **command_type** (required): The command type to operate on the k8s cluster (`get_pods`, `get_pvc`, `get_node_pressure`).
+
+## Outputs
+
+The Azure AKS Provider currently supports the `query` function.
+
+## Authentication Parameters
+
+The Azure AKS Provider uses subscription_id, resource_name, resource_group_name, client_id, client_secret and tenant_id to allow you to query your cluster resources. You need to provide the following authentication parameters to connect:
+
+- **subscription_id** (required): The subscription id of your Azure account.
+- **client_id** (required): The client id from your RBAC config generated in Azure.
+- **client_secret** (required): The client secret from your RBAC config generated in Azure.
+- **tenant_id** (required): The tenant id from your RBAC config generated in Azure.
+- **resource_group_name** (required): The resource group name where your AKS cluster is created.
+- **resource_name** (required): The cluster name of your AKS.
+
+## Connecting with the Provider
+
+To connect to Azure AKS, follow the steps below:
+
+1. Log in to your [Azure](https://azure.microsoft.com/) account.
+2. Go to your Kubernetes service page, click on the `Connect` button, and then click on `Open Cloud Shell`.
+3. Run `az ad sp create-for-rbac --role owner --scopes /subscriptions/` in the cloud shell; you will get a response similar to:
+   ```
+   {
+     "appId": "xxxxxx-xxxxx-xxxxxx-xxxx",
+     "displayName": "azure-cli-2023-11-06-13-00-52",
+     "password": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+     "tenant": "xxxxx-xxxxx-xxxx-xxxxx"
+   }
+   ```
+   In the above JSON object, `appId` is the `client_id`, `password` is the `client_secret`, and `tenant` is the `tenant_id`.
+
+## Notes
+
+- This provider allows you to interact with Azure AKS to query resources in your Kubernetes cluster.
+
+## Useful Links
+
+- [Azure AKS List Cluster User Creds](https://learn.microsoft.com/en-us/rest/api/aks/managed-clusters/list-cluster-user-credentials?view=rest-aks-2023-08-01&tabs=HTTP)
+- [Azure AKS Doc](https://learn.microsoft.com/en-us/azure/aks/)
diff --git a/docs/providers/documentation/appdynamics-provider.mdx b/docs/providers/documentation/appdynamics-provider.mdx
new file mode 100644
index 0000000000..eef0e7a762
--- /dev/null
+++ b/docs/providers/documentation/appdynamics-provider.mdx
@@ -0,0 +1,37 @@
+---
+title: "AppDynamics"
+sidebarTitle: "AppDynamics Provider"
+description: "AppDynamics provider allows you to get AppDynamics `alerts/actions` via webhook installation"
+---
+
+## Authentication Parameters
+The AppDynamics provider requires the following authentication parameters:
+
+- `AppDynamics Username`: Required. This is your AppDynamics account username.
+- `AppDynamics Password`: This is the password associated with your AppDynamics Username.
+- `AppDynamics Account Name`: This is your account's name.
+- `App Id`: The Id of the Application in which you would like to install the webhook.
+- `Host`: This is the hostname of the AppDynamics instance you wish to connect to. It identifies the AppDynamics server that the API will interact with.
+
+## Connecting with the Provider
+
+Obtain AppDynamics Username and Password:
+1. Ensure you have an AppDynamics account with the necessary [permissions](https://docs.appdynamics.com/accounts/en/cisco-appdynamics-on-premises-user-management/roles-and-permissions). The basic permissions required are `Account Owner` or `Administrator`. Alternatively, you can create an account ([instructions](https://docs.appdynamics.com/accounts/en/global-account-administration/access-management/manage-user-accounts)).
+2. Find your account name [here](https://accounts.appdynamics.com/overview).
+3. Determine the Host [here](https://accounts.appdynamics.com/overview).
+4. Get the appId of the AppDynamics application in which you wish to install the webhook.
+
+## Webhook Integration Modifications
+
+The webhook integration adds Keep as an alert monitor within the AppDynamics instance. It can be found under the "Alerts & Respond" section.
+The integration automatically gains access to the following scopes within AppDynamics:
+- `administrator`
+- `authenticated`
+
+
+## Useful Links
+
+- [AppDynamics HTTP Action Templates](https://docs.appdynamics.com/appd/24.x/24.3/en/extend-cisco-appdynamics/cisco-appdynamics-apis/configuration-import-and-export-api#id-.ConfigurationImportandExportAPIv24.2-ImportHTTPActionTemplatesintoanAccount)
+- [AppDynamics Permissions and Roles](https://docs.appdynamics.com/accounts/en/cisco-appdynamics-on-premises-user-management/roles-and-permissions)
+- [AppDynamics User Accounts](https://docs.appdynamics.com/accounts/en/global-account-administration/access-management/manage-user-accounts)
+
diff --git a/docs/providers/documentation/auth0-provider.mdx b/docs/providers/documentation/auth0-provider.mdx
new file mode 100644
index 0000000000..8eb53bc4e0
--- /dev/null
+++ b/docs/providers/documentation/auth0-provider.mdx
@@ -0,0 +1,56 @@
+---
+title: "Auth0"
+sidebarTitle: "Auth0 Provider"
+description: "Auth0 provider allows interaction with Auth0 APIs for authentication and user management."
+---
+
+## Inputs
+
+- `client_id`: str : The client ID for the Auth0 application.
+- `client_secret`: str : The client secret for the Auth0 application.
+- `audience`: str : The audience for the API authorization request.
+- `grant_type`: str : The type of authorization grant requested (e.g., `client_credentials`).
+
+## Outputs
+
+- `access_token`: The access token issued by Auth0 for authenticated requests.
+- `expires_in`: The time in seconds before the access token expires.
+- `token_type`: The type of token, typically `Bearer`.
+
+## Authentication Parameters
+
+To authenticate with Auth0, the following parameters are needed:
+- **client_id**: The unique identifier for your Auth0 application.
+- **client_secret**: A secret associated with your application, used for secure communication.
+- **audience**: Defines the API resources you're trying to access.
+
+These parameters can be retrieved from your Auth0 dashboard under the application's settings.
+
+## Connecting with the Provider
+
+The Auth0 provider connects to both the **Authentication API** and the **Management API**, enabling functionality such as token-based authentication and user management. Depending on your needs, you can:
+- Use the **Authentication API** to obtain access tokens, manage user profiles, or handle multi-factor authentication.
+- Use the **Management API** to automate the configuration of your Auth0 environment, register applications, manage users, and more.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: auth0-example
+  description: Auth0 example
+  triggers:
+    - type: manual
+  actions:
+    - name: auth0
+      provider:
+        type: auth0
+        config: "{{ providers.auth0config }}"
+        with:
+          client_id: "{{ secrets.auth0_client_id }}"
+          client_secret: "{{ secrets.auth0_client_secret }}"
+          audience: "https://api.example.com"
+          grant_type: "client_credentials"
+```
+
+## Useful Links
+- [Auth0 API Documentation](https://auth0.com/docs/api)
+- [Auth0 as an authentication method for Keep](https://docs.keephq.dev/deployment/authentication/auth0-auth)
\ No newline at end of file
diff --git a/docs/providers/documentation/axiom-provider.mdx b/docs/providers/documentation/axiom-provider.mdx
new file mode 100644
index 0000000000..ffc5ebf420
--- /dev/null
+++ b/docs/providers/documentation/axiom-provider.mdx
@@ -0,0 +1,46 @@
+---
+title: "Axiom Provider"
+description: "Axiom Provider is a class that allows you to ingest/digest data from Axiom."
+---
+
+## Inputs
+
+- **query** (required): AQL to execute
+- **dataset** (required): Dataset to query
+- **organization_id** (optional): Override the given organization id from configuration
+- **nocache** (optional): Whether to cache the response or not
+- **startTime** (optional): Start time, defaults to UTC now in ISO format.
+- **endTime** (optional): End time, defaults to UTC now in ISO format.
+
+## Outputs
+
+Axiom does not currently support the `notify` function.
+
+## Authentication Parameters
+
+The Axiom Provider uses API token authentication. You need to provide the following authentication parameters to connect to Axiom:
+
+- **api_token** (required): Your Axiom API token.
+- **organization_id** (optional): The organization ID to access datasets in.
+
+## Connecting with the Provider
+
+To connect to Axiom, you need to create an API token from your Axiom account. Follow these steps:
+
+1. Log in to your Axiom account.
+2. Go to the **API Access** page under the **Settings** menu.
+3. Click the **Create Token** button and enter a name for the token.
+4. Copy the token value and keep it safe.
+5. Add the token value to the `authentication` section in the Axiom Provider configuration.
+
+To access datasets, you need to provide the organization ID. You can find your organization ID in the URL of the Axiom web app. For example, if your Axiom URL is `https://app.axiom.co/organizations/1234`, then your organization ID is `1234`.
+
+## Notes
+
+- This provider supports a limited set of features provided by the Axiom API.
+- The `startTime` and `endTime` parameters use ISO-8601 format.
+- The `query` function returns the response in JSON format from the Axiom API.
+
+## Useful Links
+
+- [Axiom API Documentation](https://axiom.co/docs/restapi/introduction)
diff --git a/docs/providers/documentation/azuremonitoring-provider.mdx b/docs/providers/documentation/azuremonitoring-provider.mdx
new file mode 100644
index 0000000000..65e22a1cd7
--- /dev/null
+++ b/docs/providers/documentation/azuremonitoring-provider.mdx
@@ -0,0 +1,78 @@
+---
+title: "Azure Monitor"
+sidebarTitle: "Azure Monitor Provider"
+description: "Azure Monitor provider allows you to get alerts from Azure Monitor via webhooks."
+---
+
+## Overview
+
+The Azure Monitor Provider integrates Keep with Azure Monitor, allowing you to receive alerts within Keep's platform. By setting up a webhook in Azure, you can ensure that critical alerts are sent to Keep, allowing for efficient monitoring and response.
+
+## Connecting Azure Monitor to Keep
+
+Connecting Azure Monitor to Keep involves creating an Action Group in Azure, adding a webhook action, and configuring the Alert Rule to use the new Action Group.
+
+### Step 1: Navigate to Action Groups
+1. Log in to your Azure portal.
+2. Navigate to **Monitor** > **Alerts** > **Action groups**.
+
+
+
+
+
+### Step 2: Create a new Action Group
+1. Click on **+ Create**.
+
+
+
+
+
+
+### Step 3: Fill in the Action Group details
+1. Choose the Subscription and Resource Group.
+2. Give the Action Group an indicative name.
+
+
+
+
+
+### Step 4: Go to "Actions" and add Keep as a Webhook
+
+
+
+
+
+### Step 5: Test the Keep Webhook action
+
+
+
+
+
+
+
+
+
+### Step 6: View the alert in Keep
+
+
+
+
+
+## Useful Links
+- [Azure Monitor alert webhook](https://learn.microsoft.com/en-us/azure/azure-monitor/alerts/alerts-webhooks)
+- [Azure Monitor alert payload](https://learn.microsoft.com/en-us/azure/azure-monitor/alerts/alerts-payload-samples)
+- [Azure Monitor action groups](https://learn.microsoft.com/en-us/azure/azure-monitor/alerts/action-groups)
diff --git a/docs/providers/documentation/bash-provider.mdx b/docs/providers/documentation/bash-provider.mdx
new file mode 100644
index 0000000000..b13850a50d
--- /dev/null
+++ b/docs/providers/documentation/bash-provider.mdx
@@ -0,0 +1,47 @@
+---
+title: "Bash"
+sidebarTitle: "Bash Provider"
+description: "Bash provider allows executing Bash commands in a workflow, with a limitation for cloud execution."
+---
+
+## Inputs
+
+- `script`: str : The Bash script or command to execute.
+
+## Outputs
+
+- `stdout`: The standard output from the executed Bash command.
+- `stderr`: The standard error output from the executed Bash command (if any).
+- `exit_code`: The exit code of the Bash command.
+
+## Authentication Parameters
+
+_None required for local execution of Bash scripts._
+
+## Connecting with the Provider
+
+The Bash provider allows you to run Bash commands or scripts in your workflow. You can pass in any valid Bash command, and it will be executed in a local environment.
+
+### **Cloud Limitation**
+This provider is disabled for cloud environments and can only be used in local or self-hosted environments.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: bash-example
+  description: Bash example
+  triggers:
+    - type: manual
+  actions:
+    - name: bash
+      provider:
+        type: bash
+        config: "{{ providers.bashtest }}"
+        with:
+          script: |
+            echo "Hello, World!"
+            ls -l
+```
+
+## Useful Links
+- [Bash Documentation](https://www.gnu.org/savannah-checkouts/gnu/bash/manual/bash.html)
\ No newline at end of file
diff --git a/docs/providers/documentation/bigquery-provider.mdx b/docs/providers/documentation/bigquery-provider.mdx
new file mode 100644
index 0000000000..d3750cab80
--- /dev/null
+++ b/docs/providers/documentation/bigquery-provider.mdx
@@ -0,0 +1,47 @@
+---
+title: "BigQuery"
+sidebarTitle: "BigQuery Provider"
+description: "BigQuery provider allows interaction with Google BigQuery for querying and managing datasets."
+---
+
+## Inputs
+
+- `query`: str : The SQL query to execute against the BigQuery dataset.
+- `dataset`: str : The name of the dataset in BigQuery to use for the query.
+- `project_id`: str : The Google Cloud project ID where the BigQuery dataset is located.
+
+## Outputs
+
+- `result`: The results of the executed query, returned as a list of dictionaries.
+
+## Authentication Parameters
+
+- `service_account_key`: JSON key file for the Google Cloud service account with permissions to access BigQuery.
+
+## Connecting with the Provider
+
+1. Create a Google Cloud project and enable the BigQuery API.
+2. Create a service account in your Google Cloud project and download the JSON key file.
+3. Share the necessary datasets with the service account.
+4. Configure your provider using the `service_account_key`, `project_id`, and `dataset`.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: bigquery-example
+  description: BigQuery example
+  triggers:
+    - type: manual
+  actions:
+    - name: bigquery
+      provider:
+        type: bigquery
+        config: "{{ providers.bigquerytest }}"
+        with:
+          query: "SELECT * FROM `my_dataset.my_table` WHERE condition = 'value'"
+          dataset: "my_dataset"
+          project_id: "my_project_id"
+```
+
+## Useful Links
+- [BigQuery Documentation](https://cloud.google.com/bigquery/docs)
diff --git a/docs/providers/documentation/centreon-provider.mdx b/docs/providers/documentation/centreon-provider.mdx
new file mode 100644
index 0000000000..90801fb464
--- /dev/null
+++ b/docs/providers/documentation/centreon-provider.mdx
@@ -0,0 +1,26 @@
+---
+title: "Centreon"
+sidebarTitle: "Centreon Provider"
+description: "Centreon allows you to monitor your infrastructure with ease."
+---
+
+## Authentication Parameters
+
+The Centreon provider requires the following authentication parameters:
+
+- `Centreon Host URL`: The URL of the Centreon instance. Example: `https://centreon.example.com`.
+- `Centreon API Token`: The API token of an admin user.
+
+## Connecting with the Provider
+
+1. Centreon can be SaaS or on-premises. You need to have an instance of Centreon running.
+2. Go to Administration > API Tokens and create a new token for an admin user.
+3. Use the URL of your Centreon instance and the API token to configure the provider.
+
+## Useful Links
+
+- [Centreon](https://www.centreon.com/)
+
+## Note
+
+- Centreon only supports the following [host states](https://docs.centreon.com/docs/api/rest-api-v1/#realtime-information): UP = 0, DOWN = 2, UNREACHABLE = 3
diff --git a/docs/providers/documentation/clickhouse-provider.mdx b/docs/providers/documentation/clickhouse-provider.mdx
new file mode 100644
index 0000000000..5afd1d7c41
--- /dev/null
+++ b/docs/providers/documentation/clickhouse-provider.mdx
@@ -0,0 +1,29 @@
+---
+title: 'ClickHouse'
+sidebarTitle: 'ClickHouse Provider'
+description: 'ClickHouse provider allows you to interact with a ClickHouse database.'
+---
+
+## Overview
+
+ClickHouse is an open-source column-oriented DBMS for online analytical processing that allows users to generate analytical reports using SQL queries in real time.
+
+## Authentication Parameters
+
+The ClickHouse provider requires the following authentication parameters:
+
+- `Clickhouse Username`: The username to authenticate with ClickHouse.
+- `Clickhouse Password`: The password to authenticate with ClickHouse.
+- `Clickhouse Hostname`: The host where ClickHouse is running.
+- `Clickhouse Port`: The port where ClickHouse is running. The default port is `9000`.
+- `Clickhouse Database`: The database to connect to.
+
+## Connecting with the ClickHouse provider
+
+1. Obtain the required authentication parameters.
+2. Add the ClickHouse provider to your Keep account and configure it with the above authentication parameters (see the example below).
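+
+## Example of usage
+
+A minimal workflow sketch, following the pattern of the other SQL providers in these docs. The `query` parameter and the `clickhousetest` provider name are illustrative assumptions rather than confirmed API:
+
+```yaml
+workflow:
+  id: clickhouse-example
+  description: ClickHouse example
+  triggers:
+    - type: manual
+  actions:
+    - name: clickhouse
+      provider:
+        type: clickhouse
+        config: "{{ providers.clickhousetest }}"
+        with:
+          # Any valid ClickHouse SQL statement; this one is illustrative
+          query: "SELECT * FROM system.events LIMIT 10"
+```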
+ +## Useful Links + +- [ClickHouse](https://clickhouse.com/) +- [ClickHouse Statements](https://clickhouse.com/docs/en/sql-reference/statements/) diff --git a/docs/providers/documentation/cloudwatch-provider.mdx b/docs/providers/documentation/cloudwatch-provider.mdx new file mode 100644 index 0000000000..6d2506f4fe --- /dev/null +++ b/docs/providers/documentation/cloudwatch-provider.mdx @@ -0,0 +1,114 @@ +--- +title: "CloudWatch" +sidebarTitle: "CloudWatch Provider" +description: "CloudWatch provider enables seamless integration with AWS CloudWatch for alerting and monitoring, directly pushing alarms into Keep." +--- + +## Overview + +The CloudWatch Provider offers a direct integration with AWS CloudWatch, enabling Keep users to receive CloudWatch alarms within the Keep platform. This integration centralizes the monitoring and alerting capabilities, allowing for timely responses to changes in the infrastructure or application health. + +### Key Features: + +- **Webhook Integration**: Facilitates automatic subscription to AWS SNS topics linked with CloudWatch alarms, ensuring that Keep is notified of all relevant alarms. +- **Support for Custom SNS Topics**: Allows the use of both pre-existing SNS topics and the specification of custom SNS topics for alarm notifications. +- **Broad Monitoring Scope**: Utilizes CloudWatch's comprehensive alarm system to monitor application and infrastructure health. +- **Adaptable Authentication**: Accommodates both permanent and temporary AWS credentials to suit various security and operational requirements. + +## Connecting with the Provider + +To integrate CloudWatch with Keep, you'll need the following: + +- An AWS account with permissions to access CloudWatch and SNS services. +- A configured Keep account with API access. +- Appropriate AWS IAM permissions for the CloudWatch provider. + +## Required AWS IAM Permissions (Scopes) + +To ensure the CloudWatch provider operates seamlessly, certain AWS IAM permissions (referred to as "scopes") are necessary. These scopes enable the provider to perform actions such as reading alarm details, updating alarm configurations, and subscribing to SNS topics. Below is a list of the required scopes along with explanations: + +### Mandatory Scopes + +- **`cloudwatch:DescribeAlarms`** + - **Description**: Necessary to retrieve information about CloudWatch alarms. + - **Documentation**: [API_DescribeAlarms](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html) + - **Alias**: Describe Alarms + - **Mandatory**: Yes + - This scope is crucial for the provider to fetch and list all CloudWatch alarms. + +### Optional Scopes + +- **`cloudwatch:PutMetricAlarm`** + - **Description**: Required to update alarm configurations, particularly to add Keep as an SNS action on alarms. + - **Documentation**: [API_PutMetricAlarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricAlarm.html) + - **Alias**: Update Alarms + - This scope allows the modification of existing CloudWatch alarms to integrate with Keep notifications. + +- **`sns:ListSubscriptionsByTopic`** + - **Description**: Allows listing all subscriptions for a given SNS topic, enabling Keep to subscribe itself. + - **Documentation**: [SNS Access Policy](https://docs.aws.amazon.com/sns/latest/dg/sns-access-policy-language-api-permissions-reference.html) + - **Alias**: List Subscriptions + - Essential for the provider to manage subscriptions to SNS topics for alarm notifications. 
+ +- **`logs:GetQueryResults`** + - **Description**: Required for retrieving the results of CloudWatch Logs Insights queries. + - **Documentation**: [API_GetQueryResults](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) + - **Alias**: Read Query Results + - Enables the provider to fetch query results from CloudWatch Logs Insights. + +- **`logs:DescribeQueries`** + - **Description**: Necessary to describe the results of CloudWatch Logs Insights queries. + - **Documentation**: [API_DescribeQueries](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueries.html) + - **Alias**: Describe Query Results + - This scope is used to access detailed information about queries executed in CloudWatch Logs Insights. + +- **`logs:StartQuery`** + - **Description**: Allows starting CloudWatch Logs Insights queries. + - **Documentation**: [API_StartQuery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) + - **Alias**: Start Logs Query + - Critical for initiating logs analysis and queries within CloudWatch Logs Insights. + +- **`iam:SimulatePrincipalPolicy`** + - **Description**: Permits Keep to test the scopes of the current IAM role without making any resource modifications. + - **Documentation**: [API_SimulatePrincipalPolicy](https://docs.aws.amazon.com/IAM/latest/APIReference/API_SimulatePrincipalPolicy.html) + - **Alias**: Simulate IAM Policy + - This scope is useful for verifying the permissions associated with the IAM role used by Keep, ensuring it has the necessary access without altering any AWS resources. + +While some scopes are optional, having them configured can enhance the integration capabilities and provide a more comprehensive monitoring solution within Keep. + +### Authentication Configuration + +Connecting CloudWatch to Keep requires: + +- **AWS Access Key & Secret**: Your AWS credentials with access to CloudWatch and SNS. +- **Region**: The AWS region your CloudWatch alarms and SNS topics reside in. +- **Session Token** (optional): Necessary for temporary AWS credentials. +- **CloudWatch SNS Topic** (optional): An ARN or name of the SNS topic for sending notifications. Optional if your alarms are already configured with an SNS topic. + +## Setting Up the Integration + +For a seamless setup process, ensure your AWS IAM roles are properly configured with the necessary permissions for CloudWatch and SNS access. + +### Steps: + +1. **Configure AWS IAM Roles**: Ensure the IAM role used by the CloudWatch provider has permissions for `cloudwatch:DescribeAlarms`, `cloudwatch:PutMetricAlarm`, `sns:ListSubscriptionsByTopic`, and other relevant actions. +2. **Specify Authentication Details**: In the Keep platform, enter the AWS Access Key, Secret, and Region details in the CloudWatch provider configuration. +3. **Set Up SNS Topic (Optional)**: If using a custom SNS topic, specify its ARN or name in the provider configuration. Keep will use this topic to listen for alarm notifications. +4. **Activate the Provider**: Finalize the setup in Keep to start receiving CloudWatch alarms. + +## Troubleshooting + +- Ensure the AWS credentials provided have the correct permissions and are not expired. +- Verify that the SNS topics are correctly configured to send notifications to Keep. +- Check the CloudWatch alarms to ensure they are active and correctly configured to trigger under the desired conditions. 
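+
+## Example of usage
+
+A minimal workflow sketch of querying CloudWatch Logs Insights through this provider, in the style of the other examples in these docs. The parameter names (`log_group`, `query`, `hours`) and the `cloudwatchtest` provider name are illustrative assumptions, not confirmed API:
+
+```yaml
+workflow:
+  id: cloudwatch-example
+  description: CloudWatch example
+  triggers:
+    - type: manual
+  actions:
+    - name: cloudwatch-logs-query
+      provider:
+        type: cloudwatch
+        config: "{{ providers.cloudwatchtest }}"
+        with:
+          # Illustrative parameters for a Logs Insights query
+          log_group: "/aws/lambda/my-function"
+          query: "fields @timestamp, @message | sort @timestamp desc | limit 20"
+          hours: 24
+```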
+
+## Webhook Integration Modifications
+
+The webhook integration for CloudWatch adds Keep as a subscriber to the SNS topics associated with CloudWatch alarms. This integration allows Keep to receive notifications for all alarms triggered within the AWS environment.
+The integration automatically gains access to the following scopes within CloudWatch:
+- `cloudwatch:DescribeAlarms`
+
+## Useful Links
+
+- [AWS CloudWatch Documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/WhatIsCloudWatch.html)
+- [AWS SNS Documentation](https://docs.aws.amazon.com/sns/latest/dg/welcome.html)
diff --git a/docs/providers/documentation/console-provider.mdx b/docs/providers/documentation/console-provider.mdx
new file mode 100644
index 0000000000..4ef5a3c9ff
--- /dev/null
+++ b/docs/providers/documentation/console-provider.mdx
@@ -0,0 +1,48 @@
+---
+title: "Console"
+sidebarTitle: "Console Provider"
+description: "Console provider is a mock provider that prints a given alert message to the console."
+---
+
+## Inputs
+
+- message: The alert message to print to the console
+
+## Outputs
+
+This provider has no outputs
+
+## Authentication Parameters
+
+This provider has no authentication
+
+## Connecting with the Provider
+
+This provider doesn't require any connection
+
+## Notes
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Useful Links
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Example
+
+```python
+config = {
+    "description": "Console Output Provider",
+    "authentication": {},
+}
+provider = ProvidersFactory.get_provider(
+    provider_id='mock', provider_type="console", provider_config=config
+)
+provider.notify(
+    message="Simple alert showing context with name: {name}".format(
+        name="John Doe"
+    )
+)
+```
+
+![](/images/console_provider_example.png)
diff --git a/docs/providers/documentation/coralogix-provider.mdx b/docs/providers/documentation/coralogix-provider.mdx
new file mode 100644
index 0000000000..ff29491bd4
--- /dev/null
+++ b/docs/providers/documentation/coralogix-provider.mdx
@@ -0,0 +1,70 @@
+---
+title: 'Coralogix'
+sidebarTitle: 'Coralogix Provider'
+description: 'Coralogix provider allows you to send alerts from Coralogix to Keep using webhooks.'
+---
+
+## Overview
+
+Coralogix is a modern observability platform that delivers comprehensive visibility into all your logs, metrics, traces and security events with end-to-end monitoring.
+
+## Connecting Coralogix to Keep
+
+To connect Coralogix to Keep, you need to configure it as a webhook from Coralogix. Follow the steps below to set up the integration:
+
+1. From the Coralogix toolbar, navigate to Data Flow > Outbound Webhooks.
+
+2. In the Outbound Webhooks section, click Generic Webhook.
+
+3. Click Add New.
+
+4. Enter a webhook name and set the URL to `https://api.keephq.dev/alerts/event/coralogix`.
+5. Select the HTTP method (POST).
+
+6. Generate an API key with the webhook role from the [Keep settings](https://platform.keephq.dev/settings?selectedTab=api-key). Copy the API key and paste it in the request header in the next step.
+
+7. Add a request header with the key "x-api-key" and the API key as the value in the Coralogix webhook configuration.
+
+8. Edit the body of the messages that will be sent when the webhook is triggered (optional).
+9. Save the configuration.
+ +## Useful Links + +- [Coralogix Website](https://coralogix.com/) + diff --git a/docs/providers/documentation/datadog-provider.mdx b/docs/providers/documentation/datadog-provider.mdx new file mode 100644 index 0000000000..6aee49f465 --- /dev/null +++ b/docs/providers/documentation/datadog-provider.mdx @@ -0,0 +1,77 @@ +--- +title: "Datadog" +sidebarTitle: "Datadog Provider" +description: "Datadog provider allows you to query Datadog metrics and logs for monitoring and analytics." +--- + +## Inputs + +- `query`: str: The query string to search within Datadog metrics and logs. +- `time_range`: dict = None: The time range for the query (e.g., `{'from': 'now-15m', 'to': 'now'}`) +- `source`: str = None: The source type (metrics, traces, logs). + +## Outputs + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Authentication Parameters + +The `api_key` and `app_key` are required for connecting to the Datadog provider. You can obtain them as described in the "Connecting with the Provider" section. + +## Connecting with the Provider + +### API Key + +To obtain the Datadog API key, follow these steps: + +1. Log in to your Datadog account. +2. Navigate to the "Integrations" section. +3. Click on the "API" tab. +4. Generate a new API Key. + +### App Key + +To obtain the Datadog App Key, follow these steps: + +1. Log in to your Datadog account. +2. Navigate to the "Integrations" section. +3. Click on the "API" tab. +4. Generate a new App Key or use an existing one. + +## Fingerprinting + +Fingerprints in Datadog are calculated based on the `groups` and `monitor_id` fields of an incoming/pulled event. + +## Scopes + +Certain scopes may be required to perform specific actions or queries via the Datadog Provider. Below is a summary of relevant scopes and their use cases: + +- monitors_read (Monitors Read) + Required: True + Description: View monitors. +- monitors_write (Monitors Write) + Required: False + Description: Write monitors. (\*_Required for auto-webhook integration_) +- create_webhooks (Integrations Manage) + Required: False + Description: Create webhooks integrations. (\*_Required for auto-webhook integration_) +- metrics_read + Required: False + Description: View metrics. + +## Notes + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Useful Links + +- [Datadog API Documentation](https://docs.datadoghq.com/api/) +- [Datadog Query Language](https://docs.datadoghq.com/dashboards/querying/) + +## Webhook Integration Modifications + +The webhook integration adds Keep as a monitor within Datadog. It can be found under the "Monitors" section. 
+
+The integration automatically gains access to the following scopes within Datadog:
+- `monitors_read`
+- `monitors_write`
+- `create_webhooks`
diff --git a/docs/providers/documentation/discord-provider.mdx b/docs/providers/documentation/discord-provider.mdx
new file mode 100644
index 0000000000..f75da282b1
--- /dev/null
+++ b/docs/providers/documentation/discord-provider.mdx
@@ -0,0 +1,57 @@
+---
+title: "Discord"
+sidebarTitle: "Discord Provider"
+description: "Discord provider is a provider that allows sending notifications to Discord"
+---
+
+## Inputs
+
+- content: str : Message text to send
+- components: list[dict] = [] : Styling or interactive components such as emojis and buttons
+
+Note: for components to work, the webhook must be owned by an application - see https://discord.com/developers/docs/resources/webhook#execute-webhook
+
+## Outputs
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Authentication Parameters
+
+The `webhook_url` associated with the channel is required to send the message to the respective channel.
+
+## Connecting with the Provider
+
+- Open the Discord server where you want to create the webhook.
+- Click on the settings icon next to the server name, and select "Server Settings."
+- In the left-hand menu, click on "Integrations," and then click on "Webhooks."
+- Click the "Create Webhook" button, and give your webhook a name.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: discord-example
+  description: Discord example
+  triggers:
+    - type: manual
+  actions:
+    - name: discord
+      provider:
+        type: discord
+        config: "{{ providers.discordtest }}"
+        with:
+          content: Alerta!
+          components:
+            - type: 1 # Action row
+              components:
+                - type: 2 # Button
+                  style: 1 # Primary style
+                  label: "Click Me!"
+                  custom_id: "button_click"
+```
+
+## Useful Links
+
+- https://discord.com/developers/docs/resources/webhook#execute-webhook
diff --git a/docs/providers/documentation/dynatrace-provider.mdx b/docs/providers/documentation/dynatrace-provider.mdx
new file mode 100644
index 0000000000..3be5038c91
--- /dev/null
+++ b/docs/providers/documentation/dynatrace-provider.mdx
@@ -0,0 +1,48 @@
+---
+title: "Dynatrace"
+sidebarTitle: "Dynatrace Provider"
+description: "Dynatrace provider allows integration with Dynatrace for monitoring, alerting, and collecting metrics."
+---
+
+## Inputs
+
+- `metric_key`: str : The key of the Dynatrace metric to query.
+- `time_range`: str (optional) : Time range for the query (e.g., `last30mins`, `last24hours`, etc.)
+- `filters`: dict (optional) : Filters to apply to the Dynatrace query (e.g., entityId, host).
+
+## Outputs
+
+- `result`: The result of the Dynatrace metric query, returned in JSON format.
+
+## Authentication Parameters
+
+- `api_token`: Dynatrace API token required to authenticate requests.
+- `dynatrace_url`: URL of the Dynatrace environment (e.g., `https://<environment-id>.live.dynatrace.com`).
+
+## Connecting with the Provider
+
+1. Log in to your Dynatrace account and navigate to "Settings" → "Integration" → "Dynatrace API."
+2. Generate an API token with appropriate permissions (e.g., Read metrics).
+3. Get your environment's Dynatrace URL.
+4. Configure the Dynatrace provider using the API token and Dynatrace URL.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: dynatrace-example
+  description: Dynatrace example
+  triggers:
+    - type: manual
+  actions:
+    - name: dynatrace
+      provider:
+        type: dynatrace
+        config: "{{ providers.dynatracetest }}"
+        with:
+          metric_key: "builtin:host.cpu.usage"
+          time_range: "last24hours"
+          filters:
+            entityId: "HOST-12345"
+```
+
+## Useful Links
+- [Dynatrace API Documentation](https://docs.dynatrace.com/docs/dynatrace-api)
diff --git a/docs/providers/documentation/elastic-provider.mdx b/docs/providers/documentation/elastic-provider.mdx
new file mode 100644
index 0000000000..d1b8ea4ece
--- /dev/null
+++ b/docs/providers/documentation/elastic-provider.mdx
@@ -0,0 +1,45 @@
+---
+title: "Elastic"
+sidebarTitle: "Elastic Provider"
+description: "Elastic provider is a provider used to query Elasticsearch (tested with elastic.co)"
+---
+
+## Inputs
+
+- query: str | dict : The query to search Elasticsearch with (either SQL/EQL)
+- index: str = None : The index to search on (**If index is None, query must be SQL**)
+
+## Outputs
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Authentication Parameters
+
+The `api_key` and `cloud_id` are required for connecting to the Elastic provider. You can obtain them as described in the "Connecting with the Provider" section.
+
+## Connecting with the Provider
+
+### API Key
+
+To obtain the Elastic API key, follow these steps:
+
+1. Log in to your elastic.co account
+2. Go to the "Elasticsearch Service" section
+3. Click on the "API Key" button
+4. Generate a new API Key
+
+### Cloud ID
+
+To obtain the Elastic Cloud ID, follow these steps:
+
+1. Log in to your elastic.co account
+2. Go to the "Elasticsearch Service" section
+3. Find the "Cloud ID" in the Overview page.
+
+## Notes
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Useful Links
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
diff --git a/docs/providers/documentation/gcpmonitoring-provider.mdx b/docs/providers/documentation/gcpmonitoring-provider.mdx
new file mode 100644
index 0000000000..8a31e79c0a
--- /dev/null
+++ b/docs/providers/documentation/gcpmonitoring-provider.mdx
@@ -0,0 +1,78 @@
+---
+title: "GCP Monitoring"
+sidebarTitle: "GCP Monitoring Provider"
+description: "GCP Monitoring provider allows you to get alerts from GCP Monitoring via webhooks."
+---
+
+## Overview
+The GCP Monitoring Provider enables seamless integration between Keep and GCP Monitoring, allowing alerts from GCP Monitoring to be directly sent to Keep through webhook configurations. This integration ensures that critical alerts are efficiently managed and responded to within Keep's platform.
+
+## Connecting GCP Monitoring to Keep
+To connect GCP Monitoring to Keep, you'll need to configure a webhook as a notification channel in GCP Monitoring and then link it to the desired alert policy.
+
+### Step 1: Access Notification Channels
+Log in to the Google Cloud Platform console.
+Navigate to **Monitoring > Alerting > Notification channels**.
+
+### Step 2: Add a New Webhook
+Within the Webhooks section, click on **ADD NEW**.
+
+### Step 3: Configure the Webhook
+In the Endpoint URL field, enter the webhook URL provided by Keep.
+- For Display Name, use keep-gcpmonitoring-webhook-integration.
+- Enable Use HTTP Basic Auth and input the following credentials:
+  - Auth Username: **api_key**
+  - Auth Password: **%YOURAPIKEY%**
+
+### Step 4: Save the Webhook Configuration
+- Click on Save to store the webhook configuration.
+
+### Step 5: Associate the Webhook with an Alert Policy
+
+Navigate to the alert policy from which you wish to send notifications to Keep.
+- Click on Edit.
+- Under "Notifications and name," find the Notification Channels section and select the keep-gcpmonitoring-webhook-integration channel you created.
+- Save the changes by clicking on SAVE POLICY.
+
+### Step 6: Review the alert in Keep
+
+## Useful Links
+- [GCP Monitoring Notification Channels](https://cloud.google.com/monitoring/support/notification-options)
+- [GCP Monitoring Alerting](https://cloud.google.com/monitoring/alerts)
diff --git a/docs/providers/documentation/github-provider.mdx b/docs/providers/documentation/github-provider.mdx
new file mode 100644
index 0000000000..425fd09f7c
--- /dev/null
+++ b/docs/providers/documentation/github-provider.mdx
@@ -0,0 +1,50 @@
+---
+title: "GitHub"
+sidebarTitle: "GitHub Provider"
+description: "GitHub provider allows integration with GitHub for managing repositories, issues, pull requests, and more."
+---
+
+## Inputs
+
+- `repo`: str : The name of the repository (e.g., `user/repo-name`)
+- `action`: str : The action to perform (e.g., `create_issue`, `close_pr`)
+- `issue_title`: str (optional) : The title for a new issue (required for `create_issue` action)
+- `issue_body`: str (optional) : The body content for the issue (optional but recommended for `create_issue`)
+- `pr_number`: int (optional) : The pull request number (required for `close_pr` action)
+
+## Outputs
+
+- `result`: The result of the GitHub API call, returned as a dictionary.
+
+## Authentication Parameters
+
+- `github_token`: A personal access token (PAT) from GitHub to authenticate API requests.
+  - You can generate a token at [GitHub Tokens](https://github.com/settings/tokens).
+
+## Connecting with the Provider
+
+1. Go to your GitHub account and navigate to **Settings > Developer Settings > Personal Access Tokens**.
+2. Generate a token with the required permissions (e.g., `repo`, `workflow`, etc.).
+3. Copy the token and provide it as `github_token` in the provider configuration.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: github-example
+  description: GitHub example
+  triggers:
+    - type: manual
+  actions:
+    - name: github
+      provider:
+        type: github
+        config: "{{ providers.githubtest }}"
+        with:
+          repo: "user/repo-name"
+          action: "create_issue"
+          issue_title: "New Issue Title"
+          issue_body: "Description of the issue."
+```
+
+## Useful Links
+- [GitHub REST API Documentation](https://docs.github.com/en/rest?apiVersion=2022-11-28)
diff --git a/docs/providers/documentation/github_workflows_provider.mdx b/docs/providers/documentation/github_workflows_provider.mdx
new file mode 100644
index 0000000000..a17721aeea
--- /dev/null
+++ b/docs/providers/documentation/github_workflows_provider.mdx
@@ -0,0 +1,49 @@
+---
+title: "Github Workflows"
+sidebarTitle: "Github Workflows Provider"
+description: "GithubWorkflowProvider is a provider that interacts with the GitHub Workflows API."
+---
+
+## Configuration
+
+The `kwargs` of the `notify` function in **GithubWorkflowProvider** contains the following parameters:
+```python
+kwargs(dict):
+    github_url(str): API endpoint to send the request to. (Required*)
+    github_method(str): GET | POST | DELETE | PUT
+```
+The kwargs will be automatically populated by the variables passed under `with` in the workflow file.
+
+## Outputs
+
+It returns the response of the query.
+
+## Authentication Parameters
+
+A GitHub Personal Access Token `GITHUB_PAT` associated with the GitHub account is required to perform the required action.
+
+## Connecting with the Provider
+
+Create your personal access token (classic) in GitHub:
+- In the upper-right corner of any page, click your profile photo, then click **Settings**.
+- In the left sidebar, click **Developer settings**.
+- In the left sidebar, under Personal access tokens, click **Tokens (classic)**.
+- Select **Generate new token**, then click **Generate new token (classic)**.
+- In the "Note" field, give your token a descriptive name.
+- To give your token an expiration, select **Expiration**, then choose a default option or click **Custom** to enter a date.
+- Select the scopes you'd like to grant this token.
+- Click **Generate token**.
+- Optionally, to copy the new token to your clipboard, click the copy button.
+
+See the links below for more info.
+
+## Notes
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Useful Links
+
+- [Workflows](https://docs.github.com/en/rest/actions/workflows)
+- [Workflows runs](https://docs.github.com/en/rest/actions/workflow-runs)
+- [Workflows jobs](https://docs.github.com/en/rest/actions/workflow-jobs)
+- [Managing your personal access tokens](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens)
diff --git a/docs/providers/documentation/gitlab-provider.mdx b/docs/providers/documentation/gitlab-provider.mdx
new file mode 100644
index 0000000000..65011c8bc2
--- /dev/null
+++ b/docs/providers/documentation/gitlab-provider.mdx
@@ -0,0 +1,36 @@
+---
+title: "GitLab Provider"
+sidebarTitle: "GitLab Provider"
+description: "GitLab provider is a provider used for creating issues in GitLab"
+---
+
+## Inputs
+
+The `notify` function takes the following parameters as inputs:
+
+- `id` (required): The global ID or path of the project.
+- `title` (required): Title of the Issue/Ticket.
+- `description` (optional): Description for the Issue.
+- `labels` (optional): Issue labels separated by a comma.
+- `issue_type` (optional): Issue type name. One of `issue`, `incident`, `test_case` or `task`. Default is `issue`.
+
+See the [documentation](https://docs.gitlab.com/ee/api/issues.html#new-issue) for more details.
+
+## Authentication Parameters
+The GitLab provider requires the following authentication parameters:
+
+- `host` (required): GitLab host name of the project.
+- `Personal Access Token` (required): Your Personal Access Token with `api` scope.
+
+See [GitLab Scopes](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#personal-access-token-scopes) for more.
+
+## Connecting with the Provider
+
+1. Go to [Personal Access Token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token) to see how to create a personal_access_token.
+2. Get `host`, e.g. if you're using Cloud GitLab, use `https://gitlab.com`, or use your own `host` if you're using on-prem GitLab (see the example below).
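+
+## Example of usage
+
+A minimal workflow sketch that creates an issue using the inputs listed above. The `gitlabtest` provider name and the project path are illustrative placeholders:
+
+```yaml
+workflow:
+  id: gitlab-example
+  description: GitLab example
+  triggers:
+    - type: manual
+  actions:
+    - name: gitlab
+      provider:
+        type: gitlab
+        config: "{{ providers.gitlabtest }}"
+        with:
+          id: "my-group/my-project"
+          title: "New Issue Title"
+          description: "Description of the issue."
+          labels: "bug,critical"
+          issue_type: "issue"
+```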
+
+## Useful Links
+
+- [GitLab PAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token)
+- [GitLab Create New Issue](https://docs.gitlab.com/ee/api/issues.html#new-issue)
diff --git a/docs/providers/documentation/gitlabpipelines-provider.mdx b/docs/providers/documentation/gitlabpipelines-provider.mdx
new file mode 100644
index 0000000000..e112e3f799
--- /dev/null
+++ b/docs/providers/documentation/gitlabpipelines-provider.mdx
@@ -0,0 +1,42 @@
+---
+title: "Gitlab Pipelines"
+sidebarTitle: "Gitlab Pipelines Provider"
+description: "GitlabPipelinesProvider is a provider that interacts with the GitLab Pipelines API."
+---
+
+## Inputs
+
+The `kwargs` of the `notify` function in **GitlabPipelinesProvider** contains the following parameters:
+```python
+kwargs(dict):
+    gitlab_url(str): API endpoint to send the request to. (Required*)
+    gitlab_method(str): GET | POST | DELETE | PUT
+```
+The kwargs will be automatically populated by the variables passed under `with` in the workflow file.
+
+## Outputs
+
+It prints the output based on the response, in the following format: `Sent {method} request to {url} with status {response_status}`
+
+## Authentication Parameters
+
+A GitLab Personal Access Token `GITLAB_PAT` associated with the GitLab account is required to perform the required action.
+
+## Connecting with the Provider
+
+Create your personal access token in GitLab:
+- On the left sidebar, select your avatar.
+- Select **Edit profile**.
+- On the left sidebar, select **Access Tokens**.
+- Select **Add new token**.
+- Enter a **name** and **expiry date** for the token.
+- Select the desired scopes.
+- Select **Create personal access token**.
+
+## Notes
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Useful Links
+
+- https://docs.gitlab.com/ee/api/pipelines.html
diff --git a/docs/providers/documentation/gke-provider.mdx b/docs/providers/documentation/gke-provider.mdx
new file mode 100644
index 0000000000..416d6301ed
--- /dev/null
+++ b/docs/providers/documentation/gke-provider.mdx
@@ -0,0 +1,48 @@
+---
+title: "GKE"
+sidebarTitle: "GKE Provider"
+description: "GKE provider allows managing Google Kubernetes Engine clusters and related resources."
+---
+
+## Inputs
+
+- `cluster_name`: str : The name of the GKE cluster to manage
+- `action`: str : The action to perform on the cluster (e.g., `create`, `delete`, `scale`)
+- `node_count`: int (optional) : The number of nodes (used in scaling the cluster)
+
+## Outputs
+
+- `status`: The status of the action performed on the GKE cluster, returned as a response message.
+
+## Authentication Parameters
+
+- `gcp_credentials`: JSON containing Google Cloud credentials with the necessary permissions to manage GKE clusters.
+- `project_id`: Google Cloud project ID where the GKE cluster is deployed.
+- `zone`: The zone where the GKE cluster is hosted.
+
+## Connecting with the Provider
+
+1. Obtain Google Cloud credentials by following the steps in [Google Cloud's service account guide](https://cloud.google.com/iam/docs/creating-managing-service-account-keys).
+2. Ensure your service account has the necessary permissions to manage GKE clusters (`roles/container.admin`).
+3. Provide the `gcp_credentials`, `project_id`, and `zone` in your provider configuration.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: gke-example
+  description: GKE example
+  triggers:
+    - type: manual
+  actions:
+    - name: gke
+      provider:
+        type: gke
+        config: "{{ providers.gketest }}"
+        with:
+          cluster_name: "my-cluster"
+          action: "create"
+          node_count: 3
+```
+
+## Useful Links
+- [Google Kubernetes Engine Documentation](https://cloud.google.com/kubernetes-engine/docs)
diff --git a/docs/providers/documentation/google_chat-provider.mdx b/docs/providers/documentation/google_chat-provider.mdx
new file mode 100644
index 0000000000..486e81856d
--- /dev/null
+++ b/docs/providers/documentation/google_chat-provider.mdx
@@ -0,0 +1,34 @@
+---
+title: "Google Chat"
+sidebarTitle: "Google Chat Provider"
+description: "Google Chat provider is a provider that allows sending messages to Google Chat"
+---
+
+## Inputs
+
+The `notify` function takes the following parameters as inputs:
+
+- `message`: Required. Message text to send to Google Chat
+
+## Outputs
+
+## Authentication Parameters
+
+The `webhook_url` associated with the space is required to send the message to the respective Google Chat space.
+
+## Connecting with the Provider
+
+1. Open Google Chat
+2. Open the space to which you want to add a webhook
+3. Next to the space title, click the expand more arrow, and then click "Apps & Integrations"
+4. Click "+ Add webhooks"
+5. In the Name field, enter "Quickstart Webhook"
+6. In the Avatar URL field, enter https://developers.google.com/chat/images/chat-product-icon.png
+7. Click Save
+8. To copy the webhook URL, click "More", and then click "Copy link".
+
+## Notes
+
+## Useful Links
+
+- https://developers.google.com/chat/how-tos/webhooks
diff --git a/docs/providers/documentation/grafana-provider.mdx b/docs/providers/documentation/grafana-provider.mdx
new file mode 100644
index 0000000000..e559d005b1
--- /dev/null
+++ b/docs/providers/documentation/grafana-provider.mdx
@@ -0,0 +1,99 @@
+---
+title: "Grafana Provider"
+description: "Grafana Provider allows you to pull/push alerts from Grafana to Keep."
+---
+Grafana currently supports pulling/pushing alerts. We will add querying and notifying soon.
+
+## Inputs
+
+Grafana Provider does not currently support the `notify` function.
+
+## Outputs
+
+Grafana Provider does not currently support the `query` function.
+
+## Authentication Parameters
+
+The Grafana Provider uses API token authentication. You need to provide the following authentication parameters to connect to Grafana:
+
+- **token** (required): Your Grafana API Token.
+- **host** (required): The URL of your Grafana host (e.g., https://keephq.grafana.net).
+
+## Connecting with the Provider
+
+To connect to Grafana, you need to create an API Token:
+
+1. Log in to your Grafana account.
+2. Go to the **Service Accounts** page (cmd+k -> service).
+3. Click the **Add service account** button and provide a name for your service account.
+4. Grant "alerting" permissions:
+5. Now generate a Service Account Token:
+6. Use the token value in the `authentication` section of the Grafana Provider configuration.
+
+## Post Installation Validation
+
+You can check that the Grafana Provider works by testing Keep's contact point (which was installed via the webhook integration).
+
+1. Go to **Contact Points** (cmd+k -> contact).
+2. Find the **keep-grafana-webhook-integration**:
+3. Click on **View contact point**:
+4. Click on **Test**:
+5. Go to Keep, and you should see an alert from Grafana!
+
+**Alternative Validation Methods (When Keep is Not Accessible Externally):**
+
+If Keep is not accessible externally and the webhook cannot be created, you can manually validate the Grafana provider setup using the following methods:
+
+1. **Manual Test Alerts in Grafana:**
+   - Create a manual test alert in Grafana.
+   - Set up a contact point within Grafana that would normally send alerts to Keep.
+   - Trigger the alert and check Grafana's logs for errors or confirmation that the alert was sent.
+
+2. **Check Logs in Grafana:**
+   - Access Grafana's log files or use the **Explore** feature to query logs related to the alerting mechanism.
+   - Ensure there are no errors related to the webhook integration and that alerts are processed correctly.
+
+3. **Verify Integration Status:**
+   - Navigate to the **Alerting** section in Grafana.
+   - Confirm that the integration status shows as active or functioning.
+   - Monitor any outbound HTTP requests to verify that Grafana is attempting to communicate with Keep.
+
+4. **Network and Connectivity Check:**
+   - Use network monitoring tools to ensure Grafana can reach Keep or any alternative endpoint configured for alerts.
+
+## Webhook Integration Modifications
+
+The webhook integration adds Keep as a contact point in the Grafana instance. This integration can be located under the "Contact Points" section. Keep also gains access to the following scopes:
+- `alert.provisioning:read`
+- `alert.provisioning:write`
diff --git a/docs/providers/documentation/grafana_incident-provider.mdx b/docs/providers/documentation/grafana_incident-provider.mdx
new file mode 100644
index 0000000000..a59d13df70
--- /dev/null
+++ b/docs/providers/documentation/grafana_incident-provider.mdx
@@ -0,0 +1,32 @@
+---
+title: 'Grafana Incident Provider'
+sidebarTitle: 'Grafana Incident Provider'
+description: 'Grafana Incident Provider allows you to query all incidents from Grafana Incident.'
+---
+
+## Authentication Parameters
+
+The Grafana Incident provider requires the following authentication parameters:
+
+- `host_url` - The URL of the Grafana Incident instance.
+  Example: `https://your-stack.grafana.net`
+- `service_account_token` - The service account token used to authenticate the Grafana Incident API requests.
+
+## Getting started
+
+1. In your Grafana Cloud stack, click Alerts & IRM in the left-side menu.
+2. Click the Incident tile to enable the app for your Grafana Cloud instance.
+3. Once Grafana Incident is enabled, it is accessible to users in your organization.
+
+## Connecting with the Provider
+
+1. After enabling the Grafana Incident app, navigate to Administration > Users and access > Service Accounts.
+2. Create a new service account by clicking the Add Service Account button.
+3. Give the service account a name and assign it the Viewer role.
+4. Click on Add service account token and click on Generate token.
+5. Copy the generated token.
+6. This will be used as the `service_account_token` parameter in the provider configuration.
+
+## Useful Links
+
+- [Grafana Incident](https://grafana.com/docs/grafana-cloud/alerting-and-irm/incident/)
diff --git a/docs/providers/documentation/grafana_oncall-provider.mdx b/docs/providers/documentation/grafana_oncall-provider.mdx
new file mode 100644
index 0000000000..4e8160873a
--- /dev/null
+++ b/docs/providers/documentation/grafana_oncall-provider.mdx
@@ -0,0 +1,101 @@
+---
+title: "Grafana OnCall Provider"
+description: "Grafana OnCall Provider is a class that allows you to ingest/digest data from Grafana OnCall."
+--- + +## Inputs + +- **title** (required): The title of the incident. +- **roomPrefix** (optional): Prefix for the incident room (default: "incident"). +- **labels** (optional): List of labels to associate with the incident (default: ["keep-generated"]). +- **isDrill** (optional): Whether the incident is a drill or not (default: False). +- **severity** (optional): Severity of the incident (default: "minor"). +- **status** (optional): Status of the incident (default: "active"). +- **attachCaption** (optional): Caption for any attachment. +- **attachURL** (optional): URL for any attachment. +- **incidentID** (optional): ID of an existing incident to update. + +## Outputs + +Grafana Oncall Provider does not currently support the `query` function. + +## Authentication Parameters + +The Grafana Oncall Provider uses API token authentication. You need to provide the following authentication parameters to connect to Grafana OnCall: + +- **token** (required): Your Grafana OnCall API Token. +- **host** (required): The URL of your Grafana OnCall host (e.g., https://keephq.grafana.net). + +## Connecting with the Provider + +To connect to Grafana OnCall, you need to create an API Token: + +1. Log in to your Grafana OnCall account. +2. Go to the **API Tokens** page. +3. Click the **Generate Token** button and provide a name for your token. +4. Copy the token value and keep it secure. +5. Add the token value to the `authentication` section in the Grafana Oncall Provider configuration. + +## Notes + +- This provider allows you to interact with Grafana OnCall to create or update incidents. +- The `random_color` function generates a random color for incident labels. +- The `startTime` and `endTime` parameters use ISO-8601 format. +- The `notify` function returns information about the incident created or updated. 
+ +Payload example: + +```json +{ + "incident": { + "incidentID": "4", + "severity": "minor", + "labels": [ + { + "label": "keep-generated", + "description": "keep-generated", + "colorHex": "#9E0847" + } + ], + "isDrill": false, + "createdTime": "2023-09-10T10:31:58.030369Z", + "modifiedTime": "2023-09-10T10:31:58.030369Z", + "createdByUser": { + "userID": "grafana-incident:user-64fd801847a9191105b3c2df", + "name": "Service Account: keep-tests", + "photoURL": "https://www.gravatar.com/avatar/dbb34057685b3bc2bdc2a2808ec80772?s=512&d=retro" + }, + "closedTime": "", + "durationSeconds": 0, + "status": "active", + "title": "Test Incident", + "overviewURL": "/a/grafana-incident-app/incidents/4/test-incident", + "roles": [], + "taskList": { + "tasks": [ + { + "taskID": "must-choose-severity", + "immutable": true, + "createdTime": "2023-09-10T10:31:58.005917795Z", + "modifiedTime": "2023-09-10T10:31:58.005922353Z", + "text": "Specify incident severity", + "status": "done", + "authorUser": null, + "assignedUser": null + } + ], + "todoCount": 0, + "doneCount": 1 + }, + "summary": "", + "heroImagePath": "/api/hero-images/548564/uoKQrUg5gxteZJ6SdFrMOEhBiN6JtLHLmCSqDzDD0SX93NAhe6ChvhLORmTrSqbC2SEzde7YSKa94UcRsoizm45y3nCGv7eq7Zolk0Y5MzDJrhZRkwrksitQm2eR4iEV/v3/4.png", + "incidentStart": "2023-09-10T10:31:58.030369Z", + "incidentEnd": "" + } +} +``` + +## Useful Links + +- [Grafana OnCall](https://keephq.grafana.net) +- [Grafana OnCall API Documentation](https://keephq.grafana.net/docs/api) diff --git a/docs/providers/documentation/http-provider.mdx b/docs/providers/documentation/http-provider.mdx new file mode 100644 index 0000000000..38794fef08 --- /dev/null +++ b/docs/providers/documentation/http-provider.mdx @@ -0,0 +1,34 @@ +--- +title: "HTTP Provider" +description: "HTTP Provider is a provider used to query/notify using HTTP requests" +--- + +## Inputs + +The `query` method of the `HttpProvider` class takes the following inputs: + +- `url`: The URL of the HTTP endpoint to query. +- `method`: The HTTP method to use for the query, either "GET", "POST", "PUT", or "DELETE". +- `headers`: A dictionary of headers to include in the HTTP request. +- `body`: A dictionary of data to include in the HTTP request body, only used for `POST`, `PUT` requests. +- `params`: A dictionary of query parameters to include in the URL of the HTTP request. + +## Outputs + +The `query` method returns the JSON representation of the HTTP response, if the response is JSON-encoded, otherwise it returns the response text as a string. + +## Authentication Parameters + +The `HttpProvider` class does not have any authentication parameters, but the authentication for the HTTP endpoint can be included in the headers or in the URL query parameters. + +## Connecting with the Provider + +To connect to the provider, you can instantiate an instance of the `HttpProvider` class, providing a `provider_id` and a `ProviderConfig` object. Then you can call the `query` method to query the HTTP endpoint. + +## Notes + +The code logs some debug information about the requests being sent, including the request headers, body, and query parameters. This information should not contain sensitive information, but it's important to make sure of that before using this provider in production. 
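+
+## Example of usage
+
+A minimal workflow sketch that issues a GET request using the inputs described above (the endpoint URL and the `httptest` provider name are illustrative):
+
+```yaml
+workflow:
+  id: http-example
+  description: HTTP example
+  triggers:
+    - type: manual
+  actions:
+    - name: http
+      provider:
+        type: http
+        config: "{{ providers.httptest }}"
+        with:
+          url: "https://api.example.com/health"
+          method: "GET"
+          headers:
+            Content-Type: "application/json"
+          params:
+            verbose: "true"
+```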
+
+## Useful Links
+
+- [requests library documentation](https://docs.python-requests.org/en/latest/)
diff --git a/docs/providers/documentation/ilert-provider.mdx b/docs/providers/documentation/ilert-provider.mdx
new file mode 100644
index 0000000000..cd53fce113
--- /dev/null
+++ b/docs/providers/documentation/ilert-provider.mdx
@@ -0,0 +1,73 @@
+---
+title: "ilert Provider"
+sidebarTitle: "ilert Provider"
+description: "The ilert provider enables the creation, updating, and resolution of events or incidents on ilert, leveraging both incident management and event notification capabilities for effective incident response."
+---
+
+## Overview
+
+The ilert provider facilitates interaction with ilert's API, allowing for the management of incidents and events. This includes the ability to create, update, and resolve incidents, as well as send custom event notifications. This provider integrates Keep's system with ilert's robust alerting and incident management platform.
+
+## Inputs
+
+The `_type` parameter specifies the nature of the notification or action to be taken via the ilert API:
+
+- `incident`: This type is used for creating or updating incidents. It requires specific information such as incident summary, status, message, and details about affected services.
+- `event`: This type allows for sending customized event notifications that can be configured to alert, accept, or resolve specific conditions. It supports details such as event type, summary, details about the event, custom details, and links for more context.
+
+Depending on the `_type` specified, the provider will route the operation to the appropriate endpoint and handle the data according to ilert's requirements for incidents or events.
+
+### Incident Management
+
+- `summary`: A brief summary of the incident. This is required for creating a new incident.
+- `status`: `ilertIncidentStatus` - The current status of the incident (e.g., INVESTIGATING, RESOLVED, MONITORING, IDENTIFIED).
+- `message`: A detailed message describing the incident or situation. Default is an empty string.
+- `affectedServices`: A JSON string representing the list of affected services and their statuses. Default is an empty array (`"[]"`).
+- `id`: The ID of the incident to update. If set to `"0"`, a new incident will be created.
+
+### Event Notification
+
+- `event_type`: Type of the event to post (`ALERT`, `ACCEPT`, `RESOLVE`).
+- `details`: Detailed information about the event.
+- `alert_key`: A unique key for the event to allow de-duplication.
+- `priority`: Priority level of the event (`HIGH`, `LOW`).
+- `images`: List of image URLs to include with the event.
+- `links`: List of related links to include with the event.
+- `custom_details`: Custom key-value pairs to provide additional context.
+
+## Outputs
+
+Responses from ilert's API are JSON objects that include the status of the operation and any relevant incident or event details.
+
+## Authentication Parameters
+
+- `ALERT-SOURCE-API-KEY`: API token for authenticating with ilert's Alert Source API.
+- `ilert_host`: API host URL. Default is `https://api.ilert.com/api`.
+
+## Connecting with the Provider
+
+### Custom Integration: Adding Keep to ilert
+
+To integrate Keep with ilert, follow these steps:
+
+1. Log in to your ilert account.
+2. Navigate to "Alert Sources" under your account settings.
+3. Create a new alert source specifically for Keep.
+4. Note the `ALERT-SOURCE-API-KEY` provided for this alert source.
+
+The endpoint to send requests to for the Keep integration is:
+`https://api.ilert.com/api/v1/events/keep/{ALERT-SOURCE-API-KEY}`
+
+## Notes
+
+This provider is part of Keep's integration with ilert, designed to enhance operational resilience by enabling quick and effective incident response.
+
+## Useful Links
+
+- [ilert API Documentation](https://api.ilert.com/api-docs/?utm_campaign=Keep&utm_source=integration&utm_medium=organic)
+- [ilert Alerting](https://www.ilert.com/product/reliable-actionable-alerting?utm_campaign=Keep&utm_source=integration&utm_medium=organic)
diff --git a/docs/providers/documentation/incidentio-provider.mdx b/docs/providers/documentation/incidentio-provider.mdx
new file mode 100644
index 0000000000..bfbccc597f
--- /dev/null
+++ b/docs/providers/documentation/incidentio-provider.mdx
@@ -0,0 +1,53 @@
+---
+title: "Incident.io Provider"
+sidebarTitle: "Incident.io Provider"
+description: "The Incident.io provider enables the querying of incidents on Incident.io, leveraging incident management capabilities for effective response."
+---
+
+## Overview
+
+The Incident.io provider facilitates interaction with Incident.io's API, allowing for the management of incidents. This includes the ability to query specific incidents, retrieve all incidents, and manage incident details. This provider integrates Keep's system with Incident.io's robust incident management platform.
+
+### Query Specific Incident
+
+- `incident_id`: The ID of the incident to be queried. Required for fetching specific incident details.
+
+## Outputs
+
+Returns the specific incident with id=`incident_id`.
+
+## Authentication Parameters
+
+- `incidentIoApiKey`: API key for authenticating with Incident.io's API.
+
+## Scopes
+
+- `authenticated`: Mandatory for all operations, ensures the user is authenticated.
+- `read_access`: Mandatory for querying incidents, ensures the user has read access.
+
+## Connecting with the Provider
+
+### API Key
+
+To use the Incident.io API:
+1. Log in to your Incident.io account.
+2. Navigate to the "API Keys" section under your account settings.
+3. Generate a new API key or use an existing one.
+4. Ensure it has `read` permissions enabled for reading and managing incidents.
+
+### Incident Endpoint
+
+The Incident.io incident endpoint allows querying and managing incidents. Operations include retrieving specific incident details or fetching a list of all incidents. This is crucial for monitoring and responding to incidents efficiently.
+
+For more details, refer to the [Incident.io API Documentation](https://api-docs.incident.io/).
+
+## Notes
+
+This provider is part of Keep's integration with Incident.io, designed to enhance operational resilience by enabling efficient incident management and response.
+
+## Useful Links
+
+- [Incident.io API Documentation](https://api-docs.incident.io/)
+- [Incident.io Incidents](https://api-docs.incident.io/tag/Incidents-V2)
+- [Incident.io Api_Keys and Permissions](https://help.incident.io/en/articles/6149651-our-api)
diff --git a/docs/providers/documentation/incidentmanager-provider.mdx b/docs/providers/documentation/incidentmanager-provider.mdx
new file mode 100644
index 0000000000..bdc7eaa8e8
--- /dev/null
+++ b/docs/providers/documentation/incidentmanager-provider.mdx
@@ -0,0 +1,48 @@
+---
+title: "Incident Manager Provider"
+sidebarTitle: "Incident Manager Provider"
+---
+
+The Incident Manager Provider allows you to push incidents from AWS Incident Manager to Keep.
+
+## Authentication Configuration
+
+To authenticate with the Incident Manager Provider, you need to provide the following configuration parameters:
+
+- `access_key`: AWS access key (required, sensitive)
+- `access_key_secret`: AWS access key secret (required, sensitive)
+- `region`: AWS region (required)
+- `response_plan_arn`: AWS Response Plan's ARN (required; the default response plan ARN to use when interacting with incidents. If not provided, we won't be able to register a webhook for the incidents)
+- `sns_topic_arn`: The AWS SNS Topic ARN you want to use in the response plan (required; the default SNS topic to use when creating incidents. If not provided, we won't be able to register a webhook for the incidents)
+
+## Provider Scopes
+
+The Incident Manager Provider requires the following provider scopes:
+
+- `ssm-incidents:ListIncidentRecords`: Required to retrieve incidents. [Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html) (mandatory, alias: Describe Incidents)
+- `ssm-incidents:GetResponsePlan`: Required to get the response plan and register Keep as a webhook. [Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html) (optional, alias: Update Response Plan)
+- `ssm-incidents:UpdateResponsePlan`: Required to update the response plan and register Keep as a webhook. [Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html) (optional, alias: Update Response Plan)
+- `iam:SimulatePrincipalPolicy`: Allows Keep to test the scopes of the current user/role without modifying any resource. [Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html) (optional, alias: Simulate IAM Policy)
+- `sns:ListSubscriptionsByTopic`: Required to list all subscriptions of a topic, so Keep will be able to add itself as a subscription. [Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ssm-incidents.html) (optional, alias: List Subscriptions)
+
+## Status Map
+
+The Incident Manager Provider maps the following statuses:
+
+- "OPEN" to AlertStatus.FIRING
+- "RESOLVED" to AlertStatus.RESOLVED
+
+## Severities Map
+
+The Incident Manager Provider maps the following severities:
+
+- 1 to AlertSeverity.CRITICAL
+- 2 to AlertSeverity.HIGH
+- 3 to AlertSeverity.LOW
+- 4 to AlertSeverity.WARNING
+- 5 to AlertSeverity.INFO
+
+## Notes
+1. Incident Manager only sends notifications when a chat channel is attached to the response plan. Make sure to attach a chat channel to the response plan before adding the webhook.
diff --git a/docs/providers/documentation/jira-on-prem-provider.mdx b/docs/providers/documentation/jira-on-prem-provider.mdx
new file mode 100644
index 0000000000..b62293155c
--- /dev/null
+++ b/docs/providers/documentation/jira-on-prem-provider.mdx
@@ -0,0 +1,7 @@
+---
+title: "Jira On-Prem Provider"
+sidebarTitle: "Jira On-Prem Provider"
+description: "Jira On-Prem Provider is a provider used to query data and create issues in Jira"
+---
+
+Keep supports Jira On-Prem as a provider. Please check the [Jira Provider](./jira-provider.md) for documentation.
diff --git a/docs/providers/documentation/jira-provider.mdx b/docs/providers/documentation/jira-provider.mdx
new file mode 100644
index 0000000000..b00d669a00
--- /dev/null
+++ b/docs/providers/documentation/jira-provider.mdx
@@ -0,0 +1,44 @@
+---
+title: "Jira Cloud Provider"
+sidebarTitle: "Jira Cloud Provider"
+description: "Jira Cloud provider is a provider used to query data and create issues in Jira"
+---
+
+## Inputs
+
+The `query` function takes the following parameters as inputs:
+
+- `host` (required): Jira host name of the project.
+- `board_id` (required): Jira board ID.
+- `email` (required): Your account email.
+
+The `notify` function takes the following parameters as inputs:
+
+- `host` (required): Jira host name of the project.
+- `email` (required): Your account email.
+- `project_key` (required): Your Jira project key.
+- `summary` (required): Incident/issue name or short description.
+- `description` (optional): Additional details related to the incident/issue.
+- `issue_type` (optional): Issue type name. For example: `Story`, `Bug`, etc.
+- `issue_id` (optional): When you want to update an existing issue, provide the issue ID.
+
+## Outputs
+
+## Authentication Parameters
+
+The `query` and `notify` functions require an `api_token` from Jira.
+
+## Connecting with the Provider
+
+1. Go to https://id.atlassian.com/manage-profile/security/api-tokens to create an API token; the generated token should be passed to the Jira authentication.
+2. Get `host` and `board_id` from your respective board's URL.
+3. Get `project_key` from your project > settings > details.
+4. `email` is the same as your account email.
+
+## Notes
+
+## Useful Links
+
+- https://id.atlassian.com/manage-profile/security/api-tokens
+- https://developer.atlassian.com/cloud/jira/software/rest/api-group-board/#api-rest-agile-1-0-board-boardid-issue-get
+- https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issues/#api-rest-api-2-issue-post
diff --git a/docs/providers/documentation/kafka-provider.mdx b/docs/providers/documentation/kafka-provider.mdx
new file mode 100644
index 0000000000..5c41693d93
--- /dev/null
+++ b/docs/providers/documentation/kafka-provider.mdx
@@ -0,0 +1,58 @@
+---
+title: "Kafka"
+sidebarTitle: "Kafka Provider"
+description: "Kafka provider allows integration with Apache Kafka for producing and consuming messages."
+---
+
+## Inputs
+
+- `topic`: str : The Kafka topic to produce/consume messages from.
+- `message`: str (optional) : The message to send to the Kafka topic when producing (not required for consuming).
+- `action`: str : The action to perform (`produce` or `consume`).
+
+## Outputs
+
+- `result`: The result of the action. If consuming, this will return the message(s) from the Kafka topic.
+
+## Authentication Parameters
+
+- `kafka_broker`: The URL of the Kafka broker (e.g., `localhost:9092` or the broker's public URL).
+- `kafka_client_id`: The client ID to authenticate the Kafka producer/consumer.
+- `kafka_security_protocol`: (Optional) Security protocol for Kafka (e.g., `PLAINTEXT`, `SSL`, `SASL_SSL`).
+- `kafka_sasl_mechanism`: (Optional) SASL mechanism for authentication (e.g., `PLAIN`, `SCRAM-SHA-256`).
+- `kafka_username` & `kafka_password`: (Optional) Username and password for SASL authentication if required.
+
+## Connecting with the Provider
+
+1. Set up a Kafka broker (or use an existing one) and make sure it is accessible.
+2. Get the broker URL (e.g., `localhost:9092` or a remote Kafka service URL).
+3. (Optional) If using secure communication, provide the security protocol, SASL mechanism, username, and password.
+4. Configure the provider with these parameters.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: kafka-example
+  description: Kafka example
+  triggers:
+    - type: manual
+  actions:
+    - name: kafka-produce
+      provider:
+        type: kafka
+        config: "{{ providers.kafkatest }}"
+        with:
+          topic: "example-topic"
+          action: "produce"
+          message: "Hello, Kafka!"
+
+    - name: kafka-consume
+      provider:
+        type: kafka
+        config: "{{ providers.kafkatest }}"
+        with:
+          topic: "example-topic"
+          action: "consume"
+```
+
+## Useful Links
+- [Kafka Clients Documentation](https://kafka.apache.org/documentation/)
\ No newline at end of file
diff --git a/docs/providers/documentation/keep-provider.mdx b/docs/providers/documentation/keep-provider.mdx
new file mode 100644
index 0000000000..059bdd0a08
--- /dev/null
+++ b/docs/providers/documentation/keep-provider.mdx
@@ -0,0 +1,42 @@
+---
+title: "Keep"
+sidebarTitle: "Keep Provider"
+description: "Keep provider allows you to query and manage alerts in Keep."
+---
+
+## Inputs
+
+- `query`: str : The query to retrieve alerts based on specific criteria.
+- `filter`: dict : Optional filters to narrow down the query results.
+
+## Outputs
+
+- `alerts`: list : A list of alerts that match the query criteria.
+
+## Authentication Parameters
+
+To use the Keep provider, you must authenticate with an API token associated with your Keep account. This token can be generated from your Keep dashboard.
+
+## Connecting with the Provider
+
+1. Log in to your Keep account.
+2. Navigate to the API section of your account dashboard and generate an API token.
+3. Use this token to authenticate when querying alerts via the Keep provider.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: keep-example
+  description: Keep example
+  triggers:
+    - type: manual
+  actions:
+    - name: keep-query
+      provider:
+        type: keep
+        config: "{{ providers.keeptest }}"
+        with:
+          query: "severity:critical"
+          filter:
+            status: "open"
+```
diff --git a/docs/providers/documentation/kibana-provider.mdx b/docs/providers/documentation/kibana-provider.mdx
new file mode 100644
index 0000000000..f9f0ad399b
--- /dev/null
+++ b/docs/providers/documentation/kibana-provider.mdx
@@ -0,0 +1,93 @@
+---
+title: "Kibana"
+sidebarTitle: "Kibana Provider"
+description: "Kibana provider allows you to get alerts from Kibana Alerting via webhooks."
+---
+
+
+  Please note that when installing Kibana with Webhook auto instrumentation,
+  Keep installs itself as a Connector, adds itself as an Action to all available
+  Kibana Alert Rules (For each alert, On status changes, when: Alert/No
+  Data/Recovered) and to all available Kibana Watcher rules as a Webhook action.
+
+For more information, feel free to reach out on our Slack Community.
+
+
+
+## Inputs
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Outputs
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Authentication Parameters
+
+The `api_key` and `kibana_host` are required for connecting to the Kibana provider. You can obtain them as described in the "Connecting with the Provider" section.
+`kibana_port` can be used to override the default Kibana port (9243).
+
+## Connecting with the Provider
+
+### Kibana Host
+
+Simply copy the hostname from the URL bar in your browser:
+
+Kibana Host
+
+### API Key
+
+To obtain a Kibana API key, follow these steps:
+
+1. Log in to your Kibana account.
+2. Click Stack Management
+3. Click on Security
+4. Click on API Keys
+
+Kibana API Keys
+
+5. Click on the top right `Create API key` button
+6. Give the API key an indicative name (e.g. keep-api-key)
+7. Make sure the `Restrict Permissions` toggle is not toggled
+8. On the bottom right corner, click on `Create API key`
+
+Create Kibana API Key
+
+9. Copy the newly created encoded API key and you're set!
+
+Copy Kibana API Key
+
+## Fingerprinting
+
+Fingerprints in Kibana are simply the alert instance ID.
+
+## Scopes
+
+Certain scopes may be required to perform specific actions or queries via the Kibana Provider. Below is a summary of relevant scopes and their use cases:
+
+- rulesSettings:read (Read alerts)
+  Required: True
+  Description: Read alerts.
+- rulesSettings:write (Modify Alerts)
+  Required: True
+  Description: Modify alerts.
+- actions:read (Read connectors)
+  Required: True
+  Description: Read connectors.
+- actions:write (Write connectors)
+  Required: True
+  Description: Write connectors.
+
+## Notes
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Useful Links
+
+- [Kibana Alerting](https://www.elastic.co/guide/en/kibana/current/alerting-getting-started.html)
+- [Kibana Connectors](https://www.elastic.co/guide/en/kibana/current/action-types.html)
diff --git a/docs/providers/documentation/kubernetes-provider.mdx b/docs/providers/documentation/kubernetes-provider.mdx
new file mode 100644
index 0000000000..3d6db3d66c
--- /dev/null
+++ b/docs/providers/documentation/kubernetes-provider.mdx
@@ -0,0 +1,41 @@
+---
+title: "Kubernetes"
+description: "Kubernetes provider to perform rollout restart or list pods action."
+---
+
+## Inputs
+
+- **action** (required): Determines which action to perform (`rollout_restart`, `list_pods`).
+- **kind** (required): Kind of the object on which to perform the rollout restart action.
+- **object_name** (required): Name of the object on which to perform the rollout restart action.
+- **namespace** (required): Namespace of the object on which to perform the rollout restart or list pods action.
+- **labels** (optional): Labels to filter the pods when performing the list pods action; also used as a filter before performing a rollout restart.
+
+## Outputs
+
+- **message**: Message for the action performed.
+
+## Authentication Parameters
+
+This provider authenticates with Kubernetes using: api_server, token and insecure.
+
+- **api_server** (required): The API server URL of your Kubernetes cluster.
+- **token** (required): The token of your service account to authenticate with Kubernetes.
+- **insecure** (optional): If you want to skip the certificate verification, set this to `True` (default: True).
+
+## Connecting with the Provider
+
+To connect to Kubernetes, follow the steps below:
+
+1. Create a service account on Kubernetes.
+2. Create a role/clusterrole and bind it to the service account using a rolebinding/clusterrolebinding.
+3. Get the token of the service account.
+
+## Notes
+
+- This provider allows you to interact with Kubernetes to perform rollout restart or pod listing actions.
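+
+## Example of usage
+
+For illustration, here is a minimal workflow sketch using the inputs above. The provider alias (`kubernetestest`) and the resource names are hypothetical:
+
+```yaml
+workflow:
+  id: kubernetes-example
+  description: Kubernetes rollout restart example (illustrative)
+  triggers:
+    - type: manual
+  actions:
+    - name: restart-deployment
+      provider:
+        type: kubernetes
+        config: "{{ providers.kubernetestest }}"
+        with:
+          action: "rollout_restart"
+          kind: "deployment"
+          object_name: "my-app"
+          namespace: "default"
+```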
+
+## Useful Links
+
+- [Access Kubernetes Cluster](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/)
+
diff --git a/docs/providers/documentation/linear_provider.mdx b/docs/providers/documentation/linear_provider.mdx
new file mode 100644
index 0000000000..5d5b068390
--- /dev/null
+++ b/docs/providers/documentation/linear_provider.mdx
@@ -0,0 +1,40 @@
+---
+title: "Linear Provider"
+sidebarTitle: "Linear Provider"
+description: "Linear Provider is a provider for fetching data and creating issues in the Linear app."
+---
+
+## Inputs
+
+- **team_name** (required): The team name associated with the issue.
+- **project_name** (required): The project name associated with the issue.
+- **title** (required): The title of the incident.
+- **description** (optional): Additional details of the incident.
+- **priority** (optional): The priority of the Linear issue (numeric value from 0 to 4).
+
+## Outputs
+
+Linear Provider supports both `query` and `notify` methods.
+
+## Authentication Parameters
+
+The Linear Provider uses `api_token` for request authorization. You need to provide the following:
+
+- **api_token** (required): The personal API key for your Linear app.
+  - How to obtain:
+    1. Visit the Linear app or website.
+    2. Log in to your Linear account.
+    3. Navigate to your account settings.
+    4. Navigate to the API page.
+    5. Under the Personal API keys section, generate a key.
+    6. Copy the generated API token.
+
+## Notes
+
+- This provider allows you to query projects for the given Linear team.
+- This provider allows you to notify (create an issue) inside the Linear app for a given project and team.
+
+## Useful Links
+
+- [Linear](https://linear.app)
+- [Linear Docs](https://developers.linear.app/docs/graphql/working-with-the-graphql-api)
diff --git a/docs/providers/documentation/linearb-provider.mdx b/docs/providers/documentation/linearb-provider.mdx
new file mode 100644
index 0000000000..0a88dadd8f
--- /dev/null
+++ b/docs/providers/documentation/linearb-provider.mdx
@@ -0,0 +1,52 @@
+---
+title: "LinearB"
+sidebarTitle: "LinearB Provider"
+description: "The LinearB provider enables integration with LinearB's API to manage and notify incidents directly through webhooks."
+---
+
+
+  The LinearB provider facilitates the automatic creation, update, and deletion of incidents in LinearB through its public API. It supports dynamic incident management based on operational events, allowing teams to synchronize their development metrics and alerts with LinearB's project management capabilities.
+
+For any support or questions, join our community on Slack or GitHub.
+
+
+
+## Inputs
+
+- `provider_id`: Unique identifier for the provider instance.
+- `http_url`: The URL to be associated with the incident for direct access.
+- `title`: Title of the incident.
+- `teams`: JSON string of teams involved in the incident.
+- `respository_urls`: JSON string of repository URLs related to the incident.
+- `services`: JSON string of services affected by the incident.
+- `started_at`: Incident start time in ISO format.
+- `ended_at`: Incident end time in ISO format.
+- `git_ref`: Git reference (branch, tag, commit) associated with the incident.
+
+## Outputs
+
+- JSON response from the LinearB API indicating the success or failure of the operation.
+
+## Authentication Parameters
+
+- `api_token`: Required for authenticating with LinearB's API. This token must be kept secure as it allows access to manage incidents.
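+
+For illustration, a minimal workflow sketch using the inputs above might look like this. The provider alias (`linearbtest`) and all values are hypothetical:
+
+```yaml
+workflow:
+  id: linearb-example
+  description: LinearB incident example (illustrative)
+  triggers:
+    - type: manual
+  actions:
+    - name: linearb-incident
+      provider:
+        type: linearb
+        config: "{{ providers.linearbtest }}"
+        with:
+          provider_id: "keep-incident-1"
+          http_url: "https://example.com/incidents/1"
+          title: "Checkout service degraded"
+          started_at: "2024-01-01T00:00:00Z"
+```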
+
+## Connecting with the Provider
+
+### Obtaining an API Token
+
+To use the LinearB provider, you must obtain an API token from LinearB:
+
+1. Sign in to your LinearB account.
+2. Navigate to the API settings section.
+3. Generate a new API token with the appropriate permissions.
+4. Securely store the API token as it is needed to configure the LinearB provider in Keep.
+
+### Useful Links
+
+- [LinearB API Reference](https://docs.linearb.io/api-overview/)
diff --git a/docs/providers/documentation/mailchimp-provider.mdx b/docs/providers/documentation/mailchimp-provider.mdx
new file mode 100644
index 0000000000..30a106c9ea
--- /dev/null
+++ b/docs/providers/documentation/mailchimp-provider.mdx
@@ -0,0 +1,67 @@
+---
+title: "Mailchimp"
+sidebarTitle: "Mailchimp Provider"
+---
+
+# Mailchimp Provider
+
+MailchimpProvider is a class that implements the Mailchimp API and allows email sending through Keep.
+
+## Inputs
+The `notify` function of `MailchimpProvider` takes the following arguments:
+
+- `_from` (str): Required. The email address of the sender.
+- `to` (str): Required. The email address of the recipient.
+- `subject` (str): Required. The subject of the email.
+- `html` (str): Required. The HTML body of the email.
+- `**kwargs` (optional): Additional optional parameters can be provided as key-value pairs.
+
+See [documentation](https://mailchimp.com/docs/api-reference/emails/send-email) for more
+
+## Outputs
+The `notify` function of `MailchimpProvider` outputs the following format (example):
+
+```json
+{
+  "email": "user@example.com",
+  "status": "sent",
+  "_id": "8db77476a09d4b47ae1b9bc69d1c74e3",
+  "reject_reason": null,
+  "queued_reason": null
+}
+```
+
+See [documentation](https://mailchimp.com/developer/transactional/guides/quick-start/) for more
+
+
+## Authentication Parameters
+The Mailchimp provider requires the following authentication parameter:
+
+- `api_key`: Required. Mailchimp Transactional API key. You can obtain an API key by visiting [Mailchimp API Keys](https://mandrillapp.com//settings).
+
+## Connecting with the Provider
+To connect with the Mailchimp provider and send emails through Keep, follow these steps:
+
+1. Obtain a Mailchimp Transactional API key: Visit [Mailchimp API Keys](https://mandrillapp.com//settings) to obtain an API key if you don't have one already.
+2. Configure the Mailchimp provider in your system with the obtained API key.
+3. Use the following YAML example to send an email notification using the Mailchimp provider:
+
+```yaml title=examples/alert_example.yml
+# Send an email notification using the Mailchimp provider.
+alert:
+  id: email-notification
+  description: Send an email notification using Mailchimp
+  actions:
+    - name: send-email
+      provider:
+        type: mailchimp
+        config: "{{ providers.mailchimp-provider }}"
+        with:
+          _from: "sender@example.com"
+          to: "recipient@example.com"
+          subject: "Hello from Mailchimp Provider"
+          html: "<p>This is the email body.</p>"
+```
+
+## Useful Links
+- [Mailchimp API Keys](https://mailchimp.com/developer/transactional/guides/quick-start/#generate-your-api-key)
diff --git a/docs/providers/documentation/mailgun-provider.mdx b/docs/providers/documentation/mailgun-provider.mdx
new file mode 100644
index 0000000000..0cd814c0f5
--- /dev/null
+++ b/docs/providers/documentation/mailgun-provider.mdx
@@ -0,0 +1,75 @@
+---
+title: "Mailgun Provider"
+description: "Mailgun Provider allows sending alerts to Keep via email."
+---
+
+
+  Mailgun currently supports receiving alerts via email. We will add querying
+  and notifying soon.
+
+
+## Inputs
+
+Mailgun Provider does not currently support the `notify` function.
+
+## Outputs
+
+Mailgun Provider does not currently support the `query` function.
+
+## Authentication Parameters
+
+The Mailgun Provider uses API token authentication. You need to provide the following authentication parameters to connect to Mailgun:
+
+- **email** (optional): Email address to send alerts to. This will get populated automatically after installation.
+- **sender** (optional): Sender email address to validate. For example, `.*@keephq.dev`. Leave empty for any.
+- **extraction** (optional): Extraction Rules. Read more about extraction in Keep's Mailgun documentation.
+
+## Connecting with the Provider
+
+To connect to Mailgun, you do not need to perform any actions on the Mailgun side. We use our own Mailgun account and handle everything for you.
+
+## Post Installation Validation
+
+You can check that the Mailgun Provider works by sending a test email to the configured email address.
+
+1. Send a test email to the email address provided in the `authentication` section.
+2. Check Keep's platform to see if the alert is received.
+
+
+
+## Default Alert Values
+
+When no extraction rules are set, the default values for every alert are as follows:
+
+- **name**: The subject of the email.
+- **source**: The sender of the email.
+- **message**: The stripped text content of the email.
+- **timestamp**: The timestamp of the email, converted to ISO format.
+- **severity**: "info"
+- **status**: "firing"
+
+## How Extraction Works
+
+Extraction rules allow you to extract specific information from the email content using regular expressions. This can be useful for parsing and structuring the alert data.
+
+
+
+### Example Extraction Rule
+
+An extraction rule is defined as a dictionary with the following keys:
+
+- **key**: The key in the email event to apply the extraction rule to.
+- **value**: The regular expression to use for extraction.
+
+#### Example
+
+Extract the severity from the subject of the email.
+
+```
+Key: subject
+Value: (?P<severity>\w+):
+```
diff --git a/docs/providers/documentation/mattermost-provider.mdx b/docs/providers/documentation/mattermost-provider.mdx
new file mode 100644
index 0000000000..744d6d9fce
--- /dev/null
+++ b/docs/providers/documentation/mattermost-provider.mdx
@@ -0,0 +1,35 @@
+---
+title: "Mattermost Provider"
+sidebarTitle: "Mattermost Provider"
+description: "Mattermost provider is used to send messages to Mattermost."
+---
+
+## Inputs
+
+The `notify` function takes the following parameters as inputs:
+
+- `message`: Optional. The alert message to send to Mattermost.
+- `blocks`: Optional. An array of blocks to format the message content.
+- `channel`: Optional. The Mattermost channel to which the message should be sent.
+
+## Outputs
+
+N/A
+
+## Authentication Parameters
+
+The `MattermostProvider` requires the following authentication parameter:
+
+- `webhook_url`: Required. Mattermost Webhook URL.
+
+## Connecting with the Provider
+
+1. **Obtain a Mattermost Webhook URL:**
+   - Go to the Mattermost Incoming Webhook API documentation: [Mattermost Incoming Webhooks](https://docs.mattermost.com/developer/webhooks-incoming.html).
+   - Follow the instructions to create a new incoming webhook.
+   - Copy the generated webhook URL, which should be passed as the `webhook_url` for authentication.
+
+
+## Useful Links
+
+- [Mattermost Incoming Webhooks](https://developers.mattermost.com/integrate/webhooks/incoming/)
diff --git a/docs/providers/documentation/microsoft-planner-provider.mdx b/docs/providers/documentation/microsoft-planner-provider.mdx
new file mode 100644
index 0000000000..6eb9031943
--- /dev/null
+++ b/docs/providers/documentation/microsoft-planner-provider.mdx
@@ -0,0 +1,44 @@
+---
+title: "Microsoft Planner Provider"
+description: "Microsoft Planner Provider for creating tasks in Planner."
+---
+
+## Inputs
+
+- **title** (required): The title of the task to be created.
+- **plan_id** (required): The ID of the Planner plan where the task will be created.
+- **bucket_id** (optional): The ID of the bucket where the task will be placed.
+
+
+
+## Authentication Parameters
+
+The Microsoft Planner Provider uses the following parameters to generate an access token. You need to provide them to connect to the Microsoft Planner Provider:
+
+- **client_id** (required): The client ID of your registered application in Azure.
+- **client_secret** (required): The client secret generated for your registered application in Azure.
+- **tenant_id** (required): The tenant ID where the authentication app was registered in Azure.
+
+## Connecting with the Provider
+
+To connect to Microsoft Planner, follow these steps:
+
+1. Log in to your [Azure](https://azure.microsoft.com/) account.
+2. Register a new application [here](https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps/CreateApplicationBlade/isMSAApp~/false).
+3. After successfully registering the application, navigate to the **API permissions** page and add the following permissions:
+   - `Tasks.Read.All`
+   - `Tasks.ReadWrite.All`
+4. Go to the **Overview** page and make note of the `Application (client) ID` and `Directory (tenant) ID`.
+5. Visit the **Certificates & secrets** page, create a new client secret, and make note of the client secret value.
+6. Add the client ID, client secret, and tenant ID to the `authentication` section in the Microsoft Planner Provider configuration.
+
+## Notes
+
+- This provider enables you to interact with Microsoft Planner to create tasks.
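+
+## Example of usage
+
+For illustration, a minimal workflow sketch using the inputs above. The provider `type` string, the alias (`plannertest`), and the IDs are hypothetical:
+
+```yaml
+workflow:
+  id: planner-example
+  description: Microsoft Planner example (illustrative)
+  triggers:
+    - type: manual
+  actions:
+    - name: create-task
+      provider:
+        type: planner # assumed type string
+        config: "{{ providers.plannertest }}"
+        with:
+          title: "Investigate alert"
+          plan_id: "YOUR_PLAN_ID"
+```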
+
+## Useful Links
+
+- [Microsoft Planner](https://learn.microsoft.com/en-us/graph/api/resources/planner-overview?view=graph-rest-1.0)
+- [Azure](https://azure.microsoft.com/)
diff --git a/docs/providers/documentation/mock-provider.mdx b/docs/providers/documentation/mock-provider.mdx
new file mode 100644
index 0000000000..7b95f37a29
--- /dev/null
+++ b/docs/providers/documentation/mock-provider.mdx
@@ -0,0 +1,29 @@
+---
+title: "Mock"
+sidebarTitle: "Mock Provider"
+description: "Template Provider is a template for newly added provider's documentation"
+---
+
+## Inputs
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Outputs
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Authentication Parameters
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Connecting with the Provider
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Notes
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Useful Links
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
diff --git a/docs/providers/documentation/mongodb-provider.mdx b/docs/providers/documentation/mongodb-provider.mdx
new file mode 100644
index 0000000000..6294c93ae5
--- /dev/null
+++ b/docs/providers/documentation/mongodb-provider.mdx
@@ -0,0 +1,47 @@
+---
+title: "MongoDB"
+sidebarTitle: "MongoDB Provider"
+description: "MongoDB Provider is a provider used to query MongoDB databases"
+---
+
+## Inputs
+
+The `query` function of `MongoDBProvider` takes the following arguments:
+
+- `query` (str): A string containing the query to be executed against the MongoDB database.
+- `single_row` (bool, optional): If `True`, the function will return only the first result.
+
+## Outputs
+
+The `query` function returns either a `list` or a `tuple` of results, depending on whether `single_row` was set to `True` or not. If `single_row` was `True`, then the function returns a single result.
+
+## Authentication Parameters
+
+The following authentication parameters are used to connect to the MongoDB database:
+
+- `host` (str): The MongoDB connection URI. It can be a full URI (including database, authSource, user, and password) or just the host IP.
+- `username` (str, optional): The MongoDB username.
+- `password` (str, optional): The MongoDB password.
+- `database` (str, optional): The name of the MongoDB database.
+- `authSource` (str, optional): The name of the database against which authentication needs to be done.
+- `additional_options` (str, optional): Additional options to be passed to MongoClient as kwargs.
+
+## Connecting with the Provider
+
+In order to connect to the MongoDB database, you can use either a connection URI or individual parameters. Here's how you can provide authentication information:
+
+1. If using a connection URI, provide the `host` parameter with the MongoDB connection string.
+2. If using individual parameters, provide the following:
+   - `username`: MongoDB username.
+   - `password`: MongoDB password.
+   - `host`: MongoDB hostname.
+   - `database`: MongoDB database name.
+   - `authSource`: MongoDB database name.
+
+## Notes
+
+- Ensure that the provided user has the necessary privileges to execute queries on the specified MongoDB database.
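+
+## Example of usage
+
+For illustration, a minimal workflow sketch using the `query` function. The provider alias (`mongodbtest`) and the query string are hypothetical:
+
+```yaml
+workflow:
+  id: mongodb-example
+  description: MongoDB example (illustrative)
+  triggers:
+    - type: manual
+  actions:
+    - name: mongodb-query
+      provider:
+        type: mongodb
+        config: "{{ providers.mongodbtest }}"
+        with:
+          query: "db.alerts.find({'severity': 'critical'})"
+          single_row: false
+```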
+
+## Useful Links
+
+- [MongoDB Documentation](https://docs.mongodb.com/)
diff --git a/docs/providers/documentation/mysql-provider.mdx b/docs/providers/documentation/mysql-provider.mdx
new file mode 100644
index 0000000000..3d5edb82bb
--- /dev/null
+++ b/docs/providers/documentation/mysql-provider.mdx
@@ -0,0 +1,41 @@
+---
+title: "MySQL"
+sidebarTitle: "MySQL Provider"
+description: "MySQL Provider is a provider used to query MySQL databases"
+---
+
+## Inputs
+
+The `query` function of `MysqlProvider` takes the following arguments:
+
+- `query` (str): A string containing the query to be executed against the MySQL database.
+- `single_row` (bool, optional): If `True`, the function will return only the first result.
+
+## Outputs
+
+The `query` function returns either a `list` or a `tuple` of results, depending on whether `single_row` was set to `True` or not. If `single_row` was `True`, then the function returns a single result.
+
+## Authentication Parameters
+
+The following authentication parameters are used to connect to the MySQL database:
+
+- `username` (str): The MySQL username.
+- `password` (str): The MySQL password.
+- `host` (str): The MySQL hostname.
+- `database` (str, optional): The name of the MySQL database.
+
+## Connecting with the Provider
+
+In order to connect to the MySQL database, you will need to create a new user with the required permissions. Here's how you can do this:
+
+1. Connect to the MySQL server as a user with sufficient privileges to create a new user.
+2. Run the following command to create a new user:
+   `CREATE USER '<username>'@'<host>' IDENTIFIED BY '<password>';`
+3. Grant the necessary permissions to the new user by running the following command:
+   `GRANT ALL PRIVILEGES ON <database>.* TO '<username>'@'<host>';`
+
+## Notes
+
+## Useful Links
+
+- [MySQL Documentation](https://dev.mysql.com/doc/refman/8.0/en/)
diff --git a/docs/providers/documentation/netdata-provider.mdx b/docs/providers/documentation/netdata-provider.mdx
new file mode 100644
index 0000000000..b013564ecf
--- /dev/null
+++ b/docs/providers/documentation/netdata-provider.mdx
@@ -0,0 +1,35 @@
+---
+title: "Netdata"
+sidebarTitle: "Netdata Provider"
+description: "Netdata provider allows you to get alerts from Netdata via webhooks."
+---
+
+## Overview
+
+The Netdata Provider enables seamless integration between Keep and Netdata, allowing alerts from Netdata to be directly sent to Keep through webhook configurations. This integration ensures that critical alerts are efficiently managed and responded to within Keep's platform.
+
+## Connecting Netdata to Keep
+
+To connect Netdata to Keep, you need to configure it as a webhook from Netdata. Follow the steps below to set up the integration:
+
+1. In Netdata, go to Space settings.
+2. Go to "Alerts & Notifications".
+3. Click on "Add configuration".
+4. Add "Webhook" as the notification method.
+5. Add a name to the configuration.
+6. Select Room(s) to apply the configuration.
+7. Select Notification(s) to apply the configuration.
+8. In the "Webhook URL" field, add `https://api.keephq.dev/alerts/event/netdata`.
+9. Generate an API key with the webhook role from the Keep settings.
+10. Add a request header with the key "x-api-key" and the API key as the value.
+11. Leave the Authentication as "No Authentication".
+12. Add the "Challenge secret" as "keep-netdata-webhook-integration".
+13. Save the configuration.
+
+## Useful Links
+
+- [Netdata](https://www.netdata.cloud/)
+
+## Note
+
+- Currently, Netdata doesn't support webhooks in on-premises installations.
diff --git a/docs/providers/documentation/new-relic-provider.mdx b/docs/providers/documentation/new-relic-provider.mdx
new file mode 100644
index 0000000000..165f74cb46
--- /dev/null
+++ b/docs/providers/documentation/new-relic-provider.mdx
@@ -0,0 +1,33 @@
+---
+title: "New Relic"
+sidebarTitle: "New Relic Provider"
+description: "New Relic Provider enables querying AI alerts and registering webhooks."
+---
+
+## Inputs
+
+- `account_id` (required): Account ID of the New Relic account.
+
+## Authentication Parameters
+- `account_id` (required): Account ID of the New Relic account.
+- `api_key` (required): New Relic User key. To receive webhooks, use the `User key` of an admin account.
+- `api_url` (required): API URL to query NRQL from, either US- or EU-based.
+
+## Connecting with the Provider
+
+1. Go to https://one.newrelic.com/admin-portal/api-keys/home to create a User key.
+2. Get the `api_key` and `account_id` from the key created.
+3. Based on your region, get the `api_url` from https://docs.newrelic.com/docs/apis/rest-api-v2/get-started/introduction-new-relic-rest-api-v2.
+
+## Webhook Integration Modifications
+
+The webhook integration adds Keep as a destination within the "Alerts and AI" API within New Relic.
+This grants Keep access to the following scopes within New Relic:
+- `ai.destinations:read`
+- `ai.destinations:write`
+- `ai.channels:read`
+- `ai.channels:write`
+
+## Useful Links
+
+- https://docs.newrelic.com/docs/apis/rest-api-v2/get-started/introduction-new-relic-rest-api-v2
diff --git a/docs/providers/documentation/ntfy-provider.mdx b/docs/providers/documentation/ntfy-provider.mdx
new file mode 100644
index 0000000000..994bd41a83
--- /dev/null
+++ b/docs/providers/documentation/ntfy-provider.mdx
@@ -0,0 +1,59 @@
+---
+title: "Ntfy.sh"
+sidebarTitle: "Ntfy.sh Provider"
+description: "Ntfy.sh allows you to send notifications to your devices"
+---
+
+## Authentication Parameters
+
+The Ntfy.sh provider requires the following authentication parameters:
+
+- `Ntfy Access Token`: The access token for the Ntfy.sh account. This is required for the Ntfy.sh provider.
+- `Ntfy Host URL`: (For self-hosted Ntfy) The URL of the self-hosted Ntfy instance in the format `https://ntfy.example.com`.
+- `Ntfy Username`: (For self-hosted Ntfy) The username for the self-hosted Ntfy instance.
+- `Ntfy Password`: (For self-hosted Ntfy) The password for the self-hosted Ntfy instance.
+
+## Connecting with the Provider
+
+Obtain Ntfy Access Token (For Ntfy.sh only)
+
+1. Create an account on [Ntfy.sh](https://ntfy.sh/).
+2. After logging in, go to the [Access token](https://ntfy.sh/account) page.
+3. Click on `CREATE ACCESS TOKEN`, give it a label, select a token expiration time, and click the `CREATE TOKEN` button.
+4. Copy the generated token. This will be used as the `Ntfy Access Token` in the provider settings.
+
+Self-Hosted Ntfy
+
+1. To self-host Ntfy, you can follow the instructions [here](https://docs.ntfy.sh/install/).
+2. For self-hosted Ntfy, you will need to provide the `Ntfy Host URL`, `Ntfy Username`, and `Ntfy Password` in the provider settings instead of the `Ntfy Access Token`.
+3. Create a new user for the self-hosted Ntfy instance and use the generated username and password in the provider settings.
+
+Subscribing to a Topic (For Ntfy.sh and self-hosted Ntfy)
+
+1. Login to your Ntfy.sh account.
+2. Click the `Subscribe to a topic` button, generate a name for the topic, and subscribe to it.
+3. Copy the generated topic name. This will be used as the `Ntfy Subscription Topic` in the provider settings.
+4. Reserve the topic and configure access (requires ntfy Pro).
+
+## Example of usage
+```yaml
+workflow:
+  id: ntfy-example
+  description: ntfy-example
+  triggers:
+    - type: manual
+  actions:
+    - name: ntfy
+      provider:
+        type: ntfy
+        config: "{{ providers.ntfy }}"
+        with:
+          message: "test-message"
+          topic: "test-topic"
+
+```
+
+## Useful Links
+
+- [Ntfy.sh](https://ntfy.sh/)
+- [To self-host Ntfy](https://docs.ntfy.sh/install/)
diff --git a/docs/providers/documentation/openobserve-provider.mdx b/docs/providers/documentation/openobserve-provider.mdx
new file mode 100644
index 0000000000..610f2947e1
--- /dev/null
+++ b/docs/providers/documentation/openobserve-provider.mdx
@@ -0,0 +1,33 @@
+---
+title: "OpenObserve"
+sidebarTitle: "OpenObserve Provider"
+description: "OpenObserve provider allows you to get OpenObserve `alerts/actions` via webhook installation"
+---
+
+## Authentication Parameters
+The OpenObserve provider requires the following authentication parameters:
+
+- `OpenObserve Username`: Required. This is your OpenObserve account username.
+- `OpenObserve Password`: This is the password associated with your OpenObserve Username.
+- `OpenObserve Host`: This is the hostname of the OpenObserve instance you wish to connect to. It identifies the OpenObserve server that the API will interact with.
+- `OpenObserve Port`: This is the port number for the OpenObserve host; the default is 5080.
+- `Organisation ID`: The ID of the organisation in which you would like to install the webhook.
+
+## Connecting with the Provider
+
+Obtain OpenObserve Username and Password:
+1. See how to install OpenObserve and set credentials [here](https://openobserve.ai/docs/quickstart/#self-hosted-installation).
+2. Get the Organisation ID of the OpenObserve instance in which you wish to install the webhook.
+
+## Webhook Integration Modifications
+
+The webhook integration adds Keep as an alert monitor within the OpenObserve instance. It can be found under the "Alerts & Respond" section.
+The integration automatically gains access to the following scopes within OpenObserve:
+- `authenticated`
+
+## Useful Links
+
+- [OpenObserve Alert Templates](https://openobserve.ai/docs/user-guide/alerts/templates)
+- [OpenObserve API Spec](https://openobserve.ai/docs/api_specs/#?route=overview)
+- [OpenObserve Destinations](https://openobserve.ai/docs/user-guide/alerts/destinations/)
+- [OpenObserve Installation and Credentials](https://openobserve.ai/docs/quickstart/#self-hosted-installation)
diff --git a/docs/providers/documentation/openshift-provider.mdx b/docs/providers/documentation/openshift-provider.mdx
new file mode 100644
index 0000000000..f097c97095
--- /dev/null
+++ b/docs/providers/documentation/openshift-provider.mdx
@@ -0,0 +1,34 @@
+---
+title: "Openshift"
+description: "Openshift provider to perform rollout restart action on specific resources."
+---
+
+## Inputs
+
+- **kind** (required): Kind of the object on which the rollout restart action will be run (`deployments`, `statefulset`, `daemonset`).
+- **name** (required): Name of the object on which the rollout restart action will be run.
+
+## Outputs
+
+- **message**: Message for the action performed.
+
+## Authentication Parameters
+
+This provider authenticates with Openshift using: api_server, token and insecure.
+
+- **api_server** (required): The API server URL of your Openshift cluster.
+- **token** (required): The token of your user to authenticate with Openshift.
+- **insecure** (optional): If you want to skip the certificate verification, set this to `True`.
+
+## Connecting with the Provider
+
+To connect to Openshift, follow the steps below:
+
+1. Log in to your Openshift cluster and create a new service account with the required roles.
+2. Get the token of the service account.
+3. Use the token to authenticate with Openshift.
+
+## Notes
+
+- This provider allows you to interact with Openshift to perform rollout restart actions.
+
diff --git a/docs/providers/documentation/opsgenie-provider.mdx b/docs/providers/documentation/opsgenie-provider.mdx
new file mode 100644
index 0000000000..7eb664d7ee
--- /dev/null
+++ b/docs/providers/documentation/opsgenie-provider.mdx
@@ -0,0 +1,54 @@
+---
+title: "Opsgenie Provider"
+description: "OpsGenie Provider is a provider that allows creating alerts in OpsGenie."
+---
+
+## Inputs
+
+The `notify` function in the `OpsgenieProvider` uses the OpsGenie [CreateAlertPayload](https://github.com/opsgenie/opsgenie-python-sdk/blob/master/docs/CreateAlertPayload.md):
+
+### Properties
+
+| Name            | Type | Description | Notes |
+| --------------- | ---- | ----------- | ----- |
+| **user**        | **str** | Display name of the request owner | [optional] |
+| **note**        | **str** | Additional note that will be added while creating the alert | [optional] |
+| **source**      | **str** | Source field of the alert. Default value is IP address of the incoming request | [optional] |
+| **message**     | **str** | Message of the alert | |
+| **alias**       | **str** | Client-defined identifier of the alert, that is also the key element of alert deduplication. | [optional] |
+| **description** | **str** | Description field of the alert that is generally used to provide a detailed information about the alert. | [optional] |
+| **responders**  | **list**[[Recipient](https://github.com/opsgenie/opsgenie-python-sdk/blob/master/docs/Recipient.md)] | Responders that the alert will be routed to send notifications | [optional] |
+| **visible_to**  | **list**[[Recipient](https://github.com/opsgenie/opsgenie-python-sdk/blob/master/docs/Recipient.md)] | Teams and users that the alert will become visible to without sending any notification | [optional] |
+| **actions**     | **list[str]** | Custom actions that will be available for the alert | [optional] |
+| **tags**        | **list[str]** | Tags of the alert | [optional] |
+| **details**     | **dict(str, str)** | Map of key-value pairs to use as custom properties of the alert | [optional] |
+| **entity**      | **str** | Entity field of the alert that is generally used to specify which domain alert is related to | [optional] |
+| **priority**    | **str** | Priority level of the alert | [optional] |
+
+## Authentication Parameters
+
+The OpsgenieProviderAuthConfig class takes the following parameters:
+
+```python
+api_key (str | None): API key, which is a user or team API key. Default is `None`. *Required*
+```
+
+## Connecting with the Provider
+
+To use the Opsgenie Provider, you'll need to provide an api_key.
+
+You can create an integration key under Settings -> Integrations -> Add API.
+Note: if you are in the free tier, the integration key can be created under Teams -> Your team -> Integrations -> Add Integration (API)
+
+## Scopes
+
+Certain scopes may be required to perform specific actions or queries via the Opsgenie Provider. Below is a summary of relevant scopes and their use cases:
+
+- opsgenie:create (Create alerts)
+  Required: True
+  Description: Allows creating, closing, and commenting on OpsGenie alerts.
+
+## Useful Links
+
+- How to create Opsgenie API Integration - https://support.atlassian.com/opsgenie/docs/create-a-default-api-integration/
+- How to get Opsgenie Integration Api Key - https://community.atlassian.com/t5/Opsgenie-questions/OpsGenie-API-Create-alert-Authentication-problem/qaq-p/1531047?utm_source=atlcomm&utm_medium=email&utm_campaign=immediate_general_question&utm_content=topic#U1531256
diff --git a/docs/providers/documentation/pagerduty-provider.mdx b/docs/providers/documentation/pagerduty-provider.mdx
new file mode 100644
index 0000000000..3461b99562
--- /dev/null
+++ b/docs/providers/documentation/pagerduty-provider.mdx
@@ -0,0 +1,68 @@
+---
+title: "Pagerduty Provider"
+description: "Pagerduty Provider is a provider that allows creating incidents or posting events to Pagerduty."
+---
+
+## Inputs
+
+- `title`: str: Title of the alert or incident.
+- `alert_body`: str: UTF-8 string of custom message for alert. Shown in incident body for events, and in the body for incidents.
+- `dedup`: str | None: Any string, max 255 characters, used to deduplicate alerts for events.
+- `service_id`: str: ID of the service for incidents.
+- `body`: dict: Body of the incident.
+- `requester`: str: Requester of the incident.
+- `incident_key`: str | None: Key to identify the incident. If not given, a UUID will be generated.
+
+## Authentication Parameters
+
+The `api_key` or `routing_key` is required for connecting to the Pagerduty provider. You can obtain them as described in the "Connecting with the Provider" section.
+
+The routing key is an integration or ruleset key; the API key is a user or team API key.
+
+## Connecting with the Provider
+
+To use the PagerdutyProvider, you'll need to provide either a routing_key or an api_key.
+
+You can find your integration key or routing key in the PagerDuty web app under **Configuration** > **Integrations**, and select the integration you want to use.
+You can find your API key in the PagerDuty web app under **Configuration** > **API Access**.
+
+The routing_key is used to post events to Pagerduty using the events API.
+The api_key is used to create incidents using the incidents API.
+
+## Scopes
+
+Certain scopes may be required to perform specific actions or queries via the Pagerduty Provider. Below is a summary of relevant scopes and their use cases:
+
+- incidents_read (Incidents Read)
+  Required: True
+  Description: View incidents.
+- incidents_write (Incidents Write)
+  Required: False
+  Description: Write incidents.
+- webhook_subscriptions_read (Webhook Subscriptions Read)
+  Required: False
+  Description: View webhook subscriptions.
+  (*Required for auto-webhook integration)
+- webhook_subscriptions_write (Webhook Subscriptions Write)
+  Required: False
+  Description: Write webhook subscriptions.
+  (*Required for auto-webhook integration)
+
+## Notes
+
+The provider uses either the events API or the incidents API to create an alert or an incident. The choice of API to use is determined by the presence of either a routing_key or an api_key.
+
+An expired trial while using the free version of PagerDuty may result in the "pagerduty scopes are invalid" error in Keep.
+
+## Webhook Integration Modifications
+
+The webhook integration adds Keep as a destination within the "Integrations" API within Pagerduty.
+This grants Keep access to the following scopes within Pagerduty:
+- `webhook_subscriptions_read`
+- `webhook_subscriptions_write`
+
+## Useful Links
+
+- Pagerduty Events API documentation: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
+- Pagerduty Incidents API documentation: https://v2.developer.pagerduty.com/docs/create-an-incident-incidents-api-v2
diff --git a/docs/providers/documentation/pagertree-provider.mdx b/docs/providers/documentation/pagertree-provider.mdx
new file mode 100644
index 0000000000..59e93b793a
--- /dev/null
+++ b/docs/providers/documentation/pagertree-provider.mdx
@@ -0,0 +1,47 @@
+---
+title: "Pagertree Provider"
+description: "The Pagertree Provider facilitates interactions with the Pagertree API, allowing the retrieval and management of alerts."
+---
+
+## Inputs
+
+The `notify` function in the `PagertreeProvider` class takes the following parameters:
+
+```python
+kwargs(dict):
+  title (str): Title of the alert or incident. *Required*
+  urgency (Literal["low", "medium", "high", "critical"]): Defines the urgency of the alert. *Required*
+  incident (bool, default=False): If True, sends data as an incident. *Optional*
+  severities (Literal["SEV-1", "SEV-2", "SEV-3", "SEV-4", "SEV-5", "SEV_UNKNOWN"], default="SEV-5"): Specifies the severity level of the incident. *Optional*
+  incident_message (str, default=""): Message describing the incident. *Optional*
+  description (str, default=""): Detailed description of the alert or incident. *Optional*
+  status (Literal["queued", "open", "acknowledged", "resolved", "dropped"], default="queued"): Status of the alert or incident. *Optional*
+  destination_team_ids (list[str], default=[]): List of team IDs that the alert or incident will be sent to. *Optional*
+  destination_router_ids (list[str], default=[]): List of router IDs that the alert or incident will be sent to. *Optional*
+  destination_account_user_ids (list[str], default=[]): List of account user IDs that the alert or incident will be sent to. *Optional*
+  **kwargs (dict): Additional keyword arguments that might be needed for future use. *Optional*
+```
+
+### Authentication Parameters
+
+The `PagertreeProviderAuthConfig` class takes the following parameters:
+- api_token (str): Your Pagertree API Token. *Required*
+
+## Connecting with the Provider
+
+- To interact with the Pagertree API, you need to provide an api_token.
+- You can view and manage your API keys on your [User Settings](https://app.pagertree.com/user/settings) page.
+
+## Notes
+
+_This provider uses the Pagertree API to send alerts or mark them as incidents based on the parameters provided. Depending on whether an incident is flagged as true, it calls either the `__send_alert` or the `__send_incident` method._
+
+## Useful Links
+
+- Pagertree API documentation: [Pagertree API](https://pagertree.com/docs)
+- Pagertree Authentication: [Authentication](https://pagertree.com/docs/api/authentication)
+- Pagertree Alerts: [Alerts & Incident](https://pagertree.com/docs/api/alerts)
\ No newline at end of file
diff --git a/docs/providers/documentation/parseable-provider.mdx b/docs/providers/documentation/parseable-provider.mdx
new file mode 100644
index 0000000000..624a27cf98
--- /dev/null
+++ b/docs/providers/documentation/parseable-provider.mdx
@@ -0,0 +1,46 @@
+---
+title: "Parseable"
+sidebarTitle: "Parseable Provider"
+description: "Parseable provider allows integration with Parseable, a tool for collecting and querying logs."
+---
+
+## Inputs
+
+- log_message: str: The log message to send to Parseable
+- log_level: str (optional): The log level (e.g., `info`, `error`, `warning`)
+
+## Outputs
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page._
+
+## Authentication Parameters
+
+- `api_key`: API key for authenticating with Parseable.
+- `parseable_url`: The URL of the Parseable instance where logs will be sent.
+
+## Connecting with the Provider
+
+1. Obtain an API key from your Parseable instance.
+2. Configure your provider using the `api_key` and `parseable_url`.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: parseable-example
+  description: Parseable example
+  triggers:
+    - type: manual
+  actions:
+    - name: parseable
+      provider:
+        type: parseable
+        config: "{{ providers.parseabletest }}"
+        with:
+          log_message: "This is a test log message"
+          log_level: "info"
+```
+
+## Useful Links
+- [Parseable API Documentation](https://www.parseable.com/docs/api)
\ No newline at end of file
diff --git a/docs/providers/documentation/pingdom-provider.mdx b/docs/providers/documentation/pingdom-provider.mdx
new file mode 100644
index 0000000000..42c5d036b5
--- /dev/null
+++ b/docs/providers/documentation/pingdom-provider.mdx
@@ -0,0 +1,48 @@
+---
+title: "Pingdom"
+sidebarTitle: "Pingdom Provider"
+description: "Pingdom provider allows you to pull alerts from Pingdom or install Keep as webhook."
+---
+
+## Inputs
+
+Pingdom Provider does not currently support the `notify` function.
+
+## Outputs
+
+Pingdom Provider does not currently support the `query` function.
+
+## Authentication Parameters
+
+The `api_key` is required for connecting to the Pingdom provider. You can obtain it as described in the "Connecting with the Provider" section.
+
+## Connecting with the Provider
+
+### API Key
+
+To obtain the Pingdom API key, follow these steps:
+
+1. Log in to your Pingdom account.
+2. Navigate to the "Settings" section.
+3. Click on the "Pingdom API" tab.
+4. Generate a new API Key.
+
+
+## Fingerprinting
+
+Fingerprints in Pingdom are calculated based on the `check_id` of the incoming/pulled event.
+
+## Scopes
+
+- read (Read)
+  Required: True
+  Description: Read data from your Pingdom account.
+
+## Notes
+
+_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_
+
+## Useful Links
+
+- [Pingdom Webhook Documentation](https://www.pingdom.com/resources/webhooks)
+- [Pingdom Actions API](https://docs.pingdom.com/api/#tag/Actions)
diff --git a/docs/providers/documentation/planner-provider.mdx b/docs/providers/documentation/planner-provider.mdx
new file mode 100644
index 0000000000..f6a538c109
--- /dev/null
+++ b/docs/providers/documentation/planner-provider.mdx
@@ -0,0 +1,44 @@
+---
+title: "Microsoft Planner Provider"
+description: "Microsoft Planner Provider to create tasks in Planner."
+---
+
+## Inputs
+
+- **title** (required): The title of the incident.
+- **plan_id** (required): The plan ID inside which the task will be created.
+- **bucket_id** (optional): The bucket ID (unique ID of the board inside a plan) inside which the task should be created; if not provided, the task will be created in the `No bucket` board.
+
+## Outputs
+
+Microsoft Planner Provider does not currently support the `query` function.
+
+## Authentication Parameters
+
+The Microsoft Planner Provider uses client_id, client_secret and tenant_id to generate an access_token for authentication. You need to provide the following authentication parameters to connect to the Microsoft Planner Provider:
+
+- **client_id** (required): The client ID of your registered application in Azure.
+- **client_secret** (required): The client secret generated inside your registered application in Azure.
+- **tenant_id** (required): The tenant ID where the authentication app was registered in Azure.
+
+## Connecting with the Provider
+
+To connect to Microsoft Planner, follow the steps below:
+
+1. Log in to your [Azure](https://azure.microsoft.com/) account.
+2. Register an application [here](https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps/CreateApplicationBlade/isMSAApp~/false).
+3. After successfully registering the application, go to the **API permissions** page and add the permissions below:
+   - `Tasks.Read.All`
+   - `Tasks.ReadWrite.All`
+4. Go to the **Overview** page and note the `Application (client) ID` and `Directory (tenant) ID`.
+5. Go to the **Certificates & secrets** page, create a new client secret and note the client secret value.
+6. Add the client ID, client secret and tenant ID to the `authentication` section in the Microsoft Planner Provider configuration.
+
+## Notes
+
+- This provider allows you to interact with Microsoft Planner to create tasks.
+
+## Useful Links
+
+- [Microsoft Planner Provider Documentation](https://learn.microsoft.com/en-us/graph/api/planner-post-tasks?view=graph-rest-1.0&tabs=http)
+- [Create an Azure Active Directory app](https://learn.microsoft.com/en-us/graph/toolkit/get-started/add-aad-app-registration)
\ No newline at end of file
diff --git a/docs/providers/documentation/postgresql-provider.mdx b/docs/providers/documentation/postgresql-provider.mdx
new file mode 100644
index 0000000000..5e3236b46d
--- /dev/null
+++ b/docs/providers/documentation/postgresql-provider.mdx
@@ -0,0 +1,45 @@
+---
+title: "PostgreSQL"
+sidebarTitle: "PostgreSQL Provider"
+description: "PostgreSQL Provider is a provider used to query PostgreSQL databases"
+---
+
+## Inputs
+
+The `query` function of `PsqlProvider` takes the following arguments:
+
+- `query` (str): A string containing the query to be executed against the PostgreSQL database.
+- `single_row` (bool, optional): If `True`, the function will return only the first result.
+
+## Outputs
+
+The `query` function returns either a `list` or a `tuple` of results, depending on whether `single_row` was set to `True` or not. If `single_row` was `True`, then the function returns a single result.
+
+## Authentication Parameters
+
+The following authentication parameters are used to connect to the PostgreSQL database:
+
+- `user` (str): The Postgres username.
+- `password` (str): The Postgres password.
+- `host` (str): The Postgres hostname.
+- `dbname` (str, optional): The name of the Postgres database.
+- `port` (str, optional): The Postgres server port.
+
+## Connecting with the Provider
+
+In order to connect to the Postgres database, you will need to create a new user with the required permissions. Here's how you can do this:
+
+1. Connect to the Postgresql server as a user with sufficient privileges to create a new user.
+2. Run the following command to create a new user:
+   `CREATE USER <username> WITH ENCRYPTED PASSWORD '<password>';`
+3. Run the following command to create a database:
+   `CREATE DATABASE <database>;`
+4. Grant the necessary permissions to the new user by running the following command:
+   `GRANT ALL PRIVILEGES ON DATABASE <database> TO <username>;`
+
+## Notes
+
+## Useful Links
+
+- [Postgresql Documentation](https://www.postgresql.org/docs/)
+- [Creating user, database and adding access on psql](https://medium.com/coding-blocks/creating-user-database-and-adding-access-on-postgresql-8bfcd2f4a91e)
diff --git a/docs/providers/documentation/prometheus-provider.mdx b/docs/providers/documentation/prometheus-provider.mdx
new file mode 100644
index 0000000000..f4bd6e5fb1
--- /dev/null
+++ b/docs/providers/documentation/prometheus-provider.mdx
@@ -0,0 +1,47 @@
+---
+title: "Prometheus"
+sidebarTitle: "Prometheus Provider"
+description: "Prometheus provider allows integration with Prometheus for monitoring and alerting purposes."
+---
+
+## Inputs
+
+- `query`: str : The Prometheus query to execute
+- `time_range`: str (optional) : Time range for the query in Prometheus' duration format (e.g., `1h`, `30m`)
+
+## Outputs
+
+- `result`: The result of the Prometheus query, returned in a dictionary format containing the data.
+
+## Authentication Parameters
+
+- `prometheus_url`: URL of the Prometheus server where the queries will be executed.
+- `api_token`: API token for secure access to the Prometheus server (optional if the server is open).
+
+## Connecting with the Provider
+
+1. Set up a Prometheus server and make sure it's running.
+2. Get the `prometheus_url` where your Prometheus instance is accessible.
+3. (Optional) Obtain the API token from your Prometheus configuration if it's protected.
+4. Provide these values in the provider configuration.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: prometheus-example
+  description: Prometheus example
+  triggers:
+    - type: manual
+  actions:
+    - name: prometheus
+      provider:
+        type: prometheus
+        config: "{{ providers.prometheustest }}"
+        with:
+          query: "up"
+          time_range: "1h"
+```
+
+## Useful Links
+- [Prometheus Querying API Documentation](https://prometheus.io/docs/prometheus/latest/querying/api/)
+- [Prometheus Official Documentation](https://prometheus.io/docs/introduction/overview/)
\ No newline at end of file
diff --git a/docs/providers/documentation/pushover-provider.mdx b/docs/providers/documentation/pushover-provider.mdx
new file mode 100644
index 0000000000..cf552e604c
--- /dev/null
+++ b/docs/providers/documentation/pushover-provider.mdx
@@ -0,0 +1,34 @@
+---
+title: "Pushover"
+sidebarTitle: "Pushover Provider"
+description: "Pushover docs"
+---
+
+## Inputs
+
+The Pushover provider gets "message" as an input, which will be used as the notification message.
+Configuration example:
+
+```
+pushover:
+  authentication:
+    token: XXXXXXXXXXXXXXXX
+    user_key: XXXXXXXXXXXXXXXX
+```
+
+## Outputs
+
+None.
+
+## Authentication Parameters
+
+The Pushover provider gets two authentication parameters.
+
+Token:
+![Token](/images/token.jpeg)
+User key:
+![User key](/images/user-key.jpeg)
+
+## Useful Links
+
+- https://support.pushover.net/i44-example-code-and-pushover-libraries#python
diff --git a/docs/providers/documentation/python-provider.mdx b/docs/providers/documentation/python-provider.mdx
new file mode 100644
index 0000000000..fe539366b6
--- /dev/null
+++ b/docs/providers/documentation/python-provider.mdx
@@ -0,0 +1,48 @@
+---
+title: "Python"
+sidebarTitle: "Python Provider"
+description: "Python provider allows executing Python code snippets."
+---
+
+## Inputs
+
+- `script`: str: Python script to execute
+
+## Outputs
+
+- `result`: The output of the Python script
+
+## Authentication Parameters
+
+_None required for local execution._
+
+## Limitations
+
+- The Python provider is currently disabled for cloud execution. This means that Python scripts cannot be executed in a cloud environment.
+- Users must ensure that the scripts are compatible with the local execution environment.
+
+## Connecting with the Provider
+
+The Python provider allows you to run small Python scripts.
+
+## Example of usage
+
+```yaml
+workflow:
+  id: python-example
+  description: Python example
+  triggers:
+    - type: manual
+  actions:
+    - name: python
+      provider:
+        type: python
+        config: "{{ providers.pythontest }}"
+        with:
+          script: |
+            print("Hello, world!")
+```
+
+## Useful Links
+
+- [Python Documentation](https://docs.python.org/3/)
\ No newline at end of file
diff --git a/docs/providers/documentation/quickchart-provider.mdx b/docs/providers/documentation/quickchart-provider.mdx
new file mode 100644
index 0000000000..d20a2360fd
--- /dev/null
+++ b/docs/providers/documentation/quickchart-provider.mdx
@@ -0,0 +1,71 @@
+---
+title: "QuickChart Provider"
+sidebarTitle: "QuickChart Provider"
+description: "The QuickChart provider enables the generation of chart images through a simple and open API, allowing visualization of alert trends and counts. It supports both anonymous usage and authenticated access with an API key for enhanced functionality."
+---
+
+# QuickChart Provider
+
+## Overview
+
+The QuickChart provider allows for the generation of two types of charts based on alert data within Keep's platform:
+
+1. A line chart that shows the trend of a specific fingerprint alert over time.
+2. A radial gauge chart displaying the total number of alerts Keep received for this fingerprint.
+
+These charts can be used in various reports, dashboards, or alert summaries to provide visual insights into alert activity and trends.
+
+## Inputs
+
+- `fingerprint`: The unique identifier of the alert whose trend you want to visualize. This is required.
+- `status`: (Optional) The status of alerts to filter by (e.g., firing, resolved). Defaults to all statuses.
+- `chartConfig`: (Optional) Custom chart configuration settings in JSON format. Default settings will be used if not provided.
+
+## Outputs
+
+The output is a JSON object that includes URLs to the generated chart images:
+
+- `chart_url`: URL of the trend chart image.
+
+- `counter_url`: URL of the total alerts gauge chart image.
+
+## Authentication Parameters
+
+- `api_key`: (Optional) QuickChart API Key. The provider can be used without an API key, but for more advanced usage, such as generating more complex charts or handling higher request volumes, an API key is recommended.
+
+## Connecting with the Provider
+
+### Using QuickChart without an API Key
+
+The QuickChart provider can generate charts without the need for an API key. However, this usage is limited to basic functionality and lower request limits.
+
+### Using QuickChart with an API Key
+
+To unlock more advanced features and higher usage limits, you can use a QuickChart API key. Here's how to obtain one:
+
+1. Visit [QuickChart](https://quickchart.io/).
+2. Sign up for a free account to get started.
+3. Navigate to your account settings to find your API key.
+
+Once you have your API key, add it to the provider configuration in Keep.
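+
+## Example of usage
+
+For illustration, a minimal workflow sketch using the inputs above. The provider alias (`quickcharttest`) and the fingerprint value are hypothetical:
+
+```yaml
+workflow:
+  id: quickchart-example
+  description: QuickChart example (illustrative)
+  triggers:
+    - type: manual
+  actions:
+    - name: quickchart
+      provider:
+        type: quickchart
+        config: "{{ providers.quickcharttest }}"
+        with:
+          fingerprint: "some-alert-fingerprint"
+          status: "firing"
+```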
+ +## Notes + +This provider is designed to offer flexible chart generation capabilities within Keep, enhancing how you visualize alert data and trends. It is ideal for users who want to quickly integrate visual representations of alert activity into their workflows. + +## Useful Links + +- [QuickChart API Documentation](https://quickchart.io/documentation/) +- [QuickChart Website](https://quickchart.io/) diff --git a/docs/providers/documentation/redmine-provider.mdx b/docs/providers/documentation/redmine-provider.mdx new file mode 100644 index 0000000000..d098ccb0c4 --- /dev/null +++ b/docs/providers/documentation/redmine-provider.mdx @@ -0,0 +1,96 @@ +--- +title: "Redmine" +sidebarTitle: "Redmine Provider" +--- + +# Redmine Provider + +`RedmineProvider` is a class that integrates with Redmine to manage issue tracking through Keep. + +## Inputs
The `_notify` function of `RedmineProvider` takes the following arguments:

- `project_id` (str): Required. The ID of the Redmine project.
- `subject` (str): Required. The subject of the issue to be created.
- `priority_id` (str): Required. The priority ID for the issue.
- `description` (str): Optional. The description of the issue.
- `**kwargs` (dict): Optional. Additional parameters that can be passed as key-value pairs for the issue.

## Outputs
The `_notify` function of `RedmineProvider` returns the created issue in the following format (example):

```json
[
  {
    "issue": {
      "id": 2,
      "project": {
        "id": 1,
        "name": "KeepHQ"
      },
      "tracker": {
        "id": 1,
        "name": "Bug"
      },
      "status": {
        "id": 1,
        "name": "New",
        "is_closed": false
      },
      "priority": {
        "id": 4,
        "name": "Urgent"
      },
      "author": {
        "id": 1,
        "name": "UserName LastName"
      },
      "subject": "Issue1",
      "description": "A new Issue from KeepHQ",
      "start_date": "2024-04-30",
      "due_date": null,
      "done_ratio": 0,
      "is_private": false,
      "estimated_hours": null,
      "total_estimated_hours": null,
      "created_on": "2024-04-30T11:59:17Z",
      "updated_on": "2024-04-30T11:59:17Z",
      "closed_on": null
    }
  }
]
```

## Authentication Parameters
The Redmine provider requires the following authentication parameters:

- `host` (str): Required. The host URL of the Redmine server.
- `api_access_key` (str): Required. Redmine API Access Key. Refer to the [Redmine REST API documentation](https://www.redmine.org/projects/redmine/wiki/rest_api#Authentication) for details on obtaining an API key.

## Connecting with the Provider
To connect with the Redmine provider and manage issues through Keep, follow these steps:

1. Obtain a Redmine API access key: Visit the [Redmine API documentation](https://www.redmine.org/projects/redmine/wiki/rest_api#Authentication) to see the steps to get an API key.
2. Use the following YAML example to create an issue using the Redmine provider; all of these are [valid arguments](https://www.redmine.org/projects/redmine/wiki/Rest_Issues#Creating-an-issue):

```yaml title=examples/issue_creation_example.yml
# Create an issue using the Redmine provider.
task:
  id: create-redmine-issue
  description: Create an issue in Redmine
  actions:
    - name: create-issue
      provider:
        type: redmine
        config: "{{ providers.redmine-provider }}"
        with:
          project_id: "example_project"
          subject: "Issue Subject"
          priority_id: "2"
          description: "This is the issue description."
+``` + +## Useful Links +- [Redmine REST API](https://www.redmine.org/projects/redmine/wiki/rest_api) +- [Authentication Guide](https://www.redmine.org/projects/redmine/wiki/rest_api#Authentication) +- [Valid arguments while creating issue](https://www.redmine.org/projects/redmine/wiki/Rest_Issues#Creating-an-issue) diff --git a/docs/providers/documentation/resend-provider.mdx b/docs/providers/documentation/resend-provider.mdx new file mode 100644 index 0000000000..0996887ff3 --- /dev/null +++ b/docs/providers/documentation/resend-provider.mdx @@ -0,0 +1,66 @@ +--- +title: "Resend" +sidebarTitle: "Resend Provider" +--- + +# Resend Provider + +ResendProvider is a class that implements the Resend API and allows email sending through Keep. + +## Inputs +The `notify` function of `ResendProvider` takes the following arguments: + +- `_from` (str): Required. The email address of the sender. +- `to` (str): Required. The email address of the recipient. +- `subject` (str): Required. The subject of the email. +- `html` (str): Required. The HTML body of the email. +- `**kwargs` (optional): Additional optional parameters can be provided as key-value pairs. + +See [documentation](https://resend.com/docs/api-reference/emails/send-email) for more + +## Outputs +The `notify` function of `ResendProvider` outputs the following format (example): + +```json +{ + "id": "49a3999c-0ce1-4ea6-ab68-afcd6dc2e794", + "from": "onboarding@resend.dev", + "to": "user@example.com", + "created_at": "2022-07-25T00:28:32.493138+00:00" +} +``` + +See [documentation](https://resend.com/docs/api-reference/emails/send-email) for more + + +## Authentication Parameters +The Resend provider requires the following authentication parameter: + +- `api_key`: Required. Resend API key. You can obtain an API key by visiting [Resend API Keys](https://resend.com/api-keys). + +## Connecting with the Provider +To connect with the Resend provider and send emails through Keep, follow these steps: + +1. Obtain a Resend API key: Visit [Resend API Keys](https://resend.com/api-keys) to obtain an API key if you don't have one already. +2. Configure the Resend provider in your system with the obtained API key. +3. Use the following YAML example to send an email notification using the Resend provider: + +```yaml title=examples/alert_example.yml +# Send an email notification using the Resend provider. +alert: + id: email-notification + description: Send an email notification using Resend + actions: + - name: send-email + provider: + type: resend + config: "{{ providers.resend-provider }}" + with: + _from: "sender@example.com" + to: "recipient@example.com" + subject: "Hello from Resend Provider" + html: "

This is the email body.

" +``` + +## Useful Links +- [Resend API Keys](https://resend.com/api-keys) diff --git a/docs/providers/documentation/rollbar-provider.mdx b/docs/providers/documentation/rollbar-provider.mdx new file mode 100644 index 0000000000..521a3b98f2 --- /dev/null +++ b/docs/providers/documentation/rollbar-provider.mdx @@ -0,0 +1,27 @@ +--- +title: "Rollbar" +sidebarTitle: "Rollbar Provider" +description: "Rollbar provides real-time error tracking and debugging tools for developers." +--- + +## Authentication Parameters + +The Rollbar provider requires the following authentication parameters: + +- `rollbarAccessToken` - Project Access Token is used to authenticate the Rollbar API requests. + +## Connecting with the Provider + +1. Create an account on [Rollbar](https://rollbar.com/). +2. After logging in, navigate to the project you want to connect with and go to the project settings. +3. Under Setup, go to Project Access Tokens and create new token with read and write scopes. +4. Copy the generated token. +5. This will be used as the `rollbarAccessToken` parameter in the provider configuration. + +## Webhook Integration Modifications + +You can manage the permissions granted by the webhook integration by navigating to **Settings > Notifications > Webhook** within the Rollbar project. + +## Usefull Links + +- [Rollbar](https://rollbar.com/) diff --git a/docs/providers/documentation/sendgrid-provider.mdx b/docs/providers/documentation/sendgrid-provider.mdx new file mode 100644 index 0000000000..45b6c94250 --- /dev/null +++ b/docs/providers/documentation/sendgrid-provider.mdx @@ -0,0 +1,65 @@ +--- +title: "SendGrid" +sidebarTitle: "SendGrid Provider" +--- + +# SendGrid Provider + +SendGridProvider is a class that implements the SendGrid API and allows email sending through Keep. + +## Inputs +The `notify` function of `SendGridProvider` takes the following arguments: + +- `to` (str): Required. The email address of the recipient. +- `subject` (str): Required. The subject of the email. +- `html` (str): Required. The HTML body of the email. +- `**kwargs` (optional): Additional optional parameters can be provided as key-value pairs. + +See [documentation](https://www.twilio.com/docs/sendgrid/api-reference) for more details. + +## Outputs +The `notify` function of `SendGridProvider` outputs the following format (example): +``` +{ + "status_code": 202, + "body": "", + "headers": { + "X-Message-Id": "G9RvW0ONQ0uK7eRfhHfZTQ" + } +} +``` +See [documentation](https://www.twilio.com/docs/sendgrid/api-reference) for more details. + +## Authentication Parameters +The SendGrid provider requires the following authentication parameters: + +- `api_key`: Required. SendGrid API key. You can obtain an API key by visiting [SendGrid API Keys](https://www.twilio.com/docs/sendgrid/api-reference/api-keys). +- `from_email`: Required. The email address from which the email is sent. + +## Connecting with the Provider +To connect with the SendGrid provider and send emails through Keep, follow these steps: + +1. Obtain a SendGrid API key: Visit [SendGrid API Keys](https://www.twilio.com/docs/sendgrid/api-reference/api-keys/) to obtain an API key if you don't have one already. +2. Configure the SendGrid provider in your system with the obtained API key and the `from_email` address. +3. Use the following YAML example to send an email notification using the SendGrid provider: + +``` +title=examples/alert_example.yml +# Send an email notification using the SendGrid provider. 
+alert: + id: email-notification + description: Send an email notification using SendGrid + actions: + - name: send-email + provider: + type: sendgrid + config: "{{ providers.sendgrid-provider }}" + with: + to: "recipient@example.com" + subject: "Hello from SendGrid Provider" + html: "

This is the email body.

" +``` + +## Useful Links +- [SendGrid API Keys](https://sendgrid.com/docs/ui/account-and-settings/api-keys/) +- [SendGrid API Reference](https://www.twilio.com/docs/sendgrid/api-reference) diff --git a/docs/providers/documentation/sentry-provider.mdx b/docs/providers/documentation/sentry-provider.mdx new file mode 100644 index 0000000000..cf1f80fcd5 --- /dev/null +++ b/docs/providers/documentation/sentry-provider.mdx @@ -0,0 +1,75 @@ +--- +title: "Sentry" +sidebarTitle: "Sentry Provider" +description: "Sentry provider allows you to query Sentry events and to pull/push alerts from Sentry" +--- + +## Inputs + +- `time: str = "14d"`: The time range for the query (e.g., `1d`) +- `project: str`: The project to query on. + +## Authentication Parameters + +The `api_key` and `organization_slug` are required for connecting to the Sentry provider. You can obtain them as described in the "Connecting with the Provider" section. + +`project_slug` is if you want to connect Sentry to a specific project within an organization. + + +To connect self hosted Sentry, you need to set the `api_url` parameter. Default value is `https://sentry.io/api/0/`. + + +## Connecting with the Provider + +### API Key + +To obtain the Sentry API key, follow these steps ([Docs](https://docs.sentry.io/product/integrations/integration-platform/?original_referrer=https%3A%2F%2Fwww.google.com%2F#internal-integrations)): + +1. Log in to your Sentry account. +2. Navigate `Settings` -> `Developer Settings` section. +3. Click on `Custom integrations`. +4. Click on `Create New Integration` on the top right side of the screen. +5. Select `Internal Integration` and click `Next` +6. Give the integration an indicative name, e.g. `Keep Integration` +7. From the permission section, select the required scopes as defined at the bottom of this page. +8. Click `Save Changes` +9. Scroll down to the bottom of the screen to the `TOKENS` section and copy the generated token -- This is the API key you will be using in Keep. + +### Organization Slug + +You can find the Organization Slug in your Sentry URL. +For example, this is our playground account: `https://keep-dr.sentry.io/` - The organization slug is `keep-dr`. + +To obtain the Organization Slug from the settings page: + +1. Log in to your Sentry account. +2. Navigate `Settings` -> `General Settings`. +3. Copy the Organization Slug from the Organization Slug input. + +## Scopes + +Certain scopes may be required to perform specific actions or queries via the Sentry Provider. Below is a summary of relevant scopes and their use cases: + +- `event:read` + | Required: `True` + | Description: `Read events and issues.` +- `project:read` + | Required: `True` + | Description: `Read projects in organization` +- `project:write` + | Required: `False` + | Description: `Write permission for projects in an organization.` (\*_Required for auto-webhook integration_) + +## Notes + + +When installing Sentry webhook integration, Keep enables built-in Webhook integration to all accessible projects and adds a new Alert that has an `Action` to send a notification via Webhooks to all accessible projects. + +You can achieve alerts pushing from Sentry to Keep using an `Internal Integration` which is not automated via the platform. [Contact us](mailto:founder@keephq.dev) to set it up. 
+ + +## Useful Links + +- [Sentry Integration Platform](https://docs.sentry.io/product/integrations/integration-platform/) +- [Sentry API Reference](https://docs.sentry.io/api/) diff --git a/docs/providers/documentation/service-now-provider.mdx b/docs/providers/documentation/service-now-provider.mdx new file mode 100644 index 0000000000..c517854500 --- /dev/null +++ b/docs/providers/documentation/service-now-provider.mdx @@ -0,0 +1,50 @@ +--- +title: "Service Now" +sidebarTitle: "Service Now Provider" +description: "Service Now provider allows sending notifications, updates, and retrieving topology information from the ServiceNow CMDB." +--- + +## Inputs + +- `content`: str : Message text to send as a notification or update +- `topology_query`: str (optional): A query to retrieve topology information from the ServiceNow CMDB. + +## Outputs + +- `result`: str : The result of the notification or update action. +- `topology`: dict : The topology information retrieved from the CMDB, if a topology query is provided. + +## Authentication Parameters + +The `instance_url` and `api_token` are required for connecting to the ServiceNow instance and performing any actions. + +## Connecting with the Provider + +1. Ensure that the ServiceNow instance is accessible via API. +2. Provide the necessary API credentials (`instance_url` and `api_token`) in the provider configuration. + +## Example of Usage + +```yaml
workflow:
  id: service-now-example
  description: Service Now example
  triggers:
    - type: manual
  actions:
    - name: service-now
      provider:
        type: service-now
        config: "{{ providers.servicenow }}"
        with:
          content: "Incident update: Issue resolved"
    - name: service-now-topology
      provider:
        type: service-now
        config: "{{ providers.servicenow }}"
        with:
          topology_query: "SELECT * FROM cmdb_ci_server WHERE status='Active'"
```

## Useful Links
- [Service Now API documentation](https://docs.servicenow.com/bundle/xanadu-api-reference/page/build/applications/concept/api-rest.html) \ No newline at end of file diff --git a/docs/providers/documentation/signalfx-provider.mdx b/docs/providers/documentation/signalfx-provider.mdx new file mode 100644 index 0000000000..3dd3164a6c --- /dev/null +++ b/docs/providers/documentation/signalfx-provider.mdx @@ -0,0 +1,157 @@ +--- +title: "SignalFX" +sidebarTitle: "SignalFX Provider" +description: "SignalFX provider allows you to get alerts from SignalFX Alerting via webhooks." +--- + +## Overview +The SignalFX Provider enriches your monitoring and alerting capabilities by seamlessly integrating with SignalFX Alerting via webhooks. This integration allows you to receive alerts directly from SignalFX, ensuring you're promptly informed about significant events and metrics within your infrastructure. + +Key Features: +- Webhook Auto-Instrumentation: Automatically configures Keep as a Webhook Integration within SignalFX, subscribing to all available SignalFX Detectors and Rules for comprehensive monitoring. +- Manual and Automated Subscription Management: Provides flexibility in adding Keep as a subscriber to new Detectors either manually or by re-running the "setup webhook" feature from the UI for effortless maintenance. + +For further information or assistance, feel free to reach out on our Slack Community. + +## Connecting with the Provider
There are three approaches to connect with SignalFX:
- Push (Manually) - Install Keep as a Webhook Integration.
+- Push (Auto Instrumentation) - Let Keep instrument itself as a webhook integration and subscribe to your SignalFx detectors. +- Pull - Keep will pull alerts from SignalFx. + + +The recommended way to install SignalFx is through Push (Auto Instrumentation). With this approach, you benefit from the advantages of the Push approach, which include more context (since SignalFx sends more context on Webhooks) and more real-time alerts, combined with the convenience of Pull integration (just supply credentials, and Keep will do the rest). + +In the following sections, we will elaborate on each approach. + + +### Push (Manually) +For more information about how SignalFx integrates with Webhooks, you can read https://docs.splunk.com/observability/en/admin/notif-services/webhook.html#webhook2 +1. From your SignalFx console, click on "Data Management": + +2. Click on "+ Add Integration" + +3. Change the "By Use Case" select to "All" and filter "webhook": + +4. Click on the Webhook tile and fill in the following details: + +5. Now, go to the Detectors & SLOs page: + +6. For every Detector and Rule, add Keep as an Alert recipient: + + +### Push (Auto Instrumentation) +With this approach: +1. Keep installs itself as a Webhook Integration. +2. Keep iterates over all Detectors and Rules and adds itself as a subscriber. + +The downside of this approach is that you'll need the email/password of a user with an admin role. This is due to a SignalFx limitation on installing integrations: you can read more here - https://dev.splunk.com/observability/reference/api/integrations/latest#endpoint-create-integration + + +To install Keep with Push (auto instrumentation), you will need: +1. An SF token with read permissions - go to Settings -> Access Tokens -> New Token + +2. Email/password for a user with an admin role - this will be used only for creating the Webhook Integration +3. `orgid` - this will be used only for creating the Webhook Integration + + +Once you have everything you need, go to Keep and install the SignalFx provider: + + +### Pull +With this approach, Keep will pull alerts from SignalFx every time you refresh the console page. + +1. An SF token with read permissions - go to Settings -> Access Tokens -> New Token + +2. In Keep's UI, install the SignalFx Provider: + + +## Fingerprinting + +Fingerprints in SignalFx are calculated based on `(incidentId, detectorId)`. + +## Webhook Integration Modifications + +The automatic webhook integration gains access to the `API` authScope, which gives Keep the ability to read and write to the SignalFx API. + + +## Useful Links + +- [SignalFx Webhook](https://docs.splunk.com/observability/en/admin/notif-services/webhook.html#webhook2) diff --git a/docs/providers/documentation/signl4-provider.mdx b/docs/providers/documentation/signl4-provider.mdx new file mode 100644 index 0000000000..099bed1ea6 --- /dev/null +++ b/docs/providers/documentation/signl4-provider.mdx @@ -0,0 +1,48 @@ +--- +title: "SIGNL4 Provider" +description: "SIGNL4 offers critical alerting, incident response and service dispatching for operating critical infrastructure. It alerts you persistently via app push, SMS text and voice calls including tracking, escalation, collaboration and duty planning. Find out more at [signl4.com](https://www.signl4.com/)" +--- + +## Inputs + +The `notify` function in the `Signl4Provider` class takes the following parameters: + +```python
kwargs (dict):
    title (str): Title of the SIGNL4 alert.
*Required*
    message (str): Alert message.
    user (str): User, e.g. the requester of the incident.
    s4_external_id (str): If the event originates from a record in a 3rd party system, use this parameter to pass the unique ID of that record. That ID will be communicated in outbound webhook notifications from SIGNL4, which is great for correlation / synchronization of that record with the alert.
    s4_status (str): If you want to resolve an existing alert by an external id (s4_external_id), you can add this status parameter. It has three possible values. new: Default value which means that this event triggers a new alert. acknowledged: If you want to acknowledge a previously triggered alert (e.g. someone responded in the 3rd party system and not in the mobile app during business hours), set the s4_status to 'acknowledged' and provide an external ID via the s4_external_id parameter for the alert you want to acknowledge. It is only possible to acknowledge a Signl with a provided external id that initially triggered it. resolved: If you want to resolve a previously triggered alert (e.g. monitoring system has auto-closed the event), make sure to set the s4_status to 'resolved' and provide an external ID via the s4_external_id parameter for the alert(s) you want to resolve. It is only possible to resolve a Signl with a provided external id that initially triggered it.
    s4_service (str): Assigns the alert to the service / system category with the specified name.
    s4_location (str): Transmit location information ('latitude, longitude') with your event and display a map in the mobile app.
    s4_alerting_scenario (str): If this event triggers a Signl, allows you to control how SIGNL4 notifies the team. single_ack: Only one person needs to acknowledge this alert. multi_ack: The Signl must be confirmed by the number of people who are on duty at the time this alert is created. emergency: All people in the team are notified regardless of their duty status and must acknowledge the Signl, which is also assigned to the built-in emergency category.
    s4_filtering (bool): Specify a boolean value of true or false to apply event filtering for this event, or not. If set to true, the event will only trigger a notification to the team, if it contains at least one keyword from one of your services and system categories (i.e. it is whitelisted).
``` + +You can find more information [here](https://connect.signl4.com/webhook/docs/index.html). + +## Authentication Parameters + +The Signl4ProviderAuthConfig class takes the following parameters:
```python
signl4_integration_secret (str): Your SIGNL4 integration or team secret.
``` + +## Connecting with the Provider + +To use the Signl4Provider, you'll need to provide your `signl4_integration_secret`. + +You can find your integration or team secret in the SIGNL4 web portal under **Teams** or **Integrations** -> **Distribution Rules**. + +The `signl4_integration_secret` is used to post events to SIGNL4 using the webhook API. + +## Notes + +The provider uses either the events API or the incidents API to create an alert or an incident. The choice of API to use is determined by the presence of either a routing_key or an api_key.
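
## Example of usage

Below is a minimal workflow sketch that triggers a SIGNL4 alert using the `notify` parameters documented above. The provider key name (`signl4-demo`) and the parameter values are illustrative assumptions:

```yaml
# Hypothetical example: trigger a SIGNL4 alert from a Keep workflow.
alert:
  id: signl4-notification
  description: Send an alert to SIGNL4
  actions:
    - name: send-signl4-alert
      provider:
        type: signl4
        config: "{{ providers.signl4-demo }}"
        with:
          title: "Disk space low"
          message: "Disk usage on db-1 exceeded 90%"
          s4_service: "Database"
```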
+ +## Useful Links + +- SIGNL4: https://signl4.com/ +- SIGNL4 knowledge base: https://support.signl4.com/ +- SIGNL4 getting-started videos: https://www.youtube.com/watch?v=bwYSYOjMJZ8&list=PL9FRxukdQyk9QRZPOEH3jhRX9WQCovCc6 +- SIGNL4 videos: https://vimeo.com/showcase/signl4 diff --git a/docs/providers/documentation/site24x7-provider.mdx b/docs/providers/documentation/site24x7-provider.mdx new file mode 100644 index 0000000000..affc91a17d --- /dev/null +++ b/docs/providers/documentation/site24x7-provider.mdx @@ -0,0 +1,96 @@ +--- +title: "Site24x7 Provider" +description: "The Site24x7 Provider allows you to install webhooks and receive alerts in Site24x7. It manages authentication, setup of webhooks, and retrieval of alert logs from Site24x7." +--- + +## Inputs + +The `Site24x7Provider` class handles authentication and interacts with the Site24x7 API to install webhooks and fetch alerts. Here are the primary methods and their parameters: + +### Main Class Methods + +- **`setup_webhook(tenant_id, keep_api_url, api_key, setup_alerts)`** + - `tenant_id (str)`: Tenant identifier. + - `keep_api_url (str)`: URL to send alert data. + - `api_key (str)`: API key for authentication. + - `setup_alerts (bool)`: Whether to setup alerting capabilities (default is True). + +- **`_get_alerts()`** + - Returns a list of `AlertDto` objects representing the alerts. + +### Authentication Parameters + +The `Site24x7ProviderAuthConfig` class is used for API authentication and includes: + +- **`zohoRefreshToken (str)`**: Refresh token for Zoho authentication. *Required* +- **`zohoClientId (str)`**: Client ID for Zoho authentication. *Required* +- **`zohoClientSecret (str)`**: Client Secret for Zoho authentication. *Required* +- **`zohoAccountTLD (str)`**: Top-Level Domain for the Zoho account. Options include `.com`, `.eu`, `.com.cn`, `.in`, `.com.au`, `.jp`. *Required* + +## Connecting with the Provider + +To use the Site24x7 Provider, initialize it with the necessary authentication credentials and provider configuration. Ensure that your Zoho account credentials (Client ID, Client Secret, and Refresh Token) are correctly set up in the `Site24x7ProviderAuthConfig`. + +## Steps to Obtain a Refresh Token + +1. **Registration and Client Credentials:** + - Navigate to [Zoho API Console](https://api-console.zoho.com/). + - Sign in or sign up using the email associated with your Site24x7 account. + - Register your application using the "Self Client" option to get your Client ID and Client Secret. + +2. **Generating Grant Token:** + - Go to the Zoho Developer Console and access your registered Self Client. + - In the "Generate Code" tab, input the required scopes (`Site24x7.Admin.Read, Site24x7.Admin.Create, Site24x7.Operations.Read`), description, and time duration. + - Click "Generate" and copy the provided code. + +3. **Generating Access and Refresh Tokens:** + - Use the grant token to make a POST request to `https://accounts.zoho.com/oauth/v2/token` to obtain the access and refresh tokens. 
+ + ```bash + curl -X POST 'https://accounts.zoho.com/oauth/v2/token' \ + -d 'client_id=your_client_id' \ + -d 'client_secret=your_client_secret' \ + -d 'code=your_grant_token' \ + -d 'grant_type=authorization_code' + + ``` + + OR + + ```python + import requests + + response = requests.post( + 'https://accounts.zoho.com/oauth/v2/token', + data={ + 'client_id': 'your_client_id', + 'client_secret': 'your_client_secret', + 'code': 'your_grant_token', + 'grant_type': 'authorization_code' + } + ) + refresh_token = response.json().get('refresh_token') + ``` + +--- +## Notes + +- Ensure that the necessary scopes **Site24x7.Admin.Read, Site24x7.Admin.Create, Site24x7.Operations.Read** are included when generating the grant token, as they dictate the API functionalities accessible via the provider. +- Zoho API Console [Link](https://api-console.zoho.com) + +## Webhook Integration Modifications + +The webhook integration grants Keep access to the following scopes within Site24x7: +- `authenticated` +- `valid_tld` + +The webhook can be accessed via the "Alarms" section in the Site24x7 console. + +--- + +## Useful Links + +- [Site24x7 API Documentation](https://www.site24x7.com/help/api/) +- [Zoho OAuth Documentation](https://www.zoho.com/accounts/protocol/oauth/web-apps.html) +- [Site 24x7 Authentication Guide](https://www.site24x7.com/help/api/#authentication) +- [Third Party and Webhook Integrations](https://www.site24x7.com/help/api/#third-party-integrations) diff --git a/docs/providers/documentation/slack-provider.mdx b/docs/providers/documentation/slack-provider.mdx new file mode 100644 index 0000000000..d4360c6eef --- /dev/null +++ b/docs/providers/documentation/slack-provider.mdx @@ -0,0 +1,74 @@ +--- +title: "Keep's integration for Slack" +sidebarTitle: "Integration for Slack" +description: "Enhance your Keep workflows with direct Slack notifications. Simplify communication with timely updates and alerts directly within Slack." +--- + +## Overview + +Keep's integration for Slack enables seamless communication by allowing you to send notifications to Slack. This integration is designed to streamline your processes, ensuring your team remains informed with real-time updates. + +### Key Features + +- **Direct Notifications**: Utilize Keep to send messages directly to your Slack channels. +- **Flexible Configuration**: Easily configure alerts based on specific triggers within your Keep workflows. +- **Interactive Messages**: Enhance your Slack messages with interactive components like buttons and inputs. +- **Editable Messages**: Update existing Slack messages dynamically based on changes in alert status or other workflow outcomes, ensuring that your notifications reflect the most current information. + +## Getting Started + +## Authentication Methods + +Keep's integration for Slack supports two primary authentication methods: + +- **Webhook URL**: For simple notifications, use the webhook URL associated with your Slack channel. +- **OAuth 2.0**: For a more integrated experience, authorize Keep using Slack's OAuth 2.0 flow. This method is particularly useful for applications requiring access to more Slack features. + +### Installation + +1. **Add to Slack**: Begin by clicking the "Add to Slack" button on this page. You'll be guided through the OAuth authorization process to connect Keep with your Slack workspace. + + Add to Slack + +2. **Installation Confirmation**: After adding Keep to Slack, you'll be redirected to a confirmation page. 
This page will confirm the successful installation and provide the next steps to fully leverage Slack notifications within your Keep workflows. + +### OAuth Flow + +The OAuth flow simplifies the connection between Keep and Slack, providing a secure method to authenticate and authorize. + +1. **Initiate OAuth**: Click the "Slack" Provider in the [Platform](https://platform.keephq.dev). +![OAuth Authorization](/images/slack/slack-oauth.png) +2. **Authorize Keep**: Follow the prompts to authorize Keep to access your Slack workspace. + +### Setup + +1. **Create a Slack App**: If you haven't already, create a Slack app in the [Slack API Dashboard](https://api.slack.com/apps). +2. **Enable Incoming Webhooks**: In your Slack app settings, enable Incoming Webhooks and create a webhook for the channel you wish to post messages to. +3. **Use Your Webhook URL**: Within Keep, use the webhook URL to send notifications to your chosen Slack channel. + +## Using Keep's integration for Slack + +With Keep's integration for Slack installed, you're ready to enhance your workflows with Slack notifications. Here's how to get started: + +1. **Workflow Integration**: In Keep, select the workflow you wish to add Slack notifications to. Add a Slack notification block and configure it with your message or alert criteria. + + ![Workflow Configuration](/images/slack/slack-workflow.png) + +2. **Send a Test Notification**: Ensure your setup is correct by sending a test notification through your configured workflow; use the "Run Manually" link for that. + +### Inputs + +The `notify` function takes the following parameters as inputs: + +- `message`: Required. Message text to send to Slack +- `blocks`: Optional. Array of interactive components like inputs, buttons +- `channel`: Optional. The channel ID to send to if using the OAuth integration. + + +## Useful Links + +- [Slack API Documentation](https://api.slack.com/messaging/webhooks) +- [Keep Privacy Policy](https://www.keephq.dev/privacy-policy) +- [Keep Pricing Information](https://www.keephq.dev/pricing) + +For support and further assistance, shoot us a message over [Slack](https://slack.keephq.dev) (pun intended ;)) diff --git a/docs/providers/documentation/smtp-provider.mdx b/docs/providers/documentation/smtp-provider.mdx new file mode 100644 index 0000000000..2246befcb0 --- /dev/null +++ b/docs/providers/documentation/smtp-provider.mdx @@ -0,0 +1,25 @@ +--- +title: 'SMTP' +sidebarTitle: 'SMTP Provider' +description: 'SMTP Provider allows you to send emails.' +--- + +## Overview + +The SMTP Provider allows you to send emails from Keep. Most email services, such as Gmail, Yahoo, and Mailgun, provide SMTP servers for sending emails. You can use these SMTP servers to send emails from Keep. + +## Authentication Parameters + +The SMTP provider requires the following authentication parameters: + +- `SMTP Username` - The username of the SMTP server or the email address. +- `SMTP Password` - The password of the SMTP server. +- `SMTP Server Address` - The host address of the SMTP server. Example: `smtp.gmail.com`. +- `SMTP Port` - The port of the SMTP server. It is `587` for TLS and `465` for SSL. If you are using a custom SMTP server, you can use the port provided by the SMTP server. +- `SMTP Encryption` - The security protocol of the SMTP server. It can be `SSL` or `TLS`. + +## Connecting with the SMTP Provider + +1. Obtain the SMTP credentials from your email service provider (e.g., Gmail, Yahoo, Mailgun). +2. Add the SMTP Provider in Keep with the obtained credentials. +3.
Connect the SMTP Provider with Keep. diff --git a/docs/providers/documentation/snowflake-provider.mdx b/docs/providers/documentation/snowflake-provider.mdx new file mode 100644 index 0000000000..1a61d79379 --- /dev/null +++ b/docs/providers/documentation/snowflake-provider.mdx @@ -0,0 +1,29 @@ +--- +title: "Snowflake" +sidebarTitle: "Snowflake Provider" +description: "Snowflake provider documentation (not yet written, contributions welcome)" +--- + +## Inputs + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Outputs + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Authentication Parameters + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Connecting with the Provider + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Notes + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Useful Links + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ diff --git a/docs/providers/documentation/splunk-provider.mdx b/docs/providers/documentation/splunk-provider.mdx new file mode 100644 index 0000000000..88956cd8be --- /dev/null +++ b/docs/providers/documentation/splunk-provider.mdx @@ -0,0 +1,37 @@ +--- +title: "Splunk" +sidebarTitle: "Splunk Provider" +description: "Splunk provider allows you to get Splunk `saved searches` via webhook installation" +--- + +## Authentication Parameters
The Splunk provider requires the following authentication parameters:

- `Splunk API Key`: Required. The API token used to authenticate requests against your Splunk instance (see below for how to create one).
- `Host`: This is the hostname or IP address of the Splunk instance you wish to connect to. It identifies the Splunk server that the API will interact with.
- `Port`: This is the network port on the Splunk server that is listening for API connections. The default port for Splunk's management API is typically 8089.

## Connecting with the Provider

Obtain a Splunk API token:
1. Ensure you have a Splunk account with the necessary [permissions](https://docs.splunk.com/Documentation/Splunk/9.2.0/Security/Rolesandcapabilities). The basic permissions required are `list_all_objects` & `edit_own_objects`.
2. Get an API token for authenticating API requests. [Read more](https://docs.splunk.com/Documentation/Splunk/9.2.0/Security/Setupauthenticationwithtokens) on how to set up and get API keys.

Identify your Splunk instance details:
1. Determine the Host (IP address or hostname) and Port (default is 8089 for Splunk's management API) of the Splunk instance you wish to connect to.

---
**NOTE**
Make sure to follow this [Guide](https://docs.splunk.com/Documentation/Splunk/9.2.0/Alert/ConfigureWebhookAllowList) to configure your webhook allow list to allow your `keep` deployment.
+--- + + +## Useful Links + +- [Splunk Python SDK](https://dev.splunk.com/view/python-sdk/SP-CAAAEBB) +- [Splunk Webhook](https://docs.splunk.com/Documentation/Splunk/9.2.0/Alert/Webhooks) +- [Splunk Webhook Allow List](https://docs.splunk.com/Documentation/Splunk/9.2.0/Alert/ConfigureWebhookAllowList) +- [Splunk Permissions and Roles](https://docs.splunk.com/Documentation/Splunk/9.2.0/Security/Rolesandcapabilities) +- [Splunk API tokens](https://docs.splunk.com/Documentation/Splunk/9.2.0/Security/Setupauthenticationwithtokens) + diff --git a/docs/providers/documentation/squadcast-provider.mdx b/docs/providers/documentation/squadcast-provider.mdx new file mode 100644 index 0000000000..d7154ad473 --- /dev/null +++ b/docs/providers/documentation/squadcast-provider.mdx @@ -0,0 +1,45 @@ +--- +title: "Squadcast Provider" +sidebarTitle: "Squadcast Provider" +description: "Squadcast provider is used for creating incidents and notes in Squadcast" +--- + +## Inputs + +The `notify` function takes the following parameters as inputs: + +- `notify_type` (required): Takes either `incident` or `notes`, depending on whether you want to create an incident or a note. +1. ##### parameters for `incident` + - `message` (required): This will be the incident message. + - `description` (required): This will be the incident description. + - `tags` (optional): Tags for the incident. It should be in dict format. + - `priority` (optional): Priority of the incident. + - `status` (optional): Status of the event. + - `event_id` (optional): Used to resolve an incident. + - `additional_json` (optional): Additional JSON data to be sent with the incident. +2. ##### parameters for `notes` + - `message` (required): The message of the note. + - `incident_id` (required): ID of the incident where the note has to be created. + - `attachments` (optional): List of attachments for the notes. + +See the [documentation](https://support.squadcast.com/integrations/incident-webhook-incident-webhook-api) for more. + +## Authentication Parameters
The Squadcast provider requires at least one of the following authentication parameters:

- `refresh_token` (optional): Your Squadcast refresh_token.
- `webhook_url` (optional): URL of your `incidents_webhook`.

See [Squadcast Refresh Tokens](https://support.squadcast.com/terraform-and-api-documentation/public-api-refresh-token#from-your-profile-page) for more.

## Connecting with the Provider

1. Go to [Refresh Tokens](https://support.squadcast.com/terraform-and-api-documentation/public-api-refresh-token#from-your-profile-page) to see how to create a `refresh_token`.
2. Visit the [documentation](https://support.squadcast.com/integrations/incident-webhook-incident-webhook-api) to learn how to set up `incident_webhooks` and get the `webhook_url`.


## Useful Links

- [Squadcast Incident API](https://support.squadcast.com/integrations/incident-webhook-incident-webhook-api)
- [Squadcast Refresh Tokens](https://support.squadcast.com/terraform-and-api-documentation/public-api-refresh-token#from-your-profile-page)
- [Incident Notes](https://support.squadcast.com/incidents-page/incident-notes) diff --git a/docs/providers/documentation/ssh-provider.mdx b/docs/providers/documentation/ssh-provider.mdx new file mode 100644 index 0000000000..0ec8acd469 --- /dev/null +++ b/docs/providers/documentation/ssh-provider.mdx @@ -0,0 +1,36 @@ +--- +title: "SSH" +sidebarTitle: "SSH Provider" +description: "The `SSH Provider` executes SSH commands on a remote server and returns their output."
+--- + +## Inputs + +- command [**mandatory**]: The command to be executed +- \*\*kwargs [**optional**]: Extra parameters to be formatted into the command (can be other steps' output, for example) + +## Outputs + +A list of lines read from the remote SSH server, from both **stdout** and **stderr**. + +## Authentication Parameters + +This section describes the authentication configuration required for the `SshProvider`. The authentication configuration includes the following fields: + +- `host`: The hostname of the SSH server. +- `user`: The username to use for the SSH connection. +- `port`: The port to use for the SSH connection. Defaults to 22. +- `pkey`: The private key to use for the SSH connection. If provided, the connection will be established using this private key instead of a password. +- `password`: The password to use for the SSH connection. If the private key is not provided, the connection will be established using this password. + +## Connecting with the Provider + +The `SshProvider` class provides a way to execute SSH commands and get their output. The class uses the `paramiko` library to establish an SSH connection to a server and execute commands. + +## Notes + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Useful Links + +- https://www.ssh.com/academy/ssh/keygen diff --git a/docs/providers/documentation/statuscake-provider.mdx b/docs/providers/documentation/statuscake-provider.mdx new file mode 100644 index 0000000000..d4bde36444 --- /dev/null +++ b/docs/providers/documentation/statuscake-provider.mdx @@ -0,0 +1,24 @@ +--- +title: "StatusCake" +sidebarTitle: "StatusCake Provider" +description: "StatusCake allows you to monitor your website and APIs and send alerts to Keep" +--- + +## Authentication Parameters + +The StatusCake provider requires the following authentication parameters: + +- `Statuscake API Key`: The API key for the StatusCake account. This is required for the StatusCake provider. + +## Connecting with the Provider + +Obtain a StatusCake API Key: + +1. Create an account on [StatusCake](https://www.statuscake.com/). +2. After logging in, go to My Account under [Account Settings](https://app.statuscake.com/User.php). +3. Under Manage API Keys, generate a new API key or use the default key. +4. Copy the API key. This will be used as the `Statuscake API Key` in the provider settings. + +## Useful Links + +- [StatusCake](https://www.statuscake.com/) diff --git a/docs/providers/documentation/sumologic-provider.mdx b/docs/providers/documentation/sumologic-provider.mdx new file mode 100644 index 0000000000..6e1be21b29 --- /dev/null +++ b/docs/providers/documentation/sumologic-provider.mdx @@ -0,0 +1,36 @@ +--- +title: "SumoLogic Provider" +sidebarTitle: "SumoLogic Provider" +description: "The SumoLogic provider enables webhook installations for receiving alerts in Keep" +--- + +## Overview + +The SumoLogic provider facilitates receiving alerts from Monitors in SumoLogic using a Webhook Connection. + +## Authentication Parameters + +- `sumoLogicAccessId`: The Access ID used to authenticate with SumoLogic's API. +- `sumoLogicAccessKey`: The Access Key used to authenticate with SumoLogic's API. +- `deployment`: The deployment code of your SumoLogic account, used to determine the API endpoint. + +## Scopes + +- `authenticated`: Mandatory for all operations, ensures the user is authenticated. +- `authorized`: Mandatory for querying incidents, ensures the user has read access. + +## Connecting with the Provider + +1.
Follow the instructions [here](https://help.sumologic.com/docs/manage/security/access-keys/) to get your Access Key & Access ID +2. Make sure the user has roles with the following capabilities: + - `manageScheduledViews` + - `manageConnections` + - `manageUsersAndRoles` +3. Find your `deployment` from [here](https://api.sumologic.com/docs/#section/Getting-Started/API-Endpoints); Keep will automatically figure out your endpoint. + +## Useful Links + +- [SumoLogic API Documentation](https://api.sumologic.com/docs/#section/Getting-Started) +- [SumoLogic Access_Keys](https://help.sumologic.com/docs/manage/security/access-keys/) +- [SumoLogic Roles Management](https://help.sumologic.com/docs/manage/users-roles/roles/create-manage-roles/) +- [SumoLogic Deployments](https://api.sumologic.com/docs/#section/Getting-Started/API-Endpoints) diff --git a/docs/providers/documentation/teams-provider.mdx b/docs/providers/documentation/teams-provider.mdx new file mode 100644 index 0000000000..2a907da426 --- /dev/null +++ b/docs/providers/documentation/teams-provider.mdx @@ -0,0 +1,53 @@ +--- +title: "Teams Provider" +sidebarTitle: "Teams Provider" +description: "Teams Provider allows notifying alerts to Microsoft Teams channels." +--- + +## Inputs + +The `notify` function in the `TeamsProvider` class takes the following parameters: + +```python
kwargs (dict):
    message (str): The message to send. *Required*
    typeCard (str): The card type. (MessageCard is default)
    themeColor (str): Hexadecimal color.
    sections (array): Array of custom information sections
``` + +## Outputs + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Authentication Parameters + +The TeamsProviderAuthConfig class takes the following parameters: + +- `webhook_url` (str): The incoming webhook URL associated with the channel; required to post messages to that channel. _Required_ + +## Connecting with the Provider + +1. Open the Microsoft Teams application or website and select the team or channel where you want to add the webhook. + +2. Click on the three-dot icon next to the team or channel name and select "Connectors" from the dropdown menu. + +3. Search for "Incoming Webhook" and click on the "Add" button. + +4. Give your webhook a name and an optional icon, then click on the "Create" button. + +5. Copy the webhook URL that is generated and save it for later use. + +6. Select the options that you want to configure for your webhook, such as the default name and avatar that will be used when posting messages. + +7. Click on the "Save" button to save your webhook settings. + +You can now use the webhook URL to send messages to the selected channel or team in Microsoft Teams. + +## Notes + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Useful Links + +- https://learn.microsoft.com/pt-br/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook diff --git a/docs/providers/documentation/telegram-provider.mdx b/docs/providers/documentation/telegram-provider.mdx new file mode 100644 index 0000000000..89ff0b4382 --- /dev/null +++ b/docs/providers/documentation/telegram-provider.mdx @@ -0,0 +1,34 @@ +--- +title: "Telegram Provider" +description: "Telegram Provider allows notifying alerts to Telegram chats." +--- + +## Inputs + +The `notify` function in the `TelegramProvider` class takes the following parameters: + +```python
kwargs (dict):
    message (str): The message to send.
*Required*
    chat_id (str): The chat ID to send the message to. *Required* (How to get a chat ID: https://stackoverflow.com/questions/32423837/telegram-bot-how-to-get-a-group-chat-id)
``` + +## Authentication Parameters + +The TelegramProviderAuthConfig class takes the following parameters: + +- bot_token (str): The token of the bot. *Required* + +## Connecting with the Provider + +To use the Telegram Provider, you'll need a bot token. +How to create a Telegram bot: https://core.telegram.org/bots#how-do-i-create-a-bot + +## Useful Links + +- Telegram Bot docs - https://core.telegram.org/bots +- Telegram how to get a chat ID - https://stackoverflow.com/questions/32423837/telegram-bot-how-to-get-a-group-chat-id + +## Example + +See `examples/alerts/db_disk_space_telegram.yml` for a full working example. diff --git a/docs/providers/documentation/template.mdx b/docs/providers/documentation/template.mdx new file mode 100644 index 0000000000..5eff334d85 --- /dev/null +++ b/docs/providers/documentation/template.mdx @@ -0,0 +1,28 @@ +--- +title: "Template" +description: "Template Provider is a template for newly added provider's documentation" +--- + +## Inputs + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Outputs + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Authentication Parameters + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Connecting with the Provider + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Notes + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ + +## Useful Links + +_No information yet, feel free to contribute it using the "Edit this page" link at the bottom of the page_ diff --git a/docs/providers/documentation/trello-provider.mdx b/docs/providers/documentation/trello-provider.mdx new file mode 100644 index 0000000000..8c38148c2f --- /dev/null +++ b/docs/providers/documentation/trello-provider.mdx @@ -0,0 +1,33 @@ +--- +title: "Trello" +sidebarTitle: "Trello Provider" +description: "Trello provider is a provider used to query data from Trello" +--- + +## Inputs + +The `query` function takes the following parameters as inputs: + +- `board_id`: Required. Trello board ID +- `filter`: Optional. Comma-separated list of Trello events to query; the default value is 'createCard' + +## Outputs + +## Authentication Parameters + +The `query` function requires an `api_key` and `api_token` from Trello, which can be obtained by creating a custom Power-Up in the Trello admin console. + +## Connecting with the Provider + +1. Go to https://trello.com/power-ups/admin to create a custom Power-Up. +2. Create a new Power-Up and add basic details like name, email address, etc. +3. Once it is created, navigate into the Power-Up and go to the API Key section. +4. Click on `Generate a new API key`; the generated key will be used as `api_key`. +5. To generate the `api_token`, there is an option to generate a token manually; click on it and authorize the application. A usage sketch follows below.
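
## Example of usage

Here is a minimal query sketch using the inputs documented above. The provider key name (`trello-demo`) is an illustrative assumption, and `<your-board-id>` is a placeholder you would replace with a real board ID:

```yaml
# Hypothetical example: query newly created cards on a Trello board.
alert:
  id: trello-new-cards
  description: Query newly created cards on a Trello board
  steps:
    - name: trello-cards
      provider:
        type: trello
        config: "{{ providers.trello-demo }}"
        with:
          board_id: "<your-board-id>"
          filter: "createCard"
```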
+ +## Notes + +## Useful Links + +- https://developer.atlassian.com/cloud/trello/guides/power-ups/your-first-power-up/ +- https://trello.com/power-ups/admin diff --git a/docs/providers/documentation/twilio-provider.mdx b/docs/providers/documentation/twilio-provider.mdx new file mode 100644 index 0000000000..b0f3d959a1 --- /dev/null +++ b/docs/providers/documentation/twilio-provider.mdx @@ -0,0 +1,32 @@ +--- +title: "Twilio Provider" +description: "Twilio Provider allows notifying alerts via SMS using Twilio." +--- + +## Inputs + +The `notify` function in the `TwilioProvider` class takes the following parameters: + +```python
kwargs (dict):
    message_body (str): The message to send. *Required*
    to_phone_number (str): The phone number to which you want to send the SMS. *Required*
``` + +## Authentication Parameters + +The TwilioProviderAuthConfig class takes the following parameters: + +- account_sid (str): Twilio account SID. *Required* +- api_token (str): Twilio API token. *Required* +- from_phone_number (str): Twilio phone number from which the SMS alert will be sent. *Required* + +## Connecting with the Provider + +To use the Twilio Provider, you'll need an API token. +How to create a Twilio API token: https://support.twilio.com/hc/en-us/articles/223136027-Auth-Tokens-and-How-to-Change-Them + +## Useful Links + +- Twilio API token - https://support.twilio.com/hc/en-us/articles/223136027-Auth-Tokens-and-How-to-Change-Them +- Twilio phone number - https://www.twilio.com/en-us/guidelines/regulatory diff --git a/docs/providers/documentation/uptimekuma-provider.mdx b/docs/providers/documentation/uptimekuma-provider.mdx new file mode 100644 index 0000000000..b52f02769d --- /dev/null +++ b/docs/providers/documentation/uptimekuma-provider.mdx @@ -0,0 +1,31 @@ +--- +title: "UptimeKuma" +sidebarTitle: "UptimeKuma Provider" +description: "UptimeKuma allows you to monitor your website and APIs and send alerts to Keep" +--- + +## Authentication Parameters + +The UptimeKuma provider requires the following authentication parameters: + +- `UptimeKuma Host URL`: The URL of the UptimeKuma instance. This is required for the UptimeKuma provider. +- `UptimeKuma Username`: The username for the UptimeKuma account. This is required for the UptimeKuma provider. +- `UptimeKuma Password`: The password for the UptimeKuma account. This is required for the UptimeKuma provider. + +## Connecting with the Provider + +Obtain the UptimeKuma Host URL, Username and Password: + +1. UptimeKuma can only be self-hosted. You need to have an instance of UptimeKuma running. +2. After setting up UptimeKuma, you can obtain the Host URL, Username and Password. +3. Use the obtained Host URL, Username and Password in the provider settings. + +## Webhooks Integration + +1. Connect to the UptimeKuma provider with the required parameters. +2. Use the Keep Backend API URL as the Host URL in UptimeKuma. [https://api.keephq.dev](https://api.keephq.dev) (Default) +3. Navigate to Account Settings in Keep, proceed to API Keys, and generate an API key for the webhook.
+ +## Useful Links + +- [UptimeKuma](https://uptime.kuma.pet/) diff --git a/docs/providers/documentation/victoriametrics-provider.mdx b/docs/providers/documentation/victoriametrics-provider.mdx new file mode 100644 index 0000000000..5b6f8f760b --- /dev/null +++ b/docs/providers/documentation/victoriametrics-provider.mdx @@ -0,0 +1,59 @@ +--- +title: "Victoriametrics Provider" +sidebarTitle: "Victoriametrics Provider" +description: "The VictoriametricsProvider allows you to fetch alerts from Victoriametrics." +--- + +## Authentication Parameters + +The Victoriametrics provider requires the following authentication parameters: + +- `VMAlertHost`: The hostname or IP address where VMAlert is running. Example: `localhost`, `192.168.1.100`, or `vmalert.mydomain.com`. +- `VMAlertPort`: The port number on which VMAlert is listening. Example: 8880 (if VMAlert is set to listen on port 8880). + +## Connecting with the Provider + +1. Ensure you have a running instance of VMAlert accessible by the host and port specified. +2. Include the host and port information in your Victoriametrics provider configuration when initializing the provider. + +## Querying Victoriametrics + +The Victoriametrics provider allows you to query Victoriametrics through the `query` and `query_range` types. The following are the parameters available for querying: + +1. `query` type: + - `query`: The query to execute on Victoriametrics. Example: `sum(rate(http_requests_total{job="api-server"}[5m]))`. + - `start`: The time to query the data for. Example: `2024-01-01T00:00:00Z` + +2. `query_range` type: + - `query`: The query to execute on Victoriametrics. Example: `sum(rate(http_requests_total{job="api-server"}[5m]))`. + - `start`: The start time to query the data for. Example: `2024-01-01T00:00:00Z` + - `end`: The end time to query the data for. Example: `2024-01-01T00:00:00Z` + - `step`: The step size to use for the query. Example: `15s` + +## Push alerts to Keep using webhooks + +You can push alerts to Keep without connecting to Victoriametrics. This provider takes advantage of the configurable webhooks available with Prometheus Alertmanager. Use the following template to configure AlertManager: +```yml
route:
  receiver: "keep"
  group_by: ['alertname']
  group_wait: 15s
  group_interval: 15s
  repeat_interval: 1m
  continue: true

receivers:
- name: "keep"
  webhook_configs:
  - url: '{keep_webhook_api_url}'
    send_resolved: true
    http_config:
      basic_auth:
        username: api_key
        password: {api_key}
``` + +## Useful Links + +- [Victoriametrics](https://victoriametrics.com/docs/) +- [VMAlert](https://victoriametrics.github.io/vmalert.html) diff --git a/docs/providers/documentation/webhook-provider.mdx b/docs/providers/documentation/webhook-provider.mdx new file mode 100644 index 0000000000..bfcf1a38c4 --- /dev/null +++ b/docs/providers/documentation/webhook-provider.mdx @@ -0,0 +1,17 @@ +--- +title: 'Webhook' +sidebarTitle: 'Webhook Provider' +description: 'A webhook is a method used to send real-time data from one application to another whenever a specific event occurs' +--- + +## Authentication Parameters + +The Webhook provider requires the following authentication parameters: + +- `Webhook URL`: The URL to send the webhook to. +- `HTTP Method`: The HTTP method to use when sending the webhook. Default is `POST`. Supported methods are `GET`, `POST`, `PUT` and `DELETE`. +- `HTTP basic authentication - Username`: The username to use for HTTP basic authentication.
+- `HTTP basic authentication - Password`: The password to use for HTTP basic authentication.
+- `API key`: The API key to use for authentication.
+- `Headers`: Custom headers to send with the webhook.
+
+The provider supports both HTTP basic authentication and API key authentication.
diff --git a/docs/providers/documentation/websocket-provider.mdx b/docs/providers/documentation/websocket-provider.mdx
new file mode 100644
index 0000000000..76299436bd
--- /dev/null
+++ b/docs/providers/documentation/websocket-provider.mdx
@@ -0,0 +1,68 @@
+---
+title: "Websocket"
+---
+
+# Websocket Provider
+
+WebsocketProvider is a class that implements a simple websocket provider.
+
+## Inputs
+The `query` function of `WebsocketProvider` takes the following arguments:
+
+- `socket_url` (str): The websocket URL to query.
+- `timeout` (int | None, optional): Connection timeout. Defaults to None.
+- `data` (str | None, optional): Data to send through the websocket. Defaults to None.
+- `**kwargs` (optional): Additional optional parameters can be provided as key-value pairs.
+
+See the [websocket-client documentation](https://websocket-client.readthedocs.io/en/latest/api.html#websocket.WebSocket.send) for more information.
+
+## Outputs
+The `query` function of `WebsocketProvider` outputs the following format:
+
+```json
+{
+  "connection": true,
+  "data": "Received data from the websocket"
+}
+```
+
+The `connection` field indicates whether the websocket connection was successful (`true`) or not (`false`). The `data` field contains the data received from the websocket.
+If the `connection` field indicates an unsuccessful connection (`false`), the object will also include an `error` field with details about the failed connection.
+
+## Authentication Parameters
+The Websocket provider does not require any specific authentication parameters.
+
+## Connecting with the Provider
+To connect with the Websocket provider and perform queries, follow these steps:
+
+1. Initialize the provider and provider configuration in your system.
+2. Use the `query` function of the WebsocketProvider to interact with the websocket.
+
+Example usage:
+```yaml
+alert:
+  id: check-websocket-is-up
+  description: Monitor that this HTTP endpoint is up and running
+  steps:
+    - name: websocket-test
+      provider:
+        type: websocket
+        with:
+          socket_url: "ws://echo.websocket.events"
+  actions:
+    - name: trigger-slack-websocket
+      condition:
+        - name: assert-condition
+          type: assert
+          assert: "{{ steps.websocket-test.results.connection }} == true"
+      provider:
+        type: slack
+        config: "{{ providers.slack-demo }}"
+        with:
+          message: "Could not connect to ws://echo.websocket.events using websocket"
+      on-failure:
+        provider:
+          type: slack
+          config: "{{ providers.slack-demo }}"
+```
diff --git a/docs/providers/documentation/zabbix-provider.mdx b/docs/providers/documentation/zabbix-provider.mdx
new file mode 100644
index 0000000000..4bebd743c6
--- /dev/null
+++ b/docs/providers/documentation/zabbix-provider.mdx
@@ -0,0 +1,107 @@
+---
+title: "Zabbix"
+sidebarTitle: "Zabbix Provider"
+description: "Zabbix provider allows you to pull/push alerts from Zabbix"
+---
+
+  Please note that we currently only support Zabbix version 6 and above
+  (>= 6.0)
+
+## Authentication Parameters
+
+The `zabbix_frontend_url` and `auth_token` are required for connecting to the Zabbix provider. You can obtain them as described in the ["Connecting with the Provider"](./zabbix-provider#connecting-with-the-provider) section.
+
+## Connecting with the Provider
+
+### API Key
+
+To obtain a Zabbix authentication token, follow these steps, divided into 3 categories ([Docs](https://www.zabbix.com/documentation/current/en/manual/web_interface/frontend_sections/users/api_tokens)):
+
+First, log in to your Zabbix account (the provided `zabbix_frontend_url`) with a privileged user.
+
+#### Create a User Role
+
+1. Navigate to the `Users` -> `User Roles` section.
+2. In the top right corner of the screen, click `Create user role`.
+3. Give the role an indicative name (e.g. Keep Role).
+4. In the `User type` selectbox, select `Super Admin`.
+
+- This is because some of the scopes we need are available to the `Super Admin` user type only. [See here](https://www.zabbix.com/documentation/current/en/manual/api/reference/mediatype/create)
+
+5. Uncheck everything except one `Access to UI elements` entry, which is required for any role.
+6. In the `API methods` section, select `Allow list` and fill in the scopes as [mentioned below](./zabbix-provider#scopes), in the Scopes section.
+
+#### Create a user
+
+1. Navigate to the `Users` -> `Users` section.
+2. Follow the instructions to add a new user. Give it an indicative username (e.g. KeepUser).
+3. In the `Permissions` tab, select the role you have just created.
+4. Click `Add`.
+
+#### Create API token
+
+1. Navigate to the `Users` -> `API tokens` section.
+2. In the top right corner of the screen, click `Create API token`.
+3. Give the API token an indicative name (e.g. Keep Token).
+4. Select the user you have just created.
+5. Unselect the `Set expiration date and time` checkbox and click `Add`.
+6. Copy the generated API token and keep it for further use in Keep.
+
+## Scopes
+
+Certain scopes may be required to perform specific actions or queries via the Zabbix provider. Below is a summary of relevant scopes and their use cases:
+
+- `problem.get`
+  | Required: `True`
+  | Description: `The method allows to retrieve problems.`
+- `mediatype.get`
+  | Required: `False`
+  | Required for Webhook: `True`
+  | Description: `The method allows to retrieve media types.`
+- `mediatype.update`
+  | Required: `False`
+  | Required for Webhook: `True`
+  | Description: `This method allows to update existing media types.`
+- `mediatype.create`
+  | Required: `False`
+  | Required for Webhook: `True`
+  | Description: `This method allows to create new media types.`
+- `user.get`
+  | Required: `False`
+  | Required for Webhook: `True`
+  | Description: `The method allows to retrieve users.`
+- `user.update`
+  | Required: `False`
+  | Required for Webhook: `True`
+  | Description: `This method allows to update existing users.`
+
+## Notes
+
+  When installing the Zabbix webhook, Keep automatically adds a new media type
+  named Keep to your media types.
+
+  After the new media type is added, Keep automatically attaches this media
+  type as a medium to all existing users, so that all alerts coming from
+  Zabbix reach Keep.
+
+## Webhook Integration Modifications
+
+The automatic webhook integration grants Keep access to the following scopes within the Zabbix instance:
+- `mediatype.get`
+- `mediatype.update`
+- `mediatype.create`
+- `user.get`
+- `user.update`
+
+You can view the webhook settings under **Alerts > Media Types**.
+
+## Useful Links
+
+- [Zabbix API](https://www.zabbix.com/documentation/current/en/manual/api)
diff --git a/docs/providers/documentation/zenduty-provider.mdx b/docs/providers/documentation/zenduty-provider.mdx
new file mode 100644
index 0000000000..6d5c972f76
--- /dev/null
+++ b/docs/providers/documentation/zenduty-provider.mdx
@@ -0,0 +1,37 @@
+---
+title: "Zenduty"
+sidebarTitle: "Zenduty Provider"
+description: "Zenduty docs"
+---
+
+![User key](/images/zenduty.jpeg)
+
+## Inputs
+
+The Zenduty provider takes `title`, `summary` and `service` as inputs, which are used for the incident.
+The `query` method of the `ZendutyProvider` class takes the following inputs:
+
+- `title`: The title of the Zenduty incident.
+- `summary`: The summary of the Zenduty incident.
+- `service`: The service of the Zenduty incident.
+
+## Outputs
+
+None.
+
+## Authentication Parameters
+
+The Zenduty provider uses an API key as its authentication method.
+
+- `api_key` - Zenduty API key
+
+Authentication configuration example:
+
+```
+zenduty:
+  authentication:
+    api_key: XXXXXXXXXXXXXXXX
+```
+
+## Useful Links
+
+- https://docs.zenduty.com/docs/api
diff --git a/docs/providers/fingerprints.mdx b/docs/providers/fingerprints.mdx
new file mode 100644
index 0000000000..cee4290287
--- /dev/null
+++ b/docs/providers/fingerprints.mdx
@@ -0,0 +1,77 @@
+---
+title: "Fingerprints"
+sidebarTitle: "Fingerprints"
+description: "Fingerprints are unique identifiers associated with alert instances in Keep. Every provider declares the fields fingerprints are calculated upon"
+---
+
+  A fingerprint defaults to the alert name if the provider does not declare
+  fingerprint fields.
+
+Fingerprints serve several important purposes in the context of alerting within Keep:
+
+### De-Duplication
+
+Alert fingerprints are used to prevent duplicate enrichments/workflow triggers for the same underlying alert.
+When Keep receives an alert, it calculates a fingerprint based on the configured fields declared within the provider.
+If two alerts have the same fingerprint, Keep considers them to be duplicates and will present one of them.
+This helps reduce alert noise and prevent unnecessary workflow triggers/enrichments.
+
+### Grouping
+
+Keep uses alert fingerprints to group related alerts together.
+Alerts with the same fingerprint are considered to be part of the same group, indicating that they are triggered by the same underlying condition or problem.
+Grouping alerts makes it easier for operators to understand the relations between different alert sources and the root cause of an issue, and to take appropriate action faster.
+
+### Silencing
+
+Alert fingerprints are used in third-party tools to manage silences/mutes.
+Silencing allows operators to temporarily suppress alerts with specific fingerprints, providing a way to acknowledge and handle known issues without generating additional notifications/triggers.
+
+### Visualization
+
+Alert fingerprints can also be used for visualization and analysis purposes.
+They help in tracking the history and status of alerts over time and provide a means to correlate alerts with specific conditions or changes in the monitored system.
+
+The process of generating a fingerprint involves hashing the fields configured in the provider and their values associated with an alert instance.
+This results in a fixed-length, hexadecimal string that uniquely identifies that alert.
+When Keep receives an alert, it calculates the fingerprint to determine whether it should trigger a workflow, be grouped, or be silenced.
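+
+To make the mechanics concrete, here is a minimal sketch of such a calculation. This is an illustrative helper, not Keep's actual implementation - the hash algorithm and the fallback to the alert name are assumptions:
+
+```python
+import hashlib
+
+def calculate_fingerprint(alert: dict, fingerprint_fields: list[str]) -> str:
+    # Fall back to the alert name when no fingerprint fields are declared
+    fields = fingerprint_fields or ["name"]
+    digest = hashlib.sha256()
+    for field in fields:
+        value = alert.get(field)
+        if value is not None:
+            digest.update(str(value).encode("utf-8"))
+    return digest.hexdigest()  # fixed-length hexadecimal identifier
+
+# Two Datadog-style events with the same groups/monitor_id get the same
+# fingerprint, even though their names differ:
+a = {"name": "CPU high", "groups": "host:web-1", "monitor_id": 42}
+b = {"name": "CPU high (recovered)", "groups": "host:web-1", "monitor_id": 42}
+assert calculate_fingerprint(a, ["groups", "monitor_id"]) == calculate_fingerprint(
+    b, ["groups", "monitor_id"]
+)
+```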
+
+In summary, Keep alert fingerprints are essential for managing and organizing alerts in every third-party system.
+They help prevent duplicates, group related alerts, enable silencing, and facilitate analysis and visualization of alert data, ultimately aiding in the effective operation and maintenance of monitored systems.
+
+### Examples
+
+This is the base provider class implementation for fingerprint fields:
+
+```python base_provider.py
+class BaseProvider(metaclass=abc.ABCMeta):
+    OAUTH2_URL = None
+    PROVIDER_SCOPES: list[ProviderScope] = []
+    PROVIDER_METHODS: list[ProviderMethod] = []
+    FINGERPRINT_FIELDS: list[str] = []
+```
+
+This is Datadog's provider implementation for fingerprint fields, where, as an example, we calculate the fingerprint based on the event groups and monitor id:
+
+```python datadog_provider.py
+class DatadogProvider(BaseProvider):
+    """
+    Datadog provider class.
+    """
+
+    PROVIDER_SCOPES = [
+        ...
+    ]
+    PROVIDER_METHODS = [
+        ...
+    ]
+    FINGERPRINT_FIELDS = ["groups", "monitor_id"]
+```
+
+  Keep allows for customization in anything related to fingerprints. If you
+  want to change the way a specific provider calculates the fingerprint of an
+  alert, you can simply configure the fields you require.
+
diff --git a/docs/providers/getting-started.mdx b/docs/providers/getting-started.mdx
new file mode 100644
index 0000000000..fb10c800d9
--- /dev/null
+++ b/docs/providers/getting-started.mdx
@@ -0,0 +1,27 @@
+---
+title: "Providers"
+sidebarTitle: "Getting Started"
+description: "We tried our best to cover all common providers."
+---
+
+Click [here](https://github.com/keephq/keep/issues/new?assignees=&labels=feature,provider&template=feature_request.md&title=Missing%20PROVIDER_NAME) if you feel like we're missing some and we'll do our best to add them ASAP.
+
+Common providers include:
+
+  AWS, GCP, Azure, etc.
+
+  Sentry, New Relic, Datadog, etc.
+
+  PagerDuty, OpsGenie, etc.
+
+  Email, Slack, Discord, Microsoft Teams, etc.
+
+  MySQL, PostgreSQL, etc.
+
diff --git a/docs/providers/overview.mdx b/docs/providers/overview.mdx
new file mode 100644
index 0000000000..0888065178
--- /dev/null
+++ b/docs/providers/overview.mdx
@@ -0,0 +1,708 @@
+---
+title: "Overview"
+sidebarTitle: "Overview"
+description: "A Provider is a component of Keep that enables it to interact with third-party products. It is implemented as extensible Python code, making it easy to enhance and customize."
+---
+
+Providers are core components of Keep that allow Keep to either query data, send notifications, get alerts from, or manage third-party tools.
+
+These third-party tools include, among others, Datadog, Cloudwatch, and Sentry for data querying and/or alert management, and Slack, Resend, Twilio, and PagerDuty for notifications/incidents.
+
+By leveraging Keep Providers, users are able to deeply integrate Keep with the tools they use and trust, providing them with a flexible and powerful way to manage these tools with ease and from a single pane.
+
+{/* Grid of provider Cards linking to the individual provider docs
+    (component markup not recoverable from this diff) */}
+
diff --git a/docs/providers/what-is-a-provider.mdx b/docs/providers/what-is-a-provider.mdx
new file mode 100644
index 0000000000..69a4045800
--- /dev/null
+++ b/docs/providers/what-is-a-provider.mdx
@@ -0,0 +1,11 @@
+---
+title: "❓ What is a Provider"
+sidebarTitle: "What is a Provider?"
+description: "A Provider is a component of Keep that enables it to interact with third-party products. It is implemented as extensible Python code, making it easy to enhance and customize."
+---
+
+Providers are core components of Keep that allow Keep to either query data or send notifications to products such as Datadog, Cloudwatch, and Sentry for data querying, and Slack, Email, and PagerDuty for sending notifications about alerts.
+
+By leveraging Keep Providers, developers are able to integrate Keep with the tools they use and trust, providing them with a flexible and powerful way to manage their alerts.
+
+![](/images/providers.png)
diff --git a/docs/workflows/conditions/assert.mdx b/docs/workflows/conditions/assert.mdx
new file mode 100644
index 0000000000..820cb1c81b
--- /dev/null
+++ b/docs/workflows/conditions/assert.mdx
@@ -0,0 +1,23 @@
+---
+sidebarTitle: "Assert"
+---
+
+### The assert condition implements the "python assert" behaviour
+
+```yaml
+- type: assert
+  name: REQUIRED. Must be unique among the list.
+  assert: REQUIRED. The assert expression to evaluate.
+```
+
+### Example
+
+```yaml
+condition:
+  - type: assert
+    name: assert-condition
+    assert: "{{ steps.service-is-up.results.status_code }} == 200"
+```
+
+- If the `steps.service-is-up.results.status_code` step returns 200 => `assert 200 == 200` => the condition returns _False_ (since the assert passes)
+- If the `steps.service-is-up.results.status_code` step returns 404 => `assert 404 == 200` => the condition returns _True_ (since the assert fails)
diff --git a/docs/workflows/conditions/stddev.mdx b/docs/workflows/conditions/stddev.mdx
new file mode 100644
index 0000000000..d61a69da1c
--- /dev/null
+++ b/docs/workflows/conditions/stddev.mdx
@@ -0,0 +1,71 @@
+---
+title: "🎯 Stddev (Standard Deviation)"
+sidebarTitle: "stddev"
+description: "The 'stddev' condition implements standard deviation logic. It takes a list or a list of lists, along with a standard deviation threshold ('compare_to'), and returns all values that are farther away from the mean than the standard deviation."
+---
+
+```yaml
+- type: stddev
+  name: REQUIRED. Must be unique among the list.
+  value: REQUIRED. The input of the standard deviation algorithm.
+  pivot_column:
+    OPTIONAL. Integer.
If supplied, each item of `value` is treated as
+    a list, and the `pivot_column` is extracted from each item.
+    For example, if pivot_column is 1, then the second column
+    (zero-based) of every item of the value list is used for
+    the calculation (see example for more details)
+  compare_to: REQUIRED. Integer. The standard deviation to compare against.
+```
+
+### Example
+
+```yaml
+condition:
+  - name: stddev-condition
+    type: stddev
+    value: "{{ steps.db-step.results }}"
+    pivot_column: 2
+    compare_to: 1
+```
+
+For this example, the output of the `db-step` step is a list of rows from the db:
+
+`[(1, 2, 3), (1, 4, 5), (7, 8, 9)]`
+
+The `pivot_column` is 2, hence the values for the stddev calculation are:
+`3`, `5` and `9`.
+
+Next, the stddev condition calculates the standard deviation:
+
+```math
+standard deviation = sqrt(sum((x - mean)^2) / N)
+mean = (3 + 5 + 9) / 3 = 5.666666666666667
+```
+
+And the standard deviation (sd) is:
+
+```
+standard deviation = sqrt(((3-5.666666666666667)^2 + (5-5.666666666666667)^2 + (9-5.666666666666667)^2) / 3)
+                   = sqrt((7.111111111111111 + 0.4444444444444444 + 11.11111111111111) / 3)
+                   = sqrt(6.222222222222222)
+                   = 2.494438257849294
+```
+
+Therefore, the standard deviation of the dataset [3, 5, 9] is approximately 2.494.
+
+Thus, the values that are more than 1 standard deviation from the mean are 3 and 9, since they are outside the range of 5.666666666666667±2.494438257849294 (which is [3.1722, 8.1611]).
+
+### Same example without pivot_column
+
+Notice that we used `pivot_column` since the output of the `db-step` was a list of rows.
+If the output was just a list, we could skip it.
+
+For example, if the output of `db-step` was `(3, 5, 9)`, we could just use:
+
+```yaml
+condition:
+  - name: stddev-condition
+    type: stddev
+    value: "{{ steps.db-step.results }}"
+    compare_to: 1
+```
diff --git a/docs/workflows/conditions/threshold.mdx b/docs/workflows/conditions/threshold.mdx
new file mode 100644
index 0000000000..870ca40bf2
--- /dev/null
+++ b/docs/workflows/conditions/threshold.mdx
@@ -0,0 +1,27 @@
+---
+title: "🎯 Threshold"
+sidebarTitle: "Threshold"
+---
+
+### The threshold condition compares two values and returns _True_ if the comparison holds.
+
+```yaml
+- type: threshold
+  name: REQUIRED. Must be unique among the list.
+  value: REQUIRED. Left side of the comparison.
+  compare_to: REQUIRED. Right side of the comparison.
+  compare_type: OPTIONAL ("lt" or "gt". default is "gt")
+```
+
+### Example
+
+```yaml
+condition:
+  - type: threshold
+    name: threshold-condition
+    value: "{{ steps.db-no-space.results }}"
+    compare_to: 10
+```
+
+- If the `db-no-space` step returns 11 => `value` > 10 => the condition returns _True_
+- If the `db-no-space` step returns 9.6 => `value` < 10 => the condition returns _False_
diff --git a/docs/workflows/conditions/what-is-a-condition.mdx b/docs/workflows/conditions/what-is-a-condition.mdx
new file mode 100644
index 0000000000..51495a18ae
--- /dev/null
+++ b/docs/workflows/conditions/what-is-a-condition.mdx
@@ -0,0 +1,17 @@
+---
+title: "❓ What is a Condition"
+sidebarTitle: "What is a Condition?"
+---
+
+Generally speaking, a condition is:
+
+> A predefined rule that defines when an action should be run.
+
+In Keep's context, a condition is a predefined rule that decides whether an action should be triggered or not.
+
+Each condition has its own inputs/outputs.
+The currently supported conditions are:
+
+1. [Threshold](/workflows/conditions/threshold)
+2. [Assert](/workflows/conditions/assert)
+3. [Stddev](/workflows/conditions/stddev)
diff --git a/docs/workflows/examples/multi-step-alert.mdx b/docs/workflows/examples/multi-step-alert.mdx
new file mode 100644
index 0000000000..7469ade6d6
--- /dev/null
+++ b/docs/workflows/examples/multi-step-alert.mdx
@@ -0,0 +1,82 @@
+---
+title: "Multiple steps alert example"
+sidebarTitle: "Multi-Step Alert"
+description: "A breakdown of the alert and further explanations can be found at the bottom of this page."
+---
+
+```yaml
+# Check both databases prod1 and prod2 and alert if any of them has less than 10% disk space left.
+alert:
+  id: db-disk-space
+  description: Check that the DB has enough disk space
+  steps:
+    - name: db-prod1-no-space
+      provider:
+        type: mock
+        config: "{{ providers.db-server-mock }}"
+        with:
+          command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space
+          command_output: 91% # Mock
+    - name: db-prod2-no-space
+      provider:
+        type: mock
+        config: "{{ providers.db-server-mock }}"
+        with:
+          command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space
+          command_output: 94.5% # Mock
+  actions:
+    - name: trigger-telegram
+      condition:
+        - type: threshold
+          value: "{{ steps.db-prod1-no-space.results }}"
+          compare_to: 90% # Trigger if more than 90% full
+          alias: A
+        - type: threshold
+          value: "{{ steps.db-prod2-no-space.results }}"
+          compare_to: 90% # Trigger if more than 90% full
+          alias: B
+      # trigger the action if any of the conditions are met:
+      if: "{{ A }} or {{ B }}"
+      provider:
+        type: telegram
+        config:
+          authentication:
+            bot_token: "{{ env.TELEGRAM_BOT_TOKEN }}"
+        with:
+          chat_id: "{{ env.TELEGRAM_CHAT_ID }}"
+          message: Keep Alert Test
+
+providers:
+  db-server-mock:
+    description: Paper DB Server
+    authentication:
+```
+
+## Breakdown
+
+### Steps
+
+In this example we can see two steps:
+
+- db-prod1-no-space - checks the db space of db prod1
+- db-prod2-no-space - checks the db space of db prod2
+
+### Conditions
+
+The action has two threshold conditions:
+
+```
+condition:
+  - type: threshold
+    value: "{{ steps.this.results }}"
+    compare_to: 90% # Trigger if more than 90% full
+```
+
+But now we've added an `alias` to each condition, so it'll be easier to check it in the `action` itself.
+
+### Action (if statement)
+
+The action now uses the `if` statement to alert if **one** of the databases has less than 10% disk space left.
+
+We can use `if: "{{ A }} and {{ B }}"` to alert only if both databases have less than 10% disk space left.
+_Note that it's the default behavior, so you may achieve the same without specifying an `if` statement._
diff --git a/docs/workflows/examples/reusable-action-alert.mdx b/docs/workflows/examples/reusable-action-alert.mdx
new file mode 100644
index 0000000000..3833c84fa3
--- /dev/null
+++ b/docs/workflows/examples/reusable-action-alert.mdx
@@ -0,0 +1,145 @@
+---
+title: "Reusable Actions For Alert"
+sidebarTitle: "Reusable Actions For Alert"
+description: "This example shows how to check both databases `prod1` and `prod2`, determine if either of them hits the `90%` disk-space threshold, and then use an action template to send notifications to two Telegram channels."
+---
+
+Here is the full configuration:
+
+```yaml
+# Check both databases prod1 and prod2 and alert if any of them has less than 10% disk space left.
+alert:
+  id: db-disk-space
+  description: Check that the DB has enough disk space
+  steps:
+    - name: db-prod1-no-space
+      provider:
+        type: mock
+        config: "{{ providers.db-server-mock }}"
+        with:
+          command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space
+          command_output: 91% # Mock
+    - name: db-prod2-no-space
+      provider:
+        type: mock
+        config: "{{ providers.db-server-mock }}"
+        with:
+          command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space
+          command_output: 94.5% # Mock
+  actions:
+    - name: trigger-telegram1
+      use: @trigger-telegram
+      provider:
+        config:
+          authentication:
+            bot_token: "{{ env.TELEGRAM_BOT_TOKEN1 }}"
+        with:
+          chat_id: "{{ env.TELEGRAM_CHAT_ID1 }}"
+    - name: trigger-telegram2
+      use: @trigger-telegram
+      provider:
+        config:
+          authentication:
+            bot_token: "{{ env.TELEGRAM_BOT_TOKEN2 }}"
+        with:
+          chat_id: "{{ env.TELEGRAM_CHAT_ID2 }}"
+
+actions:
+  - name: trigger-telegram
+    use: @trigger-telegram
+    condition:
+      - type: threshold
+        value: "{{ steps.db-prod1-no-space.results }}"
+        compare_to: 90% # Trigger if more than 90% full
+        alias: A
+      - type: threshold
+        value: "{{ steps.db-prod2-no-space.results }}"
+        compare_to: 90% # Trigger if more than 90% full
+        alias: B
+    # trigger the action if any of the conditions are met:
+    if: "{{ A }} or {{ B }}"
+    provider:
+      type: telegram
+      with:
+        message: Keep Alert Test
+
+providers:
+  db-server-mock:
+    description: Paper DB Server
+    authentication:
+```
+
+## Breakdown
+
+### Steps
+
+In this example we can see two steps:
+
+- db-prod1-no-space - checks the db space of db prod1
+- db-prod2-no-space - checks the db space of db prod2
+
+### Conditions
+
+The action has two threshold conditions:
+
+```
+condition:
+  - type: threshold
+    value: "{{ steps.this.results }}"
+    compare_to: 90% # Trigger if more than 90% full
+```
+
+But now we've added an `alias` to each condition, so it'll be easier to check it in the `action` itself.
+
+### Action
+
+The action template is defined as follows:
+
+```
+actions:
+  - name: trigger-telegram
+    use: @trigger-telegram
+    condition:
+      - type: threshold
+        value: "{{ steps.db-prod1-no-space.results }}"
+        compare_to: 90% # Trigger if more than 90% full
+        alias: A
+      - type: threshold
+        value: "{{ steps.db-prod2-no-space.results }}"
+        compare_to: 90% # Trigger if more than 90% full
+        alias: B
+    # trigger the action if any of the conditions are met:
+    if: "{{ A }} or {{ B }}"
+    provider:
+      type: telegram
+      with:
+        message: Keep Alert Test
+```
+
+The action uses the `if` statement to alert if **one** of the databases has less than 10% disk space left.
+Note that we don't define any telegram `chat_id` and `bot_token` here, because we want to define two separate Telegram credentials for the two channels.
+
+The credentials are defined in the `actions` definitions within the `alert` configuration.
+Note that we declare `use: @trigger-telegram` to use the defined action template.
+
+```
+alert:
+  ...
+
+  actions:
+    - name: trigger-telegram1
+      use: @trigger-telegram
+      provider:
+        config:
+          authentication:
+            bot_token: "{{ env.TELEGRAM_BOT_TOKEN1 }}"
+        with:
+          chat_id: "{{ env.TELEGRAM_CHAT_ID1 }}"
+    - name: trigger-telegram2
+      use: @trigger-telegram
+      provider:
+        config:
+          authentication:
+            bot_token: "{{ env.TELEGRAM_BOT_TOKEN2 }}"
+        with:
+          chat_id: "{{ env.TELEGRAM_CHAT_ID2 }}"
+```
diff --git a/docs/workflows/functions/add-time-to-date.mdx b/docs/workflows/functions/add-time-to-date.mdx
new file mode 100644
index 0000000000..bcd1e1fe11
--- /dev/null
+++ b/docs/workflows/functions/add-time-to-date.mdx
@@ -0,0 +1,32 @@
+---
+title: "add_time_to_date(date, date_format, time_str)"
+sidebarTitle: "add_time_to_date"
+---
+
+### Description
+Adds a specified amount of time to a given date.
+
+### Input
+- `date` (str or datetime): The date to which the time will be added. Can be a string or a datetime object.
+- `date_format` (str): The format of the date string if the date is provided as a string.
+- `time_str` (str): The time to add, specified as a string (e.g., '1w 2d 3h 30m').
+
+### Output
+A `datetime` object representing the new date with the added time.
+
+### Example
+```yaml
+workflow:
+  id: datadog-alerts
+  description: handle alerts
+  triggers:
+    - type: alert
+      filters:
+        - key: source
+          value: datadog
+  actions:
+    - name: set-reminder-date
+      provider:
+        type: console
+        with:
+          alert_message: keep.add_time_to_date("{{ alert.date }}", "%Y-%m-%dT%H:%M:%S.%f%z", "1w 2d 3h 30m")
+```
diff --git a/docs/workflows/functions/all.mdx b/docs/workflows/functions/all.mdx
new file mode 100644
index 0000000000..392a747ed3
--- /dev/null
+++ b/docs/workflows/functions/all.mdx
@@ -0,0 +1,25 @@
+---
+title: "all(iterable)"
+sidebarTitle: "all"
+---
+
+### Input
+
+An iterable.
+
+### Output
+
+True if all items are identical, False otherwise.
+
+### Example
+
+```yaml
+actions:
+  - name: trigger-slack
+    if: "keep.all({{ steps.db-step.results }})"
+    provider:
+      type: slack
+      config: " {{ providers.slack-demo }} "
+      with:
+        message: "Items are equal"
+```
diff --git a/docs/workflows/functions/datetime-compare.mdx b/docs/workflows/functions/datetime-compare.mdx
new file mode 100644
index 0000000000..f7b2841328
--- /dev/null
+++ b/docs/workflows/functions/datetime-compare.mdx
@@ -0,0 +1,43 @@
+---
+title: "datetime_compare"
+---
+
+datetime_compare(t1, t2) compares t1-t2 and returns the diff in seconds
+
+### Input
+
+datetime.datetime, datetime.datetime
+
+### Output
+
+Integer, timedelta in seconds.
+
+### Example
+
+```yaml
+actions:
+  - name: trigger-slack
+    condition:
+      - type: threshold
+        value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}"))
+        compare_to: 3600 # seconds (1 hour)
+        compare_type: gt # greater than
+```
+
+For reference, the underlying comparison logic in Python:
+
+```python
+from datetime import datetime
+
+def datetime_compare(t1: datetime, t2: datetime) -> int:
+    """
+    Compares two datetime objects and returns the time difference in seconds.
+
+    :param t1: First datetime object
+    :param t2: Second datetime object
+    :return: Time difference in seconds
+    """
+    return int((t1 - t2).total_seconds())
+
+# Example usage:
+# t1 = datetime.utcnow()
+# t2 = datetime.utcnow() - timedelta(hours=2)
+# print(datetime_compare(t1, t2))  # Should return 7200 (2 hours * 3600 seconds)
+```
\ No newline at end of file
diff --git a/docs/workflows/functions/diff.mdx b/docs/workflows/functions/diff.mdx
new file mode 100644
index 0000000000..8cc6819c9c
--- /dev/null
+++ b/docs/workflows/functions/diff.mdx
@@ -0,0 +1,22 @@
+---
+title: "diff(iterable)"
+sidebarTitle: "diff"
+---
+
+### Input
+An iterable.
+
+### Output
+Opposite of [`all`](/workflows/functions/all) - returns False if all items are identical, else True.
+
+### Example
+```yaml
+actions:
+- name: trigger-slack
+  if: "keep.diff({{ steps.db-step.results }})"
+  provider:
+    type: slack
+    config: " {{ providers.slack-demo }} "
+    with:
+      message: "Items are not equal"
+```
diff --git a/docs/workflows/functions/encode.mdx b/docs/workflows/functions/encode.mdx
new file mode 100644
index 0000000000..e6b0ae8f25
--- /dev/null
+++ b/docs/workflows/functions/encode.mdx
@@ -0,0 +1,24 @@
+---
+title: "encode(string)"
+sidebarTitle: "encode"
+---
+
+### Input
+
+string - string
+
+### Output
+
+string - URL encoded string
+
+### Example
+
+```yaml
+actions:
+  - name: trigger-slack
+    condition:
+      - type: equals
+        value: keep.encode('abc def')
+        compare_to: "abc%20def"
+        compare_type: eq
+```
diff --git a/docs/workflows/functions/first.mdx b/docs/workflows/functions/first.mdx
new file mode 100644
index 0000000000..2be9c7b878
--- /dev/null
+++ b/docs/workflows/functions/first.mdx
@@ -0,0 +1,27 @@
+---
+title: "first(iterable)"
+sidebarTitle: "first"
+---
+
+### Input
+
+An iterable.
+
+### Output
+
+The first item of the iterable.
+
+### Example
+
+```yaml
+actions:
+  - name: keep-slack
+    foreach: "{{steps.this.results}}"
+    condition:
+      - type: threshold
+        value: "keep.first(keep.split({{ foreach.value }}, ' '))"
+        # each line looks like:
+        # '  64 2023-02-09 20:08:16,773 INFO: uvicorn.access -: 127.0.0.1:53948 - "GET /test2 HTTP/1.1" 503 Service Unavailable'
+        # where the leading "64" is the value being compared to the threshold
+        compare_to: 70
+```
diff --git a/docs/workflows/functions/last.mdx b/docs/workflows/functions/last.mdx
new file mode 100644
index 0000000000..a3ec04cee0
--- /dev/null
+++ b/docs/workflows/functions/last.mdx
@@ -0,0 +1,27 @@
+---
+title: "last(iterable)"
+sidebarTitle: "last"
+---
+
+### Input
+
+An iterable.
+
+### Output
+
+The last item of the iterable.
+
+### Example
+
+```yaml
+actions:
+  - name: keep-slack
+    foreach: "{{steps.this.results}}"
+    condition:
+      - type: threshold
+        value: "keep.last(keep.split({{ foreach.value }}, ' '))"
+        # each line looks like:
+        # '2023-02-09 20:08:16,773 INFO: uvicorn.access -: 127.0.0.1:53948 - "GET /test2 HTTP/1.1" 503'
+        # where the trailing "503" is the value being compared to the threshold
+        compare_to: 200
+```
diff --git a/docs/workflows/functions/len.mdx b/docs/workflows/functions/len.mdx
new file mode 100644
index 0000000000..44b0fe4eb7
--- /dev/null
+++ b/docs/workflows/functions/len.mdx
@@ -0,0 +1,21 @@
+---
+title: "len(iterable)"
+sidebarTitle: "len"
+---
+
+### Input
+
+An iterable.
+
+### Output
+
+Integer. The length of the iterable.
+
+### Example
+
+```yaml
+condition:
+  - type: threshold
+    value: "keep.len({{ steps.db-no-space.results }})"
+    compare_to: 10
+```
diff --git a/docs/workflows/functions/lowercase.mdx b/docs/workflows/functions/lowercase.mdx
new file mode 100644
index 0000000000..e945f4b624
--- /dev/null
+++ b/docs/workflows/functions/lowercase.mdx
@@ -0,0 +1,24 @@
+---
+title: "lowercase(string)"
+sidebarTitle: "lowercase"
+---
+
+### Input
+
+A string.
+
+### Output
+
+Returns the lowercased string.
+
+### Example
+
+```yaml
+actions:
+  - name: trigger-slack
+    condition:
+      - type: equals
+        value: keep.lowercase('ABC DEF')
+        compare_to: "abc def"
+        compare_type: eq
+```
diff --git a/docs/workflows/functions/split.mdx b/docs/workflows/functions/split.mdx
new file mode 100644
index 0000000000..56476b7e56
--- /dev/null
+++ b/docs/workflows/functions/split.mdx
@@ -0,0 +1,27 @@
+---
+title: "split(string, delimiter)"
+sidebarTitle: "split"
+---
+
+### Input
+
+A string and a delimiter.
+
+### Output
+
+Returns the string, split by the delimiter.
+
+### Example
+
+```yaml
+actions:
+  - name: keep-slack
+    foreach: "{{steps.this.results}}"
+    condition:
+      - type: threshold
+        value: "keep.first(keep.split({{ foreach.value }}, ' '))"
+        # each line looks like:
+        # '  64 2023-02-09 20:08:16,773 INFO: uvicorn.access -: 127.0.0.1:53948 - "GET /test2 HTTP/1.1" 503 Service Unavailable'
+        # where the leading "64" is the value being compared to the threshold
+        compare_to: 70
+```
diff --git a/docs/workflows/functions/to-utc.mdx b/docs/workflows/functions/to-utc.mdx
new file mode 100644
index 0000000000..930de0dfb4
--- /dev/null
+++ b/docs/workflows/functions/to-utc.mdx
@@ -0,0 +1,26 @@
+---
+title: "to_utc"
+---
+
+### Input
+
+datetime.datetime | str
+
+### Output
+
+datetime.datetime - converted to UTC
+
+### Example
+
+```yaml
+actions:
+  - name: trigger-slack
+    condition:
+      - type: threshold
+        # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours
+        # utcnow() returns the local machine datetime in UTC
+        # to_utc() converts a datetime to UTC
+        value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}"))
+        compare_to: 1 # hours
+        compare_type: gt # greater than
+```
diff --git a/docs/workflows/functions/uppercase.mdx b/docs/workflows/functions/uppercase.mdx
new file mode 100644
index 0000000000..45f3f66727
--- /dev/null
+++ b/docs/workflows/functions/uppercase.mdx
@@ -0,0 +1,24 @@
+---
+title: "uppercase(string)"
+sidebarTitle: "uppercase"
+---
+
+### Input
+
+A string.
+
+### Output
+
+Returns the uppercased string.
+
+### Example
+
+```yaml
+actions:
+  - name: trigger-slack
+    condition:
+      - type: equals
+        value: keep.uppercase('abc def')
+        compare_to: "ABC DEF"
+        compare_type: eq
+```
diff --git a/docs/workflows/functions/utcnow.mdx b/docs/workflows/functions/utcnow.mdx
new file mode 100644
index 0000000000..0cf0900dc1
--- /dev/null
+++ b/docs/workflows/functions/utcnow.mdx
@@ -0,0 +1,27 @@
+---
+title: "utcnow"
+sidebarTitle: "utcnow"
+---
+
+### Input
+
+N/A
+
+### Output
+
+A datetime.datetime object representing the current time in UTC.
+
+### Example
+
+```yaml
+actions:
+  - name: trigger-slack
+    condition:
+      - type: threshold
+        # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours
+        # utcnow() returns the local machine datetime in UTC
+        # to_utc() converts a datetime to UTC
+        value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}"))
+        compare_to: 1 # hours
+        compare_type: gt # greater than
+```
diff --git a/docs/workflows/functions/what-is-a-function.mdx b/docs/workflows/functions/what-is-a-function.mdx
new file mode 100644
index 0000000000..24d2269297
--- /dev/null
+++ b/docs/workflows/functions/what-is-a-function.mdx
@@ -0,0 +1,22 @@
+---
+title: "What is a Function?"
+description: "In Keep's context, functions extend the power of context injection. For example, if a step returns a list, you can use the `keep.len` function to count and use the number of results instead of the actual results."
+---
+
+  To use a Keep function, prefix it with `keep.` - for example, use `keep.len`
+  and not `len`
+
+```yaml
+condition:
+  - type: threshold
+    # Use the len of the results instead of the results
+    value: "keep.len({{ steps.db-no-space.results }})"
+    compare_to: 10
+```
+
+## How to create a new function?
+
+Keep functions are designed to be easily extensible!
+To create a new function, all you have to do is add it to the `__init__.py` file of the [functions](https://github.com/keephq/keep/blob/main/keep/functions/__init__.py) directory.
diff --git a/docs/workflows/overview.mdx b/docs/workflows/overview.mdx
new file mode 100644
index 0000000000..f728d3e6e6
--- /dev/null
+++ b/docs/workflows/overview.mdx
@@ -0,0 +1,57 @@
+---
+title: "Overview"
+---
+
+Need any help with creating a Workflow? Feel free to submit an issue or join our Slack and we will help with that.
+A Workflow in Keep is a YAML-based configuration file designed to manage, automate, and enrich alerts. Once uploaded to Keep, the workflow can run based on different types of triggers: manual, alert, incident, or interval. In this document, we'll look into each of these components in detail.
+
+In this section we will review the Workflow components.
+
+## Triggers
+A trigger is an event that starts the workflow. It could be a manual trigger, an alert, an incident, or an interval, depending on your use case.
+Note that when you run an alert with the CLI using `keep run`, the CLI runs the alert regardless of its triggers.
+Keep supports four types of triggers:
+### Manual trigger
+```
+# run manually
+triggers:
+  - type: manual
+```
+
+### Alert trigger
+```
+# run every time an alert from cloudwatch is triggered
+triggers:
+  - type: alert
+    filters:
+      - key: source
+        value: cloudwatch
+```
+
+### Incident trigger
+```
+# run when an incident gets created, updated or deleted
+# You can use multiple events, but at least one is required
+triggers:
+  - type: incident
+    events:
+      - created
+      - deleted
+```
+
+### Interval trigger
+```
+# run every 10 seconds
+triggers:
+  - type: interval
+    value: 10
+```
+
+## Steps
+Steps are optional and define a sequence of actions that fetch or compute data. They are used to add data to the workflow, which can be used in other steps or actions.
+
+## Actions
+An action defines what to do when a workflow is triggered. Actions usually rely on providers for executing specific tasks, like sending a Slack message.
+
+## Conditions
+A condition sets the rules under which an action should be performed. For example, you can set a condition to only trigger an action if certain criteria are met.
diff --git a/docs/workflows/state.mdx b/docs/workflows/state.mdx
new file mode 100644
index 0000000000..c06e46f34f
--- /dev/null
+++ b/docs/workflows/state.mdx
@@ -0,0 +1,47 @@
+---
+title: "State"
+---
+
+## Intro
+Keep State Manager is currently used for:
+1. Throttling
+2. Tracking alerts over time
+3. Previous runs' context
+
+State is currently saved as a JSON file under `./state/keepstate.json`, a path that can be overridden by setting the `KEEP_STATE_FILE` environment variable.
+
+## Example
+One of the usages for Keep's state mechanism is throttling; see [One Until Resolved](/workflows/throttles/one-until-resolved). Keep handles it for you behind the scenes, so you can use it without doing any further modifications.
+
+## Serverless
+If you are running Keep in production, you should host the `keepstate.json` file on persistent storage and mount it to your serverless environment. Feel free to create an issue if you need a solution for your preferred deployment architecture.
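+
+A minimal sketch of how the override can be consumed, assuming the default path and the structure shown in the next section (illustrative code, not Keep's actual implementation):
+
+```python
+import json
+import os
+
+# Resolve the state file location, honoring the KEEP_STATE_FILE override
+state_path = os.environ.get("KEEP_STATE_FILE", "./state/keepstate.json")
+
+with open(state_path) as f:
+    state = json.load(f)
+
+# e.g. inspect the last known status of the "service-is-up" alert
+for entry in state.get("service-is-up", []):
+    print(entry["alert_status"])
+```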
+
+## Keep state structure
+An example of a simple state file:
+```
+{
+  "service-is-up": [
+    {
+      "alert_status": "resolved",
+      "alert_context": {
+        "alert_id": "service-is-up",
+        "alert_owners": [],
+        "alert_tags": [],
+        "alert_steps_context": {
+          "step1": {
+            "conditions": {},
+            "results": {}
+          }
+        }
+      }
+    }
+  ]
+}
+```
+
+### Roadmap
+
+Keep's roadmap around state (great first issues):
+- Saving state in a database.
+- Hosting state in buckets (AWS, GCP and Azure -> read/write).
+- Enriching state with more context so the throttling mechanism becomes more flexible.
diff --git a/docs/workflows/syntax/basic-syntax.mdx b/docs/workflows/syntax/basic-syntax.mdx
new file mode 100644
index 0000000000..143dff685a
--- /dev/null
+++ b/docs/workflows/syntax/basic-syntax.mdx
@@ -0,0 +1,130 @@
+---
+title: "Basic Syntax"
+description: "At Keep, we view alerts as workflows, which consist of a series of steps executed in sequence, each with its own specific input and output. To keep our approach simple, Keep's syntax is designed to closely resemble the syntax used in GitHub Actions. We believe that GitHub Actions has a well-established syntax, and there is no need to reinvent the wheel."
+---
+## Full Example
+```yaml title=examples/raw_sql_query_datetime.yml
+# Notify if a result queried from the DB is above a certain threshold.
+workflow:
+  id: raw-sql-query
+  description: Monitor that time difference is no more than 1 hour
+  steps:
+    - name: get-max-datetime
+      provider:
+        type: mysql
+        config: "{{ providers.mysql-prod }}"
+        with:
+          # Get max(datetime) from the random table
+          query: "SELECT MAX(datetime) FROM demo_table LIMIT 1"
+  actions:
+    - name: trigger-slack
+      condition:
+        - name: threshold-condition
+          type: threshold
+          # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours
+          # utcnow() returns the local machine datetime in UTC
+          # to_utc() converts a datetime to UTC
+          value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}"))
+          compare_to: 1 # hours
+          compare_type: gt # greater than
+      provider:
+        type: slack
+        config: " {{ providers.slack-demo }} "
+        with:
+          message: "DB datetime value ({{ actions.trigger-slack.conditions.threshold.0.compare_value }}) is greater than 1! 🚨"
+```

+## Breakdown 🔨
+
+### Workflow
+```yaml
+workflow:
+  id: raw-sql-query
+  description: Monitor that time difference is no more than 1 hour
+  disabled: Optionally prevent this workflow from running
+  steps:
+    -
+  actions:
+    -
+```
+
+`Workflow` is built of:
+- Metadata (id, description; owners and tags will be added soon)
+- `steps` - list of steps
+- `actions` - list of actions
+- `on-failure` - a conditionless action used in case of an alert failure
+
+### Provider
+```yaml
+provider:
+  type: mysql
+  config: "{{ providers.mysql-prod }}"
+  with:
+    query: "SELECT MAX(datetime) FROM demo_table LIMIT 1"
+  on-failure:
+    retry:
+      count: 4
+      interval: 10
+```
+`Provider` is built of:
+- `type` - the type of the provider ([see supported providers](/providers/overview/))
+- `config` - the provider configuration. You can either supply it explicitly or by using `"{{ providers.mysql-prod }}"`
+- `with` - all type-specific provider configuration. For example, for `mysql` we provide the SQL query.
+- `on-failure` - handles errors when provider execution fails. It is built of:
+  - `retry` - specifies the retry parameters, which include:
+    - `count`: maximum number of retries.
+    - `interval`: duration in seconds between each retry.
+
+### Condition
+```yaml
+- name: threshold-condition
+  type: threshold
+  value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}"))
+  compare_to: 1
+  compare_type: gt
+```
+`Condition` is built of:
+- `name` - a unique identifier for the condition
+- `type` - the type of the condition
+- `value` - the value that will be supplied to the condition during the alert execution
+- `compare_to` - what `value` will be compared against
+- `compare_type` - all type-specific condition configuration
+
+### Steps/Actions
+```yaml
+steps/actions:
+  - name: trigger-slack
+    condition:
+      - name: threshold-condition
+        type: threshold
+        value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}"))
+        compare_to: 1
+        compare_type: gt
+    provider:
+      type: slack
+      config: " {{ providers.slack-demo }} "
+      with:
+        message: "DB datetime value ({{ actions.trigger-slack.conditions.threshold.0.compare_value }}) is greater than 1! 🚨"
+```
+
+`Step/Action` is built of:
+- `name` - the name of the action.
+- `condition` - a list of conditions that determine whether the action should run.
+- `provider` - the provider that will trigger the action.
+- `throttle` - you can [throttle](/workflows/throttles/what-is-a-throttle) the action.
+- `if` - the action can be limited to when certain [conditions](/workflows/conditions/what-is-a-condition) are met.
+- `foreach` - when a `foreach` block is supplied, Keep evaluates it as a list and evaluates the `action` for every item in the list.
+
+The `provider` configuration is already covered in [Providers](/workflows/syntax/basic-syntax).
+
+### On-failure
+```yaml
+on-failure:
+  # Just need a provider we can use to send the failure reason
+  provider:
+    type: slack
+    config: " {{ providers.slack-demo }} "
+```
+
+On-failure is actually a conditionless `Action` used to notify in case the alert failed with an exception.
+The provider is passed a `message` (string) to its `notify` function.
diff --git a/docs/workflows/syntax/context-syntax.mdx b/docs/workflows/syntax/context-syntax.mdx
new file mode 100644
index 0000000000..4cf8547804
--- /dev/null
+++ b/docs/workflows/syntax/context-syntax.mdx
@@ -0,0 +1,47 @@
+---
+title: "Working with context"
+sidebarTitle: "Context Syntax"
+description: "Keep uses [Mustache](https://mustache.github.io/) syntax to inject context at runtime, supporting functions, dictionaries, lists, and nested access."
+---
+
+Here are some examples:
+
+- `{{ steps.step-id.results }}` - Result of step-id
+- `keep.len({{ steps.step-id.results }})` - Number of results from step-id
+- `{{ steps.this.results[0] }}` - First result of this step
+- `keep.first({{ steps.this.results }})` - First result (equivalent to the previous example)
+- `{{ steps.step-id.results[0][0] }}` - First item of the first result
+
+If you have suggestions/improvements/bugs for Keep's syntax, please [open a feature request](https://github.com/keephq/keep/issues/new?assignees=&labels=&template=feature_request.md&title=) and get eternal glory.
+
+### Special context
+
+Keep provides two special context containers - `providers` and `steps`.
+
+### Providers
+
+Provider configuration typically looks like:
+
+```yaml
+provider:
+  type: mysql
+  config: "{{ providers.mysql-prod }}"
+  with:
+    # Get max(datetime) from the random table
+    query: "SELECT MAX(datetime) FROM demo_table LIMIT 1"
+```
+
+Here, `{{ providers.mysql-prod }}` is dynamically translated at runtime from the providers.yaml file.
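+
+A toy sketch of how this kind of Mustache-style substitution can work (illustrative only - Keep's real renderer also handles functions, list indexing, and iteration sections):
+
+```python
+import re
+
+# A made-up context for illustration
+context = {
+    "providers": {"mysql-prod": {"authentication": {"host": "db.example.com"}}},
+    "steps": {"get-max-datetime": {"results": "2024-01-01 00:00:00"}},
+}
+
+def render(template: str) -> str:
+    def resolve(match: re.Match) -> str:
+        value = context
+        # Walk the dotted path (e.g. providers.mysql-prod.authentication.host)
+        for part in match.group(1).strip().split("."):
+            value = value[part]
+        return str(value)
+    return re.sub(r"\{\{(.+?)\}\}", resolve, template)
+
+print(render("host is {{ providers.mysql-prod.authentication.host }}"))
+# -> host is db.example.com
+```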
+
+### Steps
+
+The output of steps can be accessed from anywhere in the YAML using `{{ steps.step-name.results }}`. This output can be used in conditions, actions, or any other place.
+
+### Functions
+
+Keep's syntax allows using functions on context blocks. For example, `keep.len({{ steps.step-name.results }})` will return the number of results of the `step-name` step.
+
+- [See supported functions](/workflows/functions/what-is-a-function)
+- [Create new functions](/workflows/functions/what-is-a-function#how-to-create-a-new-function)
+
+Under the hood, Keep uses Python's `ast` module to parse these expressions and evaluate them as best as possible.
diff --git a/docs/workflows/syntax/foreach-syntax.mdx b/docs/workflows/syntax/foreach-syntax.mdx
new file mode 100644
index 0000000000..310f7ceb06
--- /dev/null
+++ b/docs/workflows/syntax/foreach-syntax.mdx
@@ -0,0 +1,86 @@
+---
+title: "Foreach"
+sidebarTitle: "Foreach Syntax"
+description: "Foreach syntax adds the flexibility of running an action per result instead of only once on all results."
+---
+
+## Usage
+There are two main operation modes for `foreach`:
+1. In the [steps](#steps-section) section.
+2. In the [action](#actions-section) section.
+
+When you enter a `foreach` context, you can use `{{ foreach.value }}` to use the specific value.
+Let's review how to use `foreach`.
+
+### Steps section
+Using `foreach` in `steps` lets you run a step for each result of a previous step.
+In other words:
+1. Run some step.
+2. For each result of the previous step, run another step.
+
+For example, in this alert, we:
+1. Get all node ids (`get-node-ids` step).
+2. For each node id, get the filesystem data of that node (`get-filesystems-by-node-id` step).
+
+```yaml
+  steps:
+    # Get all nodes ids
+    - name: get-node-ids
+      provider:
+        type: postgres
+        config: "{{ providers.postgres-server }}"
+        with:
+          query: "select distinct(node_id) from filesystem;"
+    # For each node id, get the filesystem status and find filesystems in node that are not balanced
+    - name: get-filesystems-by-node-id
+      foreach: "{{ steps.get-node-ids.results }}"
+      provider:
+        type: postgres
+        config: "{{ providers.postgres-server }}"
+        with:
+          query: "select * from filesystem where node_id = '{{ foreach.value[0] }}';"
+```
+
+In this case, `foreach.value` contains a row from the database, and `foreach.value[0]` is the first column of this row.
+
+### Actions section
+Now, let's see how `foreach` can be used in the `actions` section.
+
+In the following example, we are using `foreach` twice:
+1. `foreach: "{{ steps.get-filesystems-by-node-id.results }}"` - iterate over the results of the `get-filesystems-by-node-id` step
+2. `{{#foreach.stddev}}` - using mustache syntax, we iterate over the `foreach.stddev` results.
+
+#### Wait, but what's `foreach.stddev`?
+> **tldr**: conditions can extend `foreach` with other attributes, to support more context.
+
+Because conditions work on `foreach.value`, they can extend `foreach` with other attributes.
+For example, the `threshold` condition extends `foreach` with `level`, so you can use `foreach.level`, and the `stddev` condition extends `foreach` with a `stddev` attribute.
+
+```yaml
+actions:
+  - name: push-alert-to-postgres
+    # Run on get-filesystems-by-node-id results.
+    # Notice each result is a list of the filesystems in a node
+    foreach: "{{ steps.get-filesystems-by-node-id.results }}"
+    # Alert on nodes that have filesystems that are away from the standard deviation
+    condition:
+      - name: stddev-condition
+        type: stddev
+        # foreach.value contains a list of rows from the database
+        value: "{{ foreach.value }}"
+        pivot_column: 8 # 8th column is the filesystem usage percentage
+        compare_to: 1
+
+    provider:
+      type: postgres
+      config: "{{ providers.postgres-server }}"
+      with:
+        query: >
+          INSERT INTO alert (alert_level, alert_message)
+          VALUES ('major', 'The node {{ foreach.value[0][4] }} has filesystems that are not balanced:
+          {{#foreach.stddev}}
+          - Filesystem {{ value[0] }} is {{stddev}} away from the standard deviation
+          {{/foreach.stddev}}')
+```
diff --git a/docs/workflows/throttles/one-until-resolved.mdx b/docs/workflows/throttles/one-until-resolved.mdx
new file mode 100644
index 0000000000..6f36874bd5
--- /dev/null
+++ b/docs/workflows/throttles/one-until-resolved.mdx
@@ -0,0 +1,44 @@
+---
+title: "One Until Resolved"
+description: "The action will trigger only once until the alert is resolved."
+---
+
+For example:
+
+1. The alert is executed and an action is triggered as a result -> the alert status is now "Firing".
+2. The alert is executed again and the action should be triggered -> the action is throttled.
+3. The alert is executed and no action is required -> the alert status is now "Resolved".
+4. The alert is executed and an action is triggered -> the action is triggered.
+
+## How to use
+
+Add the following attribute to your action:
+
+```
+throttle:
+  type: one_until_resolved
+```
+
+For example:
+
+```
+# Check that the service is up
+alert:
+  id: service-is-up
+  description: Check that the service is up
+  steps:
+    - name: service-is-up
+      provider:
+        type: python
+        with:
+          # any external libraries needed
+          imports: requests
+          code: requests.get("http://localhost:3000")
+  actions:
+    - name: trigger-slack
+      throttle:
+        type: one_until_resolved
+      condition:
+        - type: assert
+          assert: "{{ steps.this.results.status_code }} == 200"
+```
diff --git a/docs/workflows/throttles/what-is-a-throttle.mdx b/docs/workflows/throttles/what-is-a-throttle.mdx
new file mode 100644
index 0000000000..9d62245c5d
--- /dev/null
+++ b/docs/workflows/throttles/what-is-a-throttle.mdx
@@ -0,0 +1,14 @@
+---
+title: "What is a Throttle?"
+description: "The purpose of throttling is to prevent any action from being triggered too many times, thus generating too many alerts."
+---
+
+## Throttle strategies
+
+- [One Until Resolved](/workflows/throttles/one-until-resolved)
+
+## Implementing a new strategy
+
+To create a new throttle strategy, create a new class that inherits from the base class in `base_throttle.py` and implements `check_throttling`, as sketched below.
+
+[You can also just submit a new feature request](https://github.com/keephq/keep/issues/new?assignees=&labels=&template=feature_request.md&title=feature:%20new%20throttling%20strategy) and we will get to it ASAP!
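+
+A rough sketch of what such a strategy could look like. The base class name, constructor, and `check_throttling` signature below are assumptions for illustration - mirror the existing `one_until_resolved` implementation in the repository for the real contract:
+
+```python
+import time
+
+class TimeWindowThrottle:  # would inherit from the BaseThrottle class in Keep itself
+    """Hypothetical strategy: throttle an action to at most one trigger per window."""
+
+    def __init__(self, window_seconds: int = 300):
+        self.window_seconds = window_seconds
+        self._last_triggered: dict[str, float] = {}
+
+    def check_throttling(self, action_name: str, alert_id: str) -> bool:
+        """Return True if the action should be throttled (i.e. not run)."""
+        key = f"{alert_id}:{action_name}"
+        now = time.time()
+        last = self._last_triggered.get(key)
+        if last is not None and now - last < self.window_seconds:
+            return True
+        self._last_triggered[key] = now
+        return False
+```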
diff --git a/ee/LICENSE b/ee/LICENSE new file mode 100644 index 0000000000..3395cf32da --- /dev/null +++ b/ee/LICENSE @@ -0,0 +1,35 @@ +The Keep Enterprise Edition (EE) license (the Enterprise License) +Copyright (c) 2024-present Keep Alerting LTD + +With regard to the Keep Software: + +This software and associated documentation files (the "Software") may only be +used in production, if you (and any entity that you represent) have agreed to, +and are in compliance with, the Keep Subscription Terms of Service, available +(if not available, it's impossible to comply) +at https://www.keephq.dev/terms-of-service (the "The Enterprise Termsโ€), or other +agreement governing the use of the Software, as agreed by you and Keep, +and otherwise have a valid Keep Enterprise Edition subscription for the +correct number of user seats. Subject to the foregoing sentence, you are free to +modify this Software and publish patches to the Software. You agree that Keep +and/or its licensors (as applicable) retain all right, title and interest in and +to all such modifications and/or patches, and all such modifications and/or +patches may only be used, copied, modified, displayed, distributed, or otherwise +exploited with a valid Keep Enterprise Edition subscription for the correct +number of user seats. You agree that Keep and/or its licensors (as applicable) retain +all right, title and interest in and to all such modifications. You are not +granted any other rights beyond what is expressly stated herein. Subject to the +foregoing, it is forbidden to copy, merge, publish, distribute, sublicense, +and/or sell the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +For all third party components incorporated into the Keep Software, those +components are licensed under the original license provided by the owner of the +applicable component. \ No newline at end of file diff --git a/keep/action/__init__.py b/ee/experimental/__init__.py similarity index 100% rename from keep/action/__init__.py rename to ee/experimental/__init__.py diff --git a/keep/alert/__init__.py b/ee/experimental/ai_temp/.gitkeep similarity index 100% rename from keep/alert/__init__.py rename to ee/experimental/ai_temp/.gitkeep diff --git a/ee/experimental/generative_utils.py b/ee/experimental/generative_utils.py new file mode 100644 index 0000000000..5689eb7c0a --- /dev/null +++ b/ee/experimental/generative_utils.py @@ -0,0 +1,239 @@ +import logging +import os + +import numpy as np +from openai import OpenAI + +from keep.api.core.db import get_incident_by_id + +from keep.api.models.db.alert import Incident + +logger = logging.getLogger(__name__) + +SUMMARY_GENERATOR_VERBOSE_NAME = "Summary generator v0.1" +NAME_GENERATOR_VERBOSE_NAME = "Name generator v0.1" +MAX_SUMMARY_LENGTH = 900 +MAX_NAME_LENGTH = 75 + +def generate_incident_summary( + incident: Incident, + use_n_alerts_for_summary: int = -1, + generate_summary: str = None, + max_summary_length: int = None, +) -> str: + if "OPENAI_API_KEY" not in os.environ: + logger.error( + "OpenAI API key is not set. 
Incident summary generation is not available.",
+            extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME,
+                   "incident_id": incident.id, "tenant_id": incident.tenant_id}
+        )
+        return ""
+
+    if not generate_summary:
+        generate_summary = os.environ.get("GENERATE_INCIDENT_SUMMARY", "True")
+
+    if generate_summary == "False":
+        logger.info("Incident summary generation is disabled. Aborting.",
+                    extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+        return ""
+
+    if incident.user_summary:
+        return ""
+
+    if not max_summary_length:
+        # os.environ.get returns a string; cast so the length comparisons below work
+        max_summary_length = int(os.environ.get(
+            "MAX_SUMMARY_LENGTH", MAX_SUMMARY_LENGTH))
+
+    try:
+        client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
+        incident = get_incident_by_id(incident.tenant_id, incident.id)
+
+        description_strings = np.unique(
+            [f'{alert.event["name"]}' for alert in incident.alerts]
+        ).tolist()
+
+        if use_n_alerts_for_summary > 0:
+            incident_description = "\n".join(
+                description_strings[:use_n_alerts_for_summary]
+            )
+        else:
+            incident_description = "\n".join(description_strings)
+
+        timestamps = [alert.timestamp for alert in incident.alerts]
+        incident_start = min(timestamps).replace(microsecond=0)
+        incident_end = max(timestamps).replace(microsecond=0)
+
+        model = os.environ.get("OPENAI_MODEL", "gpt-4o-mini")
+
+        summary = (
+            client.chat.completions.create(
+                model=model,
+                messages=[
+                    {
+                        "role": "system",
+                        "content": f"""You are a very skilled DevOps specialist who can summarize any incident based on alert descriptions.
+                        When provided with information, summarize it in 2-3 sentences explaining what happened and when.
+                        ONLY SUMMARIZE WHAT YOU SEE. At the end, add information about a potential scenario of the incident.
+                        When provided with information, answer with max a {int(max_summary_length * 0.9)} symbols excerpt
+                        describing the incident thoroughly.
+
+                        EXAMPLE:
+                        An incident occurred between 2022-11-17 14:11:04 and 2022-11-22 22:19:04, involving a
+                        total of 200 alerts. The alerts indicated critical and warning issues such as high CPU and memory
+                        usage in pods and nodes, as well as a stuck Kubernetes Daemonset rollout. Potential incident scenario:
+                        Kubernetes Daemonset rollout stuck due to high CPU and memory usage in pods and nodes. This caused a
+                        long tail of alerts on various topics.""",
+                    },
+                    {
+                        "role": "user",
+                        "content": f"""Here are the alerts of an incident for summarization:\n{incident_description}\nThis incident started on
+                        {incident_start}, ended on {incident_end} and included {incident.alerts_count} alerts.""",
+                    },
+                ],
+            )
+            .choices[0]
+            .message.content
+        )
+
+        logger.info(f"Generated incident summary with length {len(summary)} symbols",
+                    extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+
+        if len(summary) > max_summary_length:
+            logger.info("Generated incident summary is too long. Applying smart truncation",
+                        extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+
+            summary = (
+                client.chat.completions.create(
+                    model=model,
+                    messages=[
+                        {
+                            "role": "system",
+                            "content": f"""You are a very skilled DevOps specialist who can summarize any incident based on a description.
+                            When provided with information, answer with max a {int(max_summary_length * 0.9)} symbols excerpt describing
+                            the incident thoroughly.
+                            """,
+                        },
+                        {
+                            "role": "user",
+                            "content": f"""Here is the description of an incident for summarization:\n{summary}""",
+                        },
+                    ],
+                )
+                .choices[0]
+                .message.content
+            )
+
+            logger.info(f"Generated new incident summary with length {len(summary)} symbols",
+                        extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+
+            if len(summary) > max_summary_length:
+                logger.info("Generated incident summary is still too long. Applying hard truncation",
+                            extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+                summary = summary[:max_summary_length]
+
+        return summary
+    except Exception as e:
+        logger.error(f"Error in generating incident summary: {e}",
+                     extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+        return ""
+
+
+def generate_incident_name(incident: Incident, generate_name: str = None, max_name_length: int = None, use_n_alerts_for_name: int = -1) -> str:
+    if "OPENAI_API_KEY" not in os.environ:
+        logger.error(
+            "OpenAI API key is not set. Incident name generation is not available.",
+            extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME,
+                   "incident_id": incident.id, "tenant_id": incident.tenant_id}
+        )
+        return ""
+
+    if not generate_name:
+        generate_name = os.environ.get("GENERATE_INCIDENT_NAME", "True")
+
+    if generate_name == "False":
+        logger.info("Incident name generation is disabled. Aborting.",
+                    extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+        return ""
+
+    if incident.user_generated_name:
+        return ""
+
+    if not max_name_length:
+        # os.environ.get returns a string; cast so the length comparisons below work
+        max_name_length = int(os.environ.get(
+            "MAX_NAME_LENGTH", MAX_NAME_LENGTH))
+
+    try:
+        client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
+        incident = get_incident_by_id(incident.tenant_id, incident.id)
+
+        description_strings = np.unique(
+            [f'{alert.event["name"]}' for alert in incident.alerts]).tolist()
+
+        if use_n_alerts_for_name > 0:
+            incident_description = "\n".join(
+                description_strings[:use_n_alerts_for_name])
+        else:
+            incident_description = "\n".join(description_strings)
+
+        timestamps = [alert.timestamp for alert in incident.alerts]
+        incident_start = min(timestamps).replace(microsecond=0)
+
+        model = os.environ.get("OPENAI_MODEL", "gpt-4o-mini")
+
+        name = client.chat.completions.create(model=model, messages=[
+            {
+                "role": "system",
+                "content": f"""You are a very skilled DevOps specialist who can name any incident based on alert descriptions.
+                When provided with information, output a short descriptive name of the incident that could cause these alerts.
+                Add information about the start time to the name. ONLY USE WHAT YOU SEE. Answer with max a {int(max_name_length * 0.9)}
+                symbols excerpt.
+
+                EXAMPLE:
+                Kubernetes rollout stuck (started on 2022.11.17 14:11)"""
+            },
+            {
+                "role": "user",
+                "content": f"""This incident started on {incident_start}.
+                Here are the alerts of the incident:\n{incident_description}\n"""
+            }
+        ]).choices[0].message.content
+
+        logger.info(f"Generated incident name with length {len(name)} symbols",
+                    extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+
+        if len(name) > max_name_length:
+            logger.info("Generated incident name is too long. Applying smart truncation",
+                        extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+
+            name = client.chat.completions.create(model=model, messages=[
+                {
+                    "role": "system",
+                    "content": f"""You are a very skilled DevOps specialist who can name any incident based on a description.
+                    Add information about the start time to the name. When provided with information, answer with max a
+                    {int(max_name_length * 0.9)} symbols.
+
+                    EXAMPLE:
+                    Kubernetes rollout stuck (started on 2022.11.17 14:11)"""
+                },
+                {
+                    "role": "user",
+                    "content": f"""This incident started on {incident_start}.
+                    Here is the description of the incident to name:\n{name}."""
+                }
+            ]).choices[0].message.content
+
+            logger.info(f"Generated new incident name with length {len(name)} symbols",
+                        extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+
+            if len(name) > max_name_length:
+                logger.info("Generated incident name is still too long. Applying hard truncation",
+                            extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+                name = name[:max_name_length]
+
+        return name
+    except Exception as e:
+        logger.error(f"Error in generating incident name: {e}",
+                     extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id})
+        return ""
diff --git a/ee/experimental/graph_utils.py b/ee/experimental/graph_utils.py
new file mode 100644
index 0000000000..368e747f9a
--- /dev/null
+++ b/ee/experimental/graph_utils.py
@@ -0,0 +1,106 @@
+import logging
+
+import numpy as np
+import networkx as nx
+
+from typing import List, Tuple
+
+from keep.api.core.db import get_pmi_values_from_temp_file
+
+logger = logging.getLogger(__name__)
+
+def detect_knee_1d_auto_increasing(y: List[float]) -> Tuple[int, float]:
+    """
+    This function detects the knee point in an increasing 1D curve. The knee point is the point where the curve
+    starts to flatten out (https://en.wikipedia.org/wiki/Knee_of_a_curve).
+
+    Parameters:
+        y (List[float]): a list of float values
+
+    Returns:
+        tuple: knee_index, knee_y
+    """
+
+    def detect_knee_1d(y: List[float], curve: str, direction: str = 'increasing') -> Tuple[int, float, List[float]]:
+        x = np.arange(len(y))
+
+        x_norm = (x - np.min(x)) / (np.max(x) - np.min(x))
+        y_norm = (y - np.min(y)) / (np.max(y) - np.min(y))
+
+        diff_curve = y_norm - x_norm
+
+        if curve == 'concave':
+            knee_index = np.argmax(diff_curve)
+        else:
+            knee_index = np.argmin(diff_curve)
+
+        knee_y = y[knee_index]
+
+        return knee_index, knee_y, diff_curve
+
+    knee_index_concave, knee_y_concave, diff_curve_concave = detect_knee_1d(y, 'concave')
+    knee_index_convex, knee_y_convex, diff_curve_convex = detect_knee_1d(y, 'convex')
+    max_diff_concave = np.max(np.abs(diff_curve_concave))
+    max_diff_convex = np.max(np.abs(diff_curve_convex))
+
+    if max_diff_concave > max_diff_convex:
+        return knee_index_concave, knee_y_concave
+    else:
+        return knee_index_convex, knee_y_convex
+
+
+def create_graph(tenant_id: str, fingerprints: List[str], pmi_values: np.ndarray, fingerprint2idx: dict, pmi_threshold: float = 0., delete_nodes: bool = False, knee_threshold: float = 0.8) -> nx.Graph:
+    """
+    This function creates a graph from a list of fingerprints. The graph is created based on the PMI values between
+    the fingerprints. Edges are created between fingerprints whose PMI value is greater than the threshold.
+ The nodes are removed if the knee point of the PMI values of the edges connected to the node is less than the threshold. + + Parameters: + tenant_id (str): tenant id + fingerprints (List[str]): a list of fingerprints + pmi_threshold (float): PMI threshold + knee_threshold (float): knee threshold + + Returns: + nx.Graph: a graph + """ + graph = nx.Graph() + + if len(fingerprints) == 1: + graph.add_node(fingerprints[0]) + return graph + + logger.info(f'Creating alert graph edges', extra={'tenant_id': tenant_id}) + + for idx_i, fingerprint_i in enumerate(fingerprints): + if fingerprint_i not in fingerprint2idx: + continue + + for idx_j in range(idx_i + 1, len(fingerprints)): + fingerprint_j = fingerprints[idx_j] + + if fingerprint_j not in fingerprint2idx: + continue + + weight = pmi_values[fingerprint2idx[fingerprint_i], fingerprint2idx[fingerprint_j]] + + if weight > pmi_threshold: + graph.add_edge(fingerprint_i, fingerprint_j, weight=weight) + + if delete_nodes: + nodes_to_delete = [] + logger.info(f'Preparing candidate nodes for deletion', extra={'tenant_id': tenant_id}) + + for node in graph.nodes: + weights = sorted([edge['weight'] for edge in graph[node].values()]) + + knee_index, knee_statistic = detect_knee_1d_auto_increasing(weights) + + if knee_statistic < knee_threshold: + nodes_to_delete.append(node) + + logger.info(f'Removing nodes from graph, {len(nodes_to_delete)} nodes will be removed, {len(graph.nodes) - len(nodes_to_delete)} nodes will be left', + extra={'tenant_id': tenant_id}) + graph.remove_nodes_from(nodes_to_delete) + + return graph \ No newline at end of file diff --git a/ee/experimental/incident_utils.py b/ee/experimental/incident_utils.py new file mode 100644 index 0000000000..6593ce8303 --- /dev/null +++ b/ee/experimental/incident_utils.py @@ -0,0 +1,514 @@ +import logging +import os +import math + +import networkx as nx +import numpy as np + +from tqdm import tqdm +from datetime import datetime, timedelta +from typing import Dict, List, Set, Tuple, Any +from arq.connections import ArqRedis + +from ee.experimental.graph_utils import create_graph +from ee.experimental.statistical_utils import get_alert_pmi_matrix +from ee.experimental.generative_utils import generate_incident_summary, generate_incident_name, \ + SUMMARY_GENERATOR_VERBOSE_NAME, NAME_GENERATOR_VERBOSE_NAME + +from keep.api.arq_pool import get_pool +from keep.api.core.dependencies import get_pusher_client +from keep.api.models.db.alert import Alert, Incident +from keep.api.core.db import ( + add_alerts_to_incident_by_incident_id, + create_incident_from_dict, + get_incident_by_id, + get_last_incidents, + query_alerts, + update_incident_summary, + update_incident_name, + write_pmi_matrix_to_temp_file, + get_pmi_values_from_temp_file, + get_tenant_config, + write_tenant_config, +) + +logger = logging.getLogger(__name__) + +ALGORITHM_VERBOSE_NAME = "Correlation algorithm v0.2" +USE_N_HISTORICAL_ALERTS_MINING = 10e4 +USE_N_HISTORICAL_ALERTS_PMI = 10e4 +USE_N_HISTORICAL_INCIDENTS = 10e4 +MIN_ALERT_NUMBER = 100 +INCIDENT_VALIDITY_THRESHOLD = 3600 +ALERT_VALIDITY_THRESHOLD = 3600 +# We assume that incident / alert validity threshold is greater than a size of a batch +STRIDE_DENOMINATOR = 4 +DEFAULT_TEMP_DIR_LOCATION = "./ee/experimental/ai_temp" +PMI_SLIDING_WINDOW = 3600 + +def calculate_pmi_matrix( + ctx: dict | None, # arq context + tenant_id: str, + upper_timestamp: datetime = None, + use_n_historical_alerts: int = None, + sliding_window: int = None, + stride: int = None, + temp_dir: str = None, + 
offload_config: Dict = None,
+    min_alert_number: int = None,
+) -> dict:
+    logger.info("Calculating PMI coefficients for alerts", extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME})
+
+    if not upper_timestamp:
+        upper_timestamp = os.environ.get("PMI_ALERT_UPPER_TIMESTAMP", datetime.now())
+
+    # environment variables are strings; cast the numeric parameters so the
+    # arithmetic and comparisons below work
+    if not use_n_historical_alerts:
+        use_n_historical_alerts = int(os.environ.get(
+            "PMI_USE_N_HISTORICAL_ALERTS", USE_N_HISTORICAL_ALERTS_PMI))
+
+    if not sliding_window:
+        sliding_window = int(os.environ.get("PMI_SLIDING_WINDOW", PMI_SLIDING_WINDOW))
+
+    if not stride:
+        stride = int(os.environ.get("PMI_STRIDE", sliding_window // STRIDE_DENOMINATOR))
+
+    if not temp_dir:
+        temp_dir = os.environ.get("AI_TEMP_FOLDER", DEFAULT_TEMP_DIR_LOCATION)
+        temp_dir = f"{temp_dir}/{tenant_id}"
+        os.makedirs(temp_dir, exist_ok=True)
+
+    if not offload_config:
+        offload_config = os.environ.get("PMI_OFFLOAD_CONFIG", {})
+
+        if "temp_dir" in offload_config:
+            offload_config["temp_dir"] = f'{offload_config["temp_dir"]}/{tenant_id}'
+            os.makedirs(offload_config["temp_dir"], exist_ok=True)
+
+    if not min_alert_number:
+        min_alert_number = int(os.environ.get("MIN_ALERT_NUMBER", MIN_ALERT_NUMBER))
+
+    alerts = query_alerts(
+        tenant_id, limit=use_n_historical_alerts, upper_timestamp=upper_timestamp, sort_ascending=True)
+
+    if len(alerts) < min_alert_number:
+        logger.info("Not enough alerts to mine incidents", extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME})
+        return {"status": "failed", "message": "Not enough alerts to mine incidents"}
+
+    pmi_matrix, pmi_columns = get_alert_pmi_matrix(
+        alerts, "fingerprint", sliding_window, stride, offload_config)
+
+    return {"status": "success", "pmi_matrix": pmi_matrix, "pmi_columns": pmi_columns}
+
+
+def update_existing_incident(incident: Incident, alerts: List[Alert]) -> Tuple[str, bool]:
+    add_alerts_to_incident_by_incident_id(incident.tenant_id, incident.id, alerts)
+    return incident.id, True
+
+
+def create_new_incident(component: Set[str], alerts: List[Alert],
+                        tenant_id: str) -> Tuple[str, bool]:
+    incident_start_time = min(alert.timestamp for alert in alerts if alert.fingerprint in component)
+    incident_start_time = incident_start_time.replace(microsecond=0)
+
+    incident = create_incident_from_dict(tenant_id,
+                                         {"ai_generated_name": f"Incident started at {incident_start_time}",
+                                          "generated_summary": "Summarization is Disabled",
+                                          "is_predicted": True})
+    add_alerts_to_incident_by_incident_id(
+        tenant_id, incident.id, [
+            alert.id for alert in alerts if alert.fingerprint in component],)
+    return incident.id, False
+
+
+async def schedule_incident_processing(pool: ArqRedis, tenant_id: str, incident_id: str) -> None:
+    job_summary = await pool.enqueue_job("process_summary_generation", tenant_id=tenant_id, incident_id=incident_id,)
+    logger.info(f"Summary generation for incident {incident_id} scheduled, job: {job_summary}", extra={
+        "algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "tenant_id": tenant_id, "incident_id": incident_id},)
+
+    job_name = await pool.enqueue_job("process_name_generation", tenant_id=tenant_id, incident_id=incident_id)
+    logger.info(f"Name generation for incident {incident_id} scheduled, job: {job_name}", extra={
+        "algorithm": NAME_GENERATOR_VERBOSE_NAME, "tenant_id": tenant_id, "incident_id": incident_id},)
+
+
+def is_incident_accepting_updates(incident: Incident, current_time: datetime,
+                                  incident_validity_threshold: timedelta) -> bool:
+    return current_time - incident.last_seen_time < incident_validity_threshold
+
+
+def
get_component_first_seen_time(component: Set[str], alerts: List[Alert]) -> datetime: + return min(alert.timestamp for alert in alerts if alert.fingerprint in component) + + +def process_graph_component(component: Set[str], batch_incidents: List[Incident], batch_alerts: List[Alert], batch_fingerprints: Set[str], + tenant_id: str, min_incident_size: int, incident_validity_threshold: timedelta) -> Tuple[str, bool]: + is_component_merged = False + for incident in batch_incidents: + incident_fingerprints = set(alert.fingerprint for alert in incident.alerts) + if incident_fingerprints.issubset(component): + if not incident_fingerprints.intersection(batch_fingerprints): + continue + logger.info(f"Found possible extension for incident {incident.id}", + extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + + amendment_time = get_component_first_seen_time(component, batch_alerts) + if is_incident_accepting_updates(incident, amendment_time, incident_validity_threshold): + logger.info(f"Incident {incident.id} is accepting updates.", + extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + + existing_alert_ids = set([alert.id for alert in incident.alerts]) + appendable_alerts = [alert for alert in batch_alerts if alert.fingerprint in component and not alert.id in existing_alert_ids] + + logger.info(f"Appending {len(appendable_alerts)} alerts to incident {incident.id}", + extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + is_component_merged = True + return update_existing_incident_inmem(incident, appendable_alerts) + else: + logger.info(f"Incident {incident.id} is not accepting updates. Aborting merge operation.", + extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + + if not is_component_merged: + if len(component) >= min_incident_size: + logger.info(f"Creating new incident with {len(component)} alerts", + extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + return create_new_incident_inmem(component, batch_alerts, tenant_id) + else: + return None, False + + +def process_alert_batch(batch_alerts: List[Alert], batch_incidents: list[Incident], tenant_id: str, min_incident_size: int, + incident_validity_threshold: timedelta, pmi_values, fingerpint2idx, pmi_threshold, delete_nodes, knee_threshold) -> Tuple[str, bool]: + + batch_fingerprints = set([alert.fingerprint for alert in batch_alerts]) + + amended_fingerprints = set(batch_fingerprints) + for incident in batch_incidents: + incident_fingerprints = set(alert.fingerprint for alert in incident.alerts) + + amended_fingerprints = incident_fingerprints.union(batch_fingerprints) + + logger.info("Building alert graph", extra={"tenant_id": tenant_id, "algorithm": NAME_GENERATOR_VERBOSE_NAME}) + amended_graph = create_graph(tenant_id, list(amended_fingerprints), pmi_values, + fingerpint2idx, pmi_threshold, delete_nodes, knee_threshold) + + logger.info("Analyzing alert graph", extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + batch_incident_ids_for_processing = [] + batch_new_incidents = [] + batch_updated_incidents = [] + + for component in nx.connected_components(amended_graph): + incident, is_updated = process_graph_component(component, batch_incidents, batch_alerts, batch_fingerprints, tenant_id, min_incident_size, incident_validity_threshold) + if incident: + batch_incident_ids_for_processing.append(incident.id) + if is_updated: + batch_updated_incidents.append(incident) + else: + batch_new_incidents.append(incident) + + return 
batch_incident_ids_for_processing, batch_new_incidents, batch_updated_incidents + + +async def generate_update_incident_summary(ctx, tenant_id: str, incident_id: str): + incident = get_incident_by_id(tenant_id, incident_id) + summary = generate_incident_summary(incident) + + if summary: + update_incident_summary(tenant_id, incident_id, summary) + + return summary + + +async def generate_update_incident_name(ctx, tenant_id: str, incident_id: str): + incident = get_incident_by_id(tenant_id, incident_id) + name = generate_incident_name(incident) + + if name: + update_incident_name(tenant_id, incident_id, name) + + return name + + +def get_last_incidents_inmem(incidents: List[Incident], upper_timestamp: datetime, lower_timestamp: datetime) -> List[Incident]: + return [incident for incident in incidents if lower_timestamp < incident.last_seen_time < upper_timestamp] + + +def add_alerts_to_incident_by_incident_id_inmem(incident: Incident, alerts: List[str]): + incident.alerts.extend(alerts) + return incident + + +def create_incident_from_dict_inmem(tenant_id: str, incident_dict: Dict[str, Any]) -> Incident: + return Incident(tenant_id=tenant_id, **incident_dict) + + +def create_new_incident_inmem(component: Set[str], alerts: List[Alert], tenant_id: str) -> Tuple[Incident, bool]: + incident_start_time = min(alert.timestamp for alert in alerts if alert.fingerprint in component) + incident_start_time = incident_start_time.replace(microsecond=0) + + incident = create_incident_from_dict_inmem(tenant_id, + {"name": f"Incident started at {incident_start_time}", + "description": "Summarization is Disabled", + "is_predicted": True}) + + incident = add_alerts_to_incident_by_incident_id_inmem( + incident, [alert for alert in alerts if alert.fingerprint in component],) + incident.last_seen_time = max([alert.timestamp for alert in incident.alerts]) + + return incident, False + + +def update_existing_incident_inmem(incident: Incident, alerts: List[str]) -> Tuple[str, bool]: + incident = add_alerts_to_incident_by_incident_id_inmem(incident, alerts) + incident.last_seen_time = max([alert.timestamp for alert in incident.alerts]) + return incident, True + + +def update_incident_summary_inmem(incident: Incident, summary: str): + incident.summary = summary + return incident + + +def update_incident_name_inmem(incident: Incident, name: str): + incident.name = name + return incident + + +async def mine_incidents_and_create_objects( + ctx: dict | None, # arq context + tenant_id: str, + alert_lower_timestamp: datetime = None, + alert_upper_timestamp: datetime = None, + use_n_historical_alerts: int = None, + incident_lower_timestamp: datetime = None, + incident_upper_timestamp: datetime = None, + use_n_historical_incidents: int = None, + pmi_threshold: float = None, + delete_nodes: bool = None, + knee_threshold: float = None, + min_incident_size: int = None, + min_alert_number: int = None, + incident_similarity_threshold: float = None, + incident_validity_threshold: timedelta = None, + general_temp_dir: str = None, + alert_validity_threshold: int = None, +) -> Dict[str, List[Incident]]: + """ + This function mines incidents from alerts and creates incidents in the database. 
+
+    Parameters:
+        tenant_id (str): tenant id
+        alert_lower_timestamp (datetime): lower timestamp for alerts
+        alert_upper_timestamp (datetime): upper timestamp for alerts
+        use_n_historical_alerts (int): number of historical alerts to use
+        incident_lower_timestamp (datetime): lower timestamp for incidents
+        incident_upper_timestamp (datetime): upper timestamp for incidents
+        use_n_historical_incidents (int): number of historical incidents to use
+        pmi_threshold (float): PMI threshold used for incident graph edge creation
+        knee_threshold (float): knee threshold used for incident graph node creation
+        min_incident_size (int): minimum incident size
+        incident_similarity_threshold (float): incident similarity threshold
+
+    Returns:
+        Dict[str, List[Incident]]: a dictionary containing the created incidents
+    """
+    # obtain tenant_config
+    if not general_temp_dir:
+        general_temp_dir = os.environ.get(
+            "AI_TEMP_FOLDER", DEFAULT_TEMP_DIR_LOCATION)
+
+    temp_dir = f"{general_temp_dir}/{tenant_id}"
+    os.makedirs(temp_dir, exist_ok=True)
+
+    tenant_config = get_tenant_config(tenant_id)
+
+    # obtain alert-related parameters
+    alert_validity_threshold = int(os.environ.get("ALERT_VALIDITY_THRESHOLD", ALERT_VALIDITY_THRESHOLD))
+    alert_batch_stride = alert_validity_threshold // STRIDE_DENOMINATOR
+
+    if not alert_upper_timestamp:
+        alert_upper_timestamp = os.environ.get(
+            "MINE_ALERT_UPPER_TIMESTAMP", datetime.now())
+
+    if not alert_lower_timestamp:
+        if tenant_config.get("last_correlated_batch_start", None):
+            alert_lower_timestamp = datetime.fromisoformat(
+                tenant_config.get("last_correlated_batch_start", None))
+        else:
+            alert_lower_timestamp = None
+
+    # environment variables are strings; cast the numeric parameters so the
+    # comparisons below work
+    if not use_n_historical_alerts:
+        use_n_historical_alerts = int(os.environ.get(
+            "MINE_USE_N_HISTORICAL_ALERTS",
+            USE_N_HISTORICAL_ALERTS_MINING))
+
+    # obtain incident-related parameters
+    if not incident_validity_threshold:
+        incident_validity_threshold = timedelta(
+            seconds=int(os.environ.get("MINE_INCIDENT_VALIDITY", INCIDENT_VALIDITY_THRESHOLD)))
+
+    if not use_n_historical_incidents:
+        use_n_historical_incidents = int(os.environ.get(
+            "MINE_USE_N_HISTORICAL_INCIDENTS", USE_N_HISTORICAL_INCIDENTS))
+
+    if not incident_similarity_threshold:
+        incident_similarity_threshold = float(os.environ.get("INCIDENT_SIMILARITY_THRESHOLD", 0.8))
+
+    if not min_incident_size:
+        min_incident_size = int(os.environ.get("MIN_INCIDENT_SIZE", 5))
+
+    if not pmi_threshold:
+        pmi_threshold = float(os.environ.get("PMI_THRESHOLD", 0.0))
+
+    if not delete_nodes:
+        # note: any non-empty string (including "False") is truthy, so compare explicitly
+        delete_nodes = str(os.environ.get("DELETE_NODES", False)).lower() == "true"
+
+    if not knee_threshold:
+        knee_threshold = float(os.environ.get("KNEE_THRESHOLD", 0.8))
+
+    status = calculate_pmi_matrix(ctx, tenant_id, min_alert_number=min_alert_number)
+    if status.get("status") == "failed":
+        pusher_client = get_pusher_client()
+        if pusher_client:
+            log_string = f"{ALGORITHM_VERBOSE_NAME} failed to calculate PMI matrix"
+            pusher_client.trigger(f"private-{tenant_id}", "ai-logs-change", {"log": log_string})
+
+        return {"incidents": []}
+
+    elif status.get("status") == "success":
+        logger.info(
+            f"Calculating PMI coefficients for alerts finished. PMI matrix is being written to the database.
Total number of PMI coefficients: {status.get('pmi_matrix').size}", + extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + + pmi_values = status.get("pmi_matrix") + fingerprints = status.get("pmi_columns") + write_pmi_matrix_to_temp_file(tenant_id, pmi_values, fingerprints, temp_dir) + + logger.info("PMI matrix is written to the database.", extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + fingerprint2idx = {fingerprint: i for i, fingerprint in enumerate(fingerprints)} + logger.info("Getting new alerts and incidents", extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + + alerts = query_alerts(tenant_id, limit=use_n_historical_alerts, upper_timestamp=alert_upper_timestamp, + lower_timestamp=alert_lower_timestamp, sort_ascending=True) + + if not alert_lower_timestamp: + alert_lower_timestamp = min(alert.timestamp for alert in alerts) + + incidents, _ = get_last_incidents(tenant_id, limit=use_n_historical_incidents, upper_timestamp=alert_lower_timestamp + incident_validity_threshold, + lower_timestamp=alert_upper_timestamp - incident_validity_threshold, with_alerts=True) + + n_batches = int(math.ceil((alert_upper_timestamp - alert_lower_timestamp).total_seconds() / alert_batch_stride)) - (STRIDE_DENOMINATOR - 1) + logging.info( + f"Starting alert correlation. Current batch size: {alert_validity_threshold} seconds. Current \ + batch stride: {alert_batch_stride} seconds. Number of batches to process: {n_batches}") + + pool = await get_pool() if not ctx else ctx["redis"] + + new_incident_ids = [] + updated_incident_ids = [] + incident_ids_for_processing = [] + + alert_timestamps = np.array([alert.timestamp.timestamp() for alert in alerts]) + batch_indices = np.arange(0, n_batches) + batch_start_ts = alert_lower_timestamp.timestamp() + np.array([batch_idx * alert_batch_stride for batch_idx in batch_indices]) + batch_end_ts = batch_start_ts + alert_validity_threshold + + start_indices = np.searchsorted(alert_timestamps, batch_start_ts, side='left') + end_indices = np.searchsorted(alert_timestamps, batch_end_ts, side='right') + + for batch_idx, (start_idx, end_idx) in tqdm(enumerate(zip(start_indices, end_indices)), total=n_batches, desc="Processing alert batches.."): + batch_alerts = alerts[start_idx:end_idx] + + logger.info( + f"Processing batch {batch_idx} with start timestamp {datetime.fromtimestamp(batch_start_ts[batch_idx])} \ + and end timestamp {min(datetime.fromtimestamp(batch_end_ts[batch_idx]), alert_upper_timestamp)}. 
Batch size: {len(batch_alerts)}", + extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + + if len(batch_alerts) == 0: + continue + + batch_incidents = get_last_incidents_inmem(incidents, datetime.fromtimestamp(batch_end_ts[batch_idx]), + datetime.fromtimestamp(batch_start_ts[batch_idx]) - incident_validity_threshold) + + logger.info( + f"Found {len(batch_incidents)} incidents that accept updates by {datetime.fromtimestamp(batch_start_ts[batch_idx])}.", + extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + + batch_incident_ids_for_processing, batch_new_incidents, batch_updated_incidents = process_alert_batch( + batch_alerts, batch_incidents, tenant_id, min_incident_size, incident_validity_threshold, pmi_values, fingerprint2idx, pmi_threshold, delete_nodes, knee_threshold) + + new_incident_ids.extend([incident.id for incident in batch_new_incidents]) + incidents.extend(batch_new_incidents) + updated_incident_ids.extend([incident.id for incident in batch_updated_incidents]) + incident_ids_for_processing.extend(batch_incident_ids_for_processing) + + logger.info(f"Saving last correlated batch start timestamp: {datetime.isoformat(alert_lower_timestamp + timedelta(seconds= (n_batches - 1) * alert_batch_stride))}", + extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + tenant_config["last_correlated_batch_start"] = datetime.isoformat(alert_lower_timestamp + timedelta(seconds= (n_batches - 1) * alert_batch_stride)) + write_tenant_config(tenant_id, tenant_config) + + logger.info(f"Writing {len(incidents)} incidents to database", + extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + db_incident_ids_for_processing = [] + db_new_incident_ids = [] + db_updated_incident_ids = [] + for incident in incidents: + if not get_incident_by_id(tenant_id, incident.id): + incident_dict = { + "ai_generated_name": incident.ai_generated_name, + "generated_summary": incident.generated_summary, + "is_predicted": True, + } + db_incident = create_incident_from_dict(tenant_id, incident_dict) + + incident_id = db_incident.id + else: + incident_id = incident.id + + if incident.id in incident_ids_for_processing: + db_incident_ids_for_processing.append(incident_id) + + if incident.id in new_incident_ids: + db_new_incident_ids.append(incident_id) + + if incident.id in updated_incident_ids: + db_updated_incident_ids.append(incident_id) + + + add_alerts_to_incident_by_incident_id(tenant_id, incident_id, [alert.id for alert in incident.alerts]) + + logger.info(f"Scheduling {len(db_incident_ids_for_processing)} incidents for name / summary generation", + extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + new_incident_count = len(set(new_incident_ids)) + updated_incident_count = len(set(updated_incident_ids).difference(set(new_incident_ids))) + db_incident_ids_for_processing = list(set(db_incident_ids_for_processing)) + for incident_id in db_incident_ids_for_processing: + await schedule_incident_processing(pool, tenant_id, incident_id) + + incident_ids = list(set(db_new_incident_ids + db_updated_incident_ids)) + + pusher_client = get_pusher_client() + if pusher_client: + if new_incident_count > 0 or updated_incident_count > 0: + log_string = f"{ALGORITHM_VERBOSE_NAME} successfully executed. Alerts from {alert_lower_timestamp.replace(microsecond=0)} \ + till {alert_upper_timestamp.replace(microsecond=0)} were processed. Total count of processed alerts: {len(alerts)}. \ + Total count of created incidents: {new_incident_count}. 
Total count of updated incidents: \ + {updated_incident_count}." + elif len(alerts) > 0: + log_string = f'{ALGORITHM_VERBOSE_NAME} successfully executed. Alerts from {alert_lower_timestamp.replace(microsecond=0)} \ + till {alert_upper_timestamp.replace(microsecond=0)} were processed. Total count of processed alerts: {len(alerts)}. \ + Total count of created incidents: {new_incident_count}. Total count of updated incidents: \ + {updated_incident_count}. This may be due to high alert sparsity or low amount of unique \ + alert fingerprints. Adding more alerts, increasing "sliding window size" or decreasing minimal amount of \ + "minimal amount of unique fingerprints in an incident" configuration parameters may help.' + + else: + log_string = f'{ALGORITHM_VERBOSE_NAME} successfully executed. Alerts from {alert_lower_timestamp.replace(microsecond=0)} \ + till {alert_upper_timestamp.replace(microsecond=0)} were processed. Total count of processed alerts: {len(alerts)}. \ + No incidents were created or updated. Add alerts to the system to enable automatic incident creation.' + + pusher_client.trigger(f"private-{tenant_id}", "ai-logs-change", {"log": log_string}) + + logger.info("Client notified on new AI log", extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}) + + return {"incidents": [get_incident_by_id(tenant_id, incident_id) + for incident_id in incident_ids]} \ No newline at end of file diff --git a/ee/experimental/statistical_utils.py b/ee/experimental/statistical_utils.py new file mode 100644 index 0000000000..39278a4446 --- /dev/null +++ b/ee/experimental/statistical_utils.py @@ -0,0 +1,184 @@ +import os +import logging + +import numpy as np +import pandas as pd + +from datetime import datetime +from typing import List, Tuple, Dict +from scipy.sparse import csr_matrix + +from keep.api.models.db.alert import Alert + +logger = logging.getLogger(__name__) + +def get_batched_alert_counts(alerts: pd.DataFrame, unique_alert_identifier: str, sliding_window_size: int, step_size: int) -> pd.DataFrame: + """ + This function calculates number of alerts per sliding window. + + Parameters: + alerts (pd.DataFrame): a DataFrame containing alerts + unique_alert_identifier (str): a unique identifier for alerts + sliding_window_size (int): sliding window size in seconds + step_size (int): step size in seconds + + Returns: + rolling_counts (pd.DataFrame): a DataFrame containing the number of alerts per sliding window + """ + + resampled_alert_counts = alerts.set_index('starts_at').resample(f'{step_size}s')[unique_alert_identifier].value_counts().unstack(fill_value=0) + rolling_counts = resampled_alert_counts.rolling(window=f'{sliding_window_size}s', min_periods=1).sum() + + return rolling_counts + + +def get_batched_alert_occurrences(alerts: pd.DataFrame, unique_alert_identifier: str, sliding_window_size: int, step_size: int) -> pd.DataFrame: + """ + This function calculates occurrences of alerts per sliding window. 
+
+    Parameters:
+        alerts (pd.DataFrame): a DataFrame containing alerts
+        unique_alert_identifier (str): a unique identifier for alerts
+        sliding_window_size (int): sliding window size in seconds
+        step_size (int): step size in seconds
+
+    Returns:
+        alert_occurences (pd.DataFrame): a DataFrame containing the occurrences of alerts per sliding window
+    """
+
+    alert_counts = get_batched_alert_counts(alerts, unique_alert_identifier, sliding_window_size, step_size)
+    alert_occurences = pd.DataFrame(np.where(alert_counts > 0, 1, 0), index=alert_counts.index, columns=alert_counts.columns)
+
+    return alert_occurences
+
+def get_jaccard_scores(P_a: np.array, P_aa: np.array) -> np.array:
+    """
+    This function calculates the Jaccard similarity scores between recurring events.
+
+    Parameters:
+        P_a (np.array): a 1D array containing the probabilities of events
+        P_aa (np.array): a 2D array containing the probabilities of joint events
+
+    Returns:
+        jaccard_matrix (np.array): a 2D array containing the Jaccard similarity scores between events
+    """
+
+    P_a_matrix = P_a[:, None] + P_a
+    union_matrix = P_a_matrix - P_aa
+
+    with np.errstate(divide='ignore', invalid='ignore'):
+        jaccard_matrix = np.where(union_matrix != 0, P_aa / union_matrix, 0)
+
+    np.fill_diagonal(jaccard_matrix, 1)
+
+    return jaccard_matrix
+
+
+def get_alert_jaccard_matrix(alerts: pd.DataFrame, unique_alert_identifier: str, sliding_window_size: int, step_size: int) -> pd.DataFrame:
+    """
+    This function calculates Jaccard similarity scores between alert groups (fingerprints).
+
+    Parameters:
+        alerts (pd.DataFrame): a DataFrame containing alerts
+        unique_alert_identifier (str): a unique identifier for alerts
+        sliding_window_size (int): sliding window size in seconds
+        step_size (int): step size in seconds
+
+    Returns:
+        jaccard_scores_df (pd.DataFrame): a DataFrame containing the Jaccard similarity scores between alert groups
+    """
+
+    alert_occurrences_df = get_batched_alert_occurrences(alerts, unique_alert_identifier, sliding_window_size, step_size)
+    alert_occurrences = alert_occurrences_df.to_numpy()
+
+    alert_probabilities = np.mean(alert_occurrences, axis=0)
+    joint_alert_occurrences = np.dot(alert_occurrences.T, alert_occurrences)
+    pairwise_alert_probabilities = joint_alert_occurrences / alert_occurrences.shape[0]
+
+    jaccard_scores = get_jaccard_scores(alert_probabilities, pairwise_alert_probabilities)
+    jaccard_scores_df = pd.DataFrame(jaccard_scores, index=alert_occurrences_df.columns, columns=alert_occurrences_df.columns)
+
+    return jaccard_scores_df
+
+def get_alert_pmi_matrix(alerts: List[Alert],
+                         unique_alert_identifier: str,
+                         sliding_window_size: int,
+                         step_size: int,
+                         offload_config: Dict = {}) -> Tuple[np.array, List[str]]:
+    """
+    This function calculates PMI scores between alert groups (fingerprints).
+ + Parameters: + alerts List[Alert]: a list containing alerts + unique_alert_identifier (str): a unique identifier for alerts + sliding_window_size (int): sliding window size in seconds + step_size (int): step size in seconds + + Returns: + pmi_matrix (np.array): a 2D array containing the PMI scores between alert fingerprints + alert_occurences_df.columns (List[str]): a list containing the alert fingerprints + """ + + alert_dict = { + 'fingerprint': [alert.fingerprint for alert in alerts], + 'starts_at': [alert.timestamp for alert in alerts], + } + + if offload_config: + temp_dir = offload_config.get('temp_dir', None) + + alert_df = pd.DataFrame(alert_dict) + alert_occurences_df = get_batched_alert_occurrences(alert_df, unique_alert_identifier, sliding_window_size, step_size) + logger.info('Windowed alert occurrences calculated.') + + alert_occurrences = alert_occurences_df.to_numpy() + alert_probabilities = np.mean(alert_occurrences, axis=0) + logger.info('Alert probabilities calculated.') + + alert_occurrences = csr_matrix(alert_occurrences) + + if offload_config: + joint_alert_occurrences = np.memmap(f'{temp_dir}/joint_alert_occurrences.dat', dtype='float16', mode='w+', + shape=(alert_occurrences.shape[1], alert_occurrences.shape[1])) + else: + joint_alert_occurrences = np.zeros((alert_occurrences.shape[1], alert_occurrences.shape[1]), dtype=np.float16) + + joint_alert_occurrences[:] = alert_occurrences.T.dot(alert_occurrences).toarray() + logger.info('Joint alert occurrences calculated.') + + if offload_config: + pairwise_alert_probabilities = np.memmap(f'{temp_dir}/pairwise_alert_probabilities.dat', dtype='float16', mode='w+', + shape=(joint_alert_occurrences.shape[0], joint_alert_occurrences.shape[1])) + else: + pairwise_alert_probabilities = np.zeros((joint_alert_occurrences.shape[0], joint_alert_occurrences.shape[1]), dtype=np.float16) + + pairwise_alert_probabilities[:] = joint_alert_occurrences / alert_occurrences.shape[0] + logger.info('Pairwise alert probabilities calculated.') + + if offload_config: + dense_pmi_matrix = np.memmap(f'{temp_dir}/dense_pmi_matrix.dat', dtype='float16', mode='w+', + shape=(pairwise_alert_probabilities.shape[0], pairwise_alert_probabilities.shape[1])) + else: + dense_pmi_matrix = np.zeros((pairwise_alert_probabilities.shape[0], pairwise_alert_probabilities.shape[1]), dtype=np.float16) + + dense_pmi_matrix[:] = np.log(pairwise_alert_probabilities / + (alert_probabilities[:, None] * alert_probabilities)) + logger.info('PMI matrix calculated.') + + dense_pmi_matrix[np.isnan(dense_pmi_matrix)] = 0 + np.fill_diagonal(dense_pmi_matrix, 0) + pmi_matrix = np.clip(dense_pmi_matrix, -100, 100) + logger.info('PMI matrix modified.') + + if offload_config: + joint_alert_occurrences._mmap.close() + pairwise_alert_probabilities._mmap.close() + dense_pmi_matrix._mmap.close() + + os.remove(f'{temp_dir}/joint_alert_occurrences.dat') + os.remove(f'{temp_dir}/pairwise_alert_probabilities.dat') + os.remove(f'{temp_dir}/dense_pmi_matrix.dat') + + logger.info(f'Temporary files removed from {temp_dir}.') + + return pmi_matrix, alert_occurences_df.columns \ No newline at end of file diff --git a/keep/alertmanager/__init__.py b/ee/identitymanager/__init__.py similarity index 100% rename from keep/alertmanager/__init__.py rename to ee/identitymanager/__init__.py diff --git a/keep/providers/snowflake_provider/snowflake_input_provider.py b/ee/identitymanager/identity_managers/__init__.py similarity index 100% rename from 
keep/providers/snowflake_provider/snowflake_input_provider.py rename to ee/identitymanager/identity_managers/__init__.py diff --git a/ee/identitymanager/identity_managers/auth0/__init__.py b/ee/identitymanager/identity_managers/auth0/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ee/identitymanager/identity_managers/auth0/auth0_authverifier.py b/ee/identitymanager/identity_managers/auth0/auth0_authverifier.py new file mode 100644 index 0000000000..ea5a5987a9 --- /dev/null +++ b/ee/identitymanager/identity_managers/auth0/auth0_authverifier.py @@ -0,0 +1,66 @@ +import os + +import jwt +from fastapi import HTTPException + +from keep.identitymanager.authenticatedentity import AuthenticatedEntity +from keep.identitymanager.authverifierbase import AuthVerifierBase +from keep.identitymanager.rbac import Admin as AdminRole + +# Note: cache_keys is set to True to avoid fetching the jwks keys on every request +auth_domain = os.environ.get("AUTH0_DOMAIN") +if auth_domain: + jwks_uri = f"https://{auth_domain}/.well-known/jwks.json" + jwks_client = jwt.PyJWKClient( + jwks_uri, cache_keys=True, headers={"User-Agent": "keep-api"} + ) +else: + jwks_client = None + + +class Auth0AuthVerifier(AuthVerifierBase): + """Handles authentication and authorization for multi tenant mode""" + + def __init__(self, scopes: list[str] = []) -> None: + # TODO: this verifier should be instantiated once and not for every endpoint/route + # to better cache the jwks keys + super().__init__(scopes) + # init once so the cache will actually work + self.auth_domain = os.environ.get("AUTH0_DOMAIN") + if not self.auth_domain: + raise Exception("Missing AUTH0_DOMAIN environment variable") + self.jwks_uri = f"https://{self.auth_domain}/.well-known/jwks.json" + # Note: cache_keys is set to True to avoid fetching the jwks keys on every request + # but it currently caches only per-route. 
After moving this auth verifier to be a singleton, we can cache it globally + self.issuer = f"https://{self.auth_domain}/" + self.auth_audience = os.environ.get("AUTH0_AUDIENCE") + + def _verify_bearer_token(self, token) -> AuthenticatedEntity: + from opentelemetry import trace + + tracer = trace.get_tracer(__name__) + with tracer.start_as_current_span("verify_bearer_token"): + if not token: + raise HTTPException(status_code=401, detail="No token provided ๐Ÿ‘ˆ") + try: + jwt_signing_key = jwks_client.get_signing_key_from_jwt(token).key + payload = jwt.decode( + token, + jwt_signing_key, + algorithms="RS256", + audience=self.auth_audience, + issuer=self.issuer, + leeway=60, + ) + tenant_id = payload.get("keep_tenant_id") + role_name = payload.get( + "keep_role", AdminRole.get_name() + ) # default to admin for backwards compatibility + email = payload.get("email") + return AuthenticatedEntity(tenant_id, email, role=role_name) + except jwt.exceptions.DecodeError: + self.logger.exception("Failed to decode token") + raise HTTPException(status_code=401, detail="Token is not a valid JWT") + except Exception as e: + self.logger.exception("Failed to validate token") + raise HTTPException(status_code=401, detail=str(e)) diff --git a/ee/identitymanager/identity_managers/auth0/auth0_identitymanager.py b/ee/identitymanager/identity_managers/auth0/auth0_identitymanager.py new file mode 100644 index 0000000000..03c5dcfc7d --- /dev/null +++ b/ee/identitymanager/identity_managers/auth0/auth0_identitymanager.py @@ -0,0 +1,133 @@ +import os +import secrets + +import jwt +from fastapi import HTTPException + +from ee.identitymanager.identity_managers.auth0.auth0_authverifier import ( + Auth0AuthVerifier, +) +from ee.identitymanager.identity_managers.auth0.auth0_utils import getAuth0Client +from keep.api.models.user import User +from keep.contextmanager.contextmanager import ContextManager +from keep.identitymanager.identitymanager import BaseIdentityManager +from keep.identitymanager.rbac import Admin as AdminRole + + +class Auth0IdentityManager(BaseIdentityManager): + def __init__(self, tenant_id, context_manager: ContextManager, **kwargs): + super().__init__(tenant_id, context_manager, **kwargs) + self.logger.info("Auth0IdentityManager initialized") + self.domain = os.environ.get("AUTH0_DOMAIN") + self.client_id = os.environ.get("AUTH0_CLIENT_ID") + self.client_secret = os.environ.get("AUTH0_CLIENT_SECRET") + self.audience = f"https://{self.domain}/api/v2/" + self.jwks_client = jwt.PyJWKClient( + f"https://{self.domain}/.well-known/jwks.json", + cache_keys=True, + headers={"User-Agent": "keep-api"}, + ) + + def get_users(self) -> list[User]: + return self._get_users_auth0(self.tenant_id) + + def _get_users_auth0(self, tenant_id: str) -> list[User]: + auth0 = getAuth0Client() + users = auth0.users.list(q=f'app_metadata.keep_tenant_id:"{tenant_id}"') + users = [ + User( + email=user["email"], + name=user["name"], + # for backwards compatibility we return admin if no role is set + role=user.get("app_metadata", {}).get( + "keep_role", AdminRole.get_name() + ), + last_login=user.get("last_login", None), + created_at=user["created_at"], + picture=user["picture"], + ) + for user in users.get("users", []) + ] + return users + + def create_user(self, user_email: str, role: str, **kwargs) -> dict: + return self._create_user_auth0(user_email, self.tenant_id, role) + + def delete_user(self, user_email: str) -> dict: + auth0 = getAuth0Client() + users = 
auth0.users.list(q=f'app_metadata.keep_tenant_id:"{self.tenant_id}"') + for user in users.get("users", []): + if user["email"] == user_email: + auth0.users.delete(user["user_id"]) + return {"status": "OK"} + raise HTTPException(status_code=404, detail="User not found") + + def get_auth_verifier(self, scopes) -> Auth0AuthVerifier: + return Auth0AuthVerifier(scopes) + + def _create_user_auth0(self, user_email: str, tenant_id: str, role: str) -> dict: + auth0 = getAuth0Client() + # User email can exist in 1 tenant only for now. + users = auth0.users.list(q=f'email:"{user_email}"') + if users.get("users", []): + raise HTTPException(status_code=409, detail="User already exists") + user = auth0.users.create( + { + "email": user_email, + "password": secrets.token_urlsafe(13), + "email_verified": True, + "app_metadata": {"keep_tenant_id": tenant_id, "keep_role": role}, + "connection": os.environ.get("AUTH0_DB_NAME", "keep-users"), + } + ) + user_dto = User( + email=user["email"], + name=user["name"], + # for backwards compatibility we return admin if no role is set + role=user.get("app_metadata", {}).get("keep_role", AdminRole.get_name()), + last_login=user.get("last_login", None), + created_at=user["created_at"], + picture=user["picture"], + ) + return user_dto + + def update_user(self, user_email: str, update_data: dict) -> User: + auth0 = getAuth0Client() + users = auth0.users.list( + q=f'email:"{user_email}" AND app_metadata.keep_tenant_id:"{self.tenant_id}"' + ) + if not users.get("users", []): + raise HTTPException(status_code=404, detail="User not found") + + user = users["users"][0] + user_id = user["user_id"] + + update_body = {} + if "email" in update_data and update_data["email"]: + update_body["email"] = update_data["email"] + if "password" in update_data and update_data["password"]: + update_body["password"] = update_data["password"] + if "role" in update_data and update_data["role"]: + update_body["app_metadata"] = user.get("app_metadata", {}) + update_body["app_metadata"]["keep_role"] = update_data["role"] + if "groups" in update_data and update_data["groups"]: + # Assuming groups are stored in app_metadata + if "app_metadata" not in update_body: + update_body["app_metadata"] = user.get("app_metadata", {}) + update_body["app_metadata"]["groups"] = update_data["groups"] + + try: + updated_user = auth0.users.update(user_id, update_body) + return User( + email=updated_user["email"], + name=updated_user["name"], + role=updated_user.get("app_metadata", {}).get( + "keep_role", AdminRole.get_name() + ), + last_login=updated_user.get("last_login", None), + created_at=updated_user["created_at"], + picture=updated_user["picture"], + ) + except Exception as e: + self.logger.error(f"Error updating user: {str(e)}") + raise HTTPException(status_code=500, detail="Failed to update user") diff --git a/ee/identitymanager/identity_managers/auth0/auth0_utils.py b/ee/identitymanager/identity_managers/auth0/auth0_utils.py new file mode 100644 index 0000000000..d56c43b884 --- /dev/null +++ b/ee/identitymanager/identity_managers/auth0/auth0_utils.py @@ -0,0 +1,15 @@ +from auth0.authentication import GetToken +from auth0.management import Auth0 + +from keep.api.core.config import config + + +def getAuth0Client() -> Auth0: + AUTH0_DOMAIN = config("AUTH0_MANAGEMENT_DOMAIN") + AUTH0_CLIENT_ID = config("AUTH0_CLIENT_ID") + AUTH0_CLIENT_SECRET = config("AUTH0_CLIENT_SECRET") + get_token = GetToken(AUTH0_DOMAIN, AUTH0_CLIENT_ID, AUTH0_CLIENT_SECRET) + token = 
get_token.client_credentials("https://{}/api/v2/".format(AUTH0_DOMAIN)) + mgmt_api_token = token["access_token"] + auth0 = Auth0(AUTH0_DOMAIN, mgmt_api_token) + return auth0 diff --git a/ee/identitymanager/identity_managers/keycloak/__init__.py b/ee/identitymanager/identity_managers/keycloak/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ee/identitymanager/identity_managers/keycloak/keycloak_authverifier.py b/ee/identitymanager/identity_managers/keycloak/keycloak_authverifier.py new file mode 100644 index 0000000000..1daf5bb6a3 --- /dev/null +++ b/ee/identitymanager/identity_managers/keycloak/keycloak_authverifier.py @@ -0,0 +1,94 @@ +import os + +from fastapi import Depends, HTTPException + +from keep.identitymanager.authenticatedentity import AuthenticatedEntity +from keep.identitymanager.authverifierbase import AuthVerifierBase, oauth2_scheme +from keycloak import KeycloakOpenID, KeycloakOpenIDConnection +from keycloak.keycloak_uma import KeycloakUMA +from keycloak.uma_permissions import UMAPermission + + +class KeycloakAuthVerifier(AuthVerifierBase): + """Handles authentication and authorization for Keycloak""" + + def __init__(self, scopes: list[str] = []) -> None: + super().__init__(scopes) + self.keycloak_url = os.environ.get("KEYCLOAK_URL") + self.keycloak_realm = os.environ.get("KEYCLOAK_REALM") + self.keycloak_client_id = os.environ.get("KEYCLOAK_CLIENT_ID") + self.keycloak_audience = os.environ.get("KEYCLOAK_AUDIENCE") + if ( + not self.keycloak_url + or not self.keycloak_realm + or not self.keycloak_client_id + ): + raise Exception( + "Missing KEYCLOAK_URL, KEYCLOAK_REALM or KEYCLOAK_CLIENT_ID environment variable" + ) + + self.keycloak_client = KeycloakOpenID( + server_url=self.keycloak_url, + realm_name=self.keycloak_realm, + client_id=self.keycloak_client_id, + client_secret_key=os.environ.get("KEYCLOAK_CLIENT_SECRET"), + ) + self.keycloak_openid_connection = KeycloakOpenIDConnection( + server_url=self.keycloak_url, + realm_name=self.keycloak_realm, + client_id=self.keycloak_client_id, + client_secret_key=os.environ.get("KEYCLOAK_CLIENT_SECRET"), + ) + self.keycloak_uma = KeycloakUMA(connection=self.keycloak_openid_connection) + # will be populated in on_start of the identity manager + self.protected_resource = None + + def _verify_bearer_token( + self, token: str = Depends(oauth2_scheme) + ) -> AuthenticatedEntity: + # verify keycloak token + try: + payload = self.keycloak_client.decode_token(token, validate=True) + except Exception: + raise HTTPException(status_code=401, detail="Invalid Keycloak token") + tenant_id = payload.get("keep_tenant_id") + email = payload.get("preferred_username") + org_id = payload.get("active_organization", {}).get("id") + org_realm = payload.get("active_organization", {}).get("name") + role = ( + payload.get("resource_access", {}) + .get(self.keycloak_client_id, {}) + .get("roles", []) + ) + # filter out uma_protection + role = [r for r in role if not r.startswith("uma_protection")] + if not role: + raise HTTPException( + status_code=401, detail="Invalid Keycloak token - no role" + ) + + role = role[0] + return AuthenticatedEntity( + tenant_id, + email, + None, + role, + org_id=org_id, + org_realm=org_realm, + token=token, + ) + + def _authorize(self, authenticated_entity: AuthenticatedEntity) -> None: + # use Keycloak's UMA to authorize + try: + permission = UMAPermission( + resource=self.protected_resource, + scope=self.scopes[0], # todo: handle multiple scopes per resource + ) + allowed = 
self.keycloak_uma.permissions_check( + token=authenticated_entity.token, permissions=[permission] + ) + # secure fallback + except Exception: + raise HTTPException(status_code=401, detail="Permission check failed") + return allowed diff --git a/ee/identitymanager/identity_managers/keycloak/keycloak_identitymanager.py b/ee/identitymanager/identity_managers/keycloak/keycloak_identitymanager.py new file mode 100644 index 0000000000..3c0ab08f19 --- /dev/null +++ b/ee/identitymanager/identity_managers/keycloak/keycloak_identitymanager.py @@ -0,0 +1,1081 @@ +import json +import os + +import requests +from fastapi import HTTPException +from fastapi.routing import APIRoute +from starlette.routing import Route + +from ee.identitymanager.identity_managers.keycloak.keycloak_authverifier import ( + KeycloakAuthVerifier, +) +from keep.api.models.user import Group, PermissionEntity, ResourcePermission, Role, User +from keep.contextmanager.contextmanager import ContextManager +from keep.identitymanager.authenticatedentity import AuthenticatedEntity +from keep.identitymanager.authverifierbase import AuthVerifierBase, get_all_scopes +from keep.identitymanager.identitymanager import PREDEFINED_ROLES, BaseIdentityManager +from keycloak import KeycloakAdmin +from keycloak.exceptions import KeycloakDeleteError, KeycloakGetError, KeycloakPostError +from keycloak.openid_connection import KeycloakOpenIDConnection + +# Some good sources on this topic: +# 1. https://stackoverflow.com/questions/42186537/resources-scopes-permissions-and-policies-in-keycloak +# 2. MUST READ - https://www.keycloak.org/docs/24.0.4/authorization_services/ +# 3. ADMIN REST API - https://www.keycloak.org/docs-api/22.0.1/rest-api/index.html +# 4. (TODO) PROTECTION API - https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_protection_api + + +class KeycloakIdentityManager(BaseIdentityManager): + def __init__(self, tenant_id, context_manager: ContextManager, **kwargs): + super().__init__(tenant_id, context_manager, **kwargs) + self.server_url = os.environ.get("KEYCLOAK_URL") + try: + self.keycloak_admin = KeycloakAdmin( + server_url=os.environ["KEYCLOAK_URL"] + "/admin", + username=os.environ.get("KEYCLOAK_ADMIN_USER"), + password=os.environ.get("KEYCLOAK_ADMIN_PASSWORD"), + realm_name=os.environ["KEYCLOAK_REALM"], + verify=True, + ) + self.client_id = self.keycloak_admin.get_client_id( + os.environ["KEYCLOAK_CLIENT_ID"] + ) + self.keycloak_id_connection = KeycloakOpenIDConnection( + server_url=os.environ["KEYCLOAK_URL"], + client_id=os.environ["KEYCLOAK_CLIENT_ID"], + realm_name=os.environ["KEYCLOAK_REALM"], + client_secret_key=os.environ["KEYCLOAK_CLIENT_SECRET"], + ) + + self.admin_url = f'{os.environ["KEYCLOAK_URL"]}/admin/realms/{os.environ["KEYCLOAK_REALM"]}/clients/{self.client_id}' + self.admin_url_without_client = f'{os.environ["KEYCLOAK_URL"]}/admin/realms/{os.environ["KEYCLOAK_REALM"]}' + self.realm = os.environ["KEYCLOAK_REALM"] + # if Keep controls the Keycloak server so it have event listener + # for future use + self.keep_controlled_keycloak = ( + os.environ.get("KEYCLOAK_KEEP_CONTROLLED", "false") == "true" + ) + except Exception as e: + self.logger.error( + "Failed to initialize Keycloak Identity Manager: %s", str(e) + ) + raise + self.logger.info("Keycloak Identity Manager initialized") + + def on_start(self, app) -> None: + # if the on start process is disabled: + if os.environ.get("SKIP_KEYCLOAK_ONSTART", "false") == "true": + self.logger.info("Skipping keycloak on start") + return + # first, 
create all the scopes + for scope in get_all_scopes(): + self.logger.info("Creating scope: %s", scope) + self.create_scope(scope) + self.logger.info("Scope created: %s", scope) + # create resource for each route + for route in app.routes: + self.logger.info("Creating resource for route %s", route.path) + # fetch the scopes for this route from the auth dependency + if isinstance(route, Route) and not isinstance(route, APIRoute): + self.logger.info("Skipping route: %s", route.path) + continue + if not route.dependant.dependencies: + self.logger.warning("Skipping unprotected route: %s", route.path) + continue + + scopes = [] + for dep in route.dependant.dependencies: + # for routes that have other dependencies + if not isinstance(dep.cache_key[0], KeycloakAuthVerifier): + continue + scopes = dep.cache_key[0].scopes + dep.cache_key[0].protected_resource = route.path + + # protected route but without scopes + if not scopes: + self.logger.warning("Route without scopes: %s", route.path) + + self.create_resource(route.path, scopes=scopes, resource_type="keep_route") + self.logger.info("Resource created for route: %s", route.path) + for role in PREDEFINED_ROLES: + self.logger.info("Creating role: %s", role) + self.create_role(role, predefined=True) + self.logger.info("Role created: %s", role) + + def _scope_name_to_id(self, all_scopes, scope_name: str) -> str: + # if its ":*": + if scope_name.split(":")[1] == "*": + scope_verb = scope_name.split(":")[0] + scope_ids = [ + scope["id"] + for scope in all_scopes + if scope["name"].startswith(scope_verb) + ] + return scope_ids + else: + scope = next( + (scope for scope in all_scopes if scope["name"] == scope_name), + None, + ) + return [scope["id"]] + + def get_permission_by_name(self, permission_name): + permissions = self.keycloak_admin.get_client_authz_permissions(self.client_id) + permission = next( + ( + permission + for permission in permissions + if permission["name"] == permission_name + ), + None, + ) + return permission + + def create_scope_based_permission(self, role: Role, policy_id: str) -> None: + try: + scopes = role.scopes + all_scopes = self.keycloak_admin.get_client_authz_scopes(self.client_id) + scopes_ids = set() + for scope in scopes: + scope_ids = self._scope_name_to_id(all_scopes, scope) + scopes_ids.update(scope_ids) + resp = self.keycloak_admin.create_client_authz_scope_permission( + client_id=self.client_id, + payload={ + "name": f"Permission for {role.name}", + "scopes": list(scopes_ids), + "policies": [policy_id], + "resources": [], + "decisionStrategy": "Affirmative".upper(), + "type": "scope", + "logic": "POSITIVE", + }, + ) + return resp + except KeycloakPostError as e: + # if the permissions already exists, just update it + if "already exists" in str(e): + self.logger.info("Scope based permission already exists in Keycloak") + # let's try to update + try: + permission = self.get_permission_by_name( + f"Permission for {role.name}" + ) + permission_id = permission.get("id") + resp = self.keycloak_admin.connection.raw_put( + path=f"{self.admin_url}/authz/resource-server/permission/scope/{permission_id}", + client_id=self.client_id, + data=json.dumps( + { + "name": f"Permission for {role.name}", + "scopes": list(scopes_ids), + "policies": [policy_id], + "resources": [], + "decisionStrategy": "Affirmative".upper(), + "type": "scope", + "logic": "POSITIVE", + } + ), + ) + except Exception: + pass + else: + self.logger.error( + "Failed to create scope based permission in Keycloak: %s", str(e) + ) + raise HTTPException( + 
status_code=500, detail="Failed to create scope based permission" + ) + + def create_scope(self, scope: str) -> None: + try: + self.keycloak_admin.create_client_authz_scopes( + self.client_id, + { + "name": scope, + "displayName": f"Scope for {scope}", + }, + ) + except KeycloakPostError as e: + self.logger.error("Failed to create scopes in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create scopes") + + def create_role(self, role: Role, predefined=False) -> str: + try: + role_name = self.keycloak_admin.create_client_role( + self.client_id, + { + "name": role.name, + "description": f"Role for {role.name}", + # we will use this to identify the role as predefined + "attributes": { + "predefined": [str(predefined).lower()], + }, + }, + skip_exists=True, + ) + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role_name) + # create the role policy + policy_id = self.create_role_policy(role_id, role.name, role.description) + # create the scope based permission + self.create_scope_based_permission(role, policy_id) + return role_id + except KeycloakPostError as e: + if "already exists" in str(e): + self.logger.info("Role already exists in Keycloak") + # its ok! + pass + else: + self.logger.error("Failed to create roles in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create roles") + + def update_role(self, role_id: str, role: Role) -> str: + # just update the policy + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role.name) + scopes = role.scopes + all_scopes = self.keycloak_admin.get_client_authz_scopes(self.client_id) + scopes_ids = set() + for scope in scopes: + scope_ids = self._scope_name_to_id(all_scopes, scope) + scopes_ids.update(scope_ids) + # get the scope-based permission + permissions = self.keycloak_admin.get_client_authz_permissions(self.client_id) + permission = next( + ( + permission + for permission in permissions + if permission["name"] == f"Permission for {role.name}" + ), + None, + ) + if not permission: + raise HTTPException(status_code=404, detail="Permission not found") + permission_id = permission["id"] + permission["scopes"] = list(scopes_ids) + resp = self.keycloak_admin.connection.raw_put( + f"{self.admin_url}/authz/resource-server/permission/scope/{permission_id}", + data=json.dumps(permission), + ) + resp.raise_for_status() + return role_id + + def create_role_policy(self, role_id: str, role_name: str, role_description) -> str: + try: + resp = self.keycloak_admin.connection.raw_post( + f"{self.admin_url}/authz/resource-server/policy/role", + data=json.dumps( + { + "name": f"Allow {role_name} to {role_description}", + "description": f"Allow {role_name} to {role_description}", # future use + "roles": [{"id": role_id, "required": False}], + "logic": "POSITIVE", + "fetchRoles": False, + } + ), + ) + resp.raise_for_status() + resp = resp.json() + return resp.get("id") + except requests.exceptions.HTTPError as e: + if "Conflict" in str(e): + self.logger.info("Policy already exists in Keycloak") + # get its id + policies = self.get_policies() + # find by name + policy = next( + ( + policy + for policy in policies + if policy["name"] == f"Allow {role_name} to {role_description}" + ), + None, + ) + return policy["id"] + else: + self.logger.error("Failed to create policies in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create policies") + except Exception as e: + self.logger.error("Failed to create policies in Keycloak: %s", str(e)) + raise 
HTTPException(status_code=500, detail="Failed to create policies") + + @property + def support_sso(self) -> bool: + return True + + def get_sso_providers(self) -> list[str]: + return [] + + def get_sso_wizard_url(self, authenticated_entity: AuthenticatedEntity) -> str: + tenant_realm = authenticated_entity.org_realm + org_id = authenticated_entity.org_id + return f"{self.server_url}realms/{tenant_realm}/wizard/?org_id={org_id}/#iss={self.server_url}/realms/{tenant_realm}" + + def get_users(self) -> list[User]: + try: + # TODO: query only users that Keep created (so we don't show all LDAP users) + users = self.keycloak_admin.get_users({}) + users = [user for user in users if "firstName" in user] + + users_dto = [] + for user in users: + # todo: should be more efficient + groups = self.keycloak_admin.get_user_groups(user["id"]) + groups = [ + { + "id": group["id"], + "name": group["name"], + } + for group in groups + ] + role = self.get_user_current_role(user_id=user.get("id")) + user_dto = User( + email=user.get("email", ""), + name=user.get("firstName", ""), + role=role, + created_at=user.get("createdTimestamp", ""), + ldap=( + True + if user.get("attributes", {}).get("LDAP_ID", False) + else False + ), + last_login=user.get("attributes", {}).get("last-login", [""])[0], + groups=groups, + ) + users_dto.append(user_dto) + return users_dto + except KeycloakGetError as e: + self.logger.error("Failed to fetch users from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to fetch users") + + def create_user( + self, + user_email: str, + user_name: str, + password: str, + role: list[str], + groups: list[str], + ) -> dict: + try: + user_data = { + "username": user_email, + "email": user_email, + "enabled": True, + "firstName": user_name, + } + if password: + user_data["credentials"] = [ + {"type": "password", "value": password, "temporary": False} + ] + + user_id = self.keycloak_admin.create_user(user_data) + if role: + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role) + self.keycloak_admin.assign_client_role( + client_id=self.client_id, + user_id=user_id, + roles=[{"id": role_id, "name": role}], + ) + for group in groups: + self.add_user_to_group(user_id=user_id, group=group) + + return { + "status": "success", + "message": "User created successfully", + "user_id": user_id, + } + except KeycloakPostError as e: + if "User exists" in str(e): + self.logger.error( + "Failed to create user - user %s already exists", user_email + ) + raise HTTPException( + status_code=409, + detail=f"Failed to create user - user {user_email} already exists", + ) + self.logger.error("Failed to create user in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create user") + + def get_user_id_by_email(self, user_email: str) -> str: + user_id = self.keycloak_admin.get_users(query={"email": user_email}) + if not user_id: + self.logger.error("User does not exist") + raise HTTPException(status_code=404, detail="User does not exist") + elif len(user_id) > 1: + self.logger.error("Multiple users found") + raise HTTPException( + status_code=500, detail="Multiple users found, please contact admin" + ) + user_id = user_id[0]["id"] + return user_id + + def get_user_current_role(self, user_id: str) -> str: + current_role = ( + self.keycloak_admin.connection.raw_get( + self.admin_url_without_client + f"/users/{user_id}/role-mappings" + ) + .json() + .get("clientMappings", {}) + .get(self.realm, {}) + .get("mappings") + ) + + if current_role: + # remove uma protection + current_role = [ + role for role in current_role if role["name"] != "uma_protection" + ] + # if uma_protection is the only role, then the user has no role + if current_role: + return current_role[0]["name"] + else: + return None + else: + return None + + def add_user_to_group(self, user_id: str, group: str): + resp = self.keycloak_admin.connection.raw_put( + f"{self.admin_url_without_client}/users/{user_id}/groups/{group}", + data=json.dumps({}), + ) + resp.raise_for_status() + + def update_user(self, user_email: str, update_data: dict) -> dict: + try: + user_id = self.get_user_id_by_email(user_email) + if "role" in update_data and update_data["role"]: + role = update_data["role"] + # get current role and understand if it needs to be updated: + current_role = self.get_user_current_role(user_id) + # update the role only if it's different from the current one + # TODO: more than one role + if current_role != role: + role_id = self.keycloak_admin.get_client_role_id( + self.client_id, role + ) + if not role_id: + self.logger.error("Role does not exist") + raise HTTPException( + status_code=404, detail="Role does not exist" + ) + self.keycloak_admin.assign_client_role( + client_id=self.client_id, + user_id=user_id, + roles=[{"id": role_id, "name": role}], + ) + if "groups" in update_data and update_data["groups"]: + # get the current groups + groups = self.keycloak_admin.get_user_groups(user_id) + groups_ids = [g.get("id") for g in groups] + # calc which groups need to be removed and which to be added + groups_to_remove = [ + group_id + for group_id in groups_ids + if group_id not in update_data["groups"] + ] + + groups_to_add = [ + group for group in update_data["groups"] if group not in groups_ids + ] + # remove + for group in groups_to_remove: + self.logger.info("Leaving group") + resp = self.keycloak_admin.connection.raw_delete( + f"{self.admin_url_without_client}/users/{user_id}/groups/{group}" + ) + resp.raise_for_status() + self.logger.info("Left group") + # add + for group in groups_to_add: + self.logger.info("Joining group") + self.add_user_to_group(user_id=user_id, group=group) + self.logger.info("Joined group") + return {"status": "success", "message": "User updated successfully"} + except KeycloakPostError as e: + self.logger.error("Failed to update user in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to update user") + + def delete_user(self, user_email: str) -> dict: + try: + user_id = self.get_user_id_by_email(user_email) + self.keycloak_admin.delete_user(user_id) + # delete the policy for the user (if not implicitly deleted?)
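+ # note (assumption, not confirmed against the Keycloak docs): user-scoped policies
+ # reference the deleted user id and appear to be cascade-deleted with the user;
+ # if orphaned policies are ever observed, they could be cleaned up here via
+ # self.keycloak_admin.delete_client_authz_policy(self.client_id, policy_id),
+ # the same call delete_role uses below.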
+ return {"status": "success", "message": "User deleted successfully"} + except KeycloakDeleteError as e: + self.logger.error("Failed to delete user from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to delete user") + + def get_auth_verifier(self, scopes: list) -> AuthVerifierBase: + return KeycloakAuthVerifier(scopes) + + def create_resource( + self, resource_name: str, scopes: list[str] = [], resource_type="keep_generic" + ) -> None: + resource = { + "name": resource_name, + "displayName": f"Resource for {resource_name}", + "type": "urn:keep:resources:" + resource_type, + "scopes": [{"name": scope} for scope in scopes], + } + try: + self.keycloak_admin.create_client_authz_resource(self.client_id, resource) + except KeycloakPostError as e: + if "already exists" in str(e): + self.logger.info("Resource already exists in Keycloak") + pass + else: + self.logger.error("Failed to create resource in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create resource") + + def delete_resource(self, resource_id: str) -> None: + try: + resources = self.keycloak_admin.get_client_authz_resources( + os.environ["KEYCLOAK_CLIENT_ID"] + ) + for resource in resources: + if resource["uris"] == ["/resource/" + resource_id]: + self.keycloak_admin.delete_client_authz_resource( + os.environ["KEYCLOAK_CLIENT_ID"], resource["id"] + ) + except KeycloakDeleteError as e: + self.logger.error("Failed to delete resource from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to delete resource") + + def get_groups(self) -> list[dict]: + try: + groups = self.keycloak_admin.get_groups( + query={"briefRepresentation": False} + ) + result = [] + for group in groups: + group_id = group["id"] + group_name = group["name"] + roles = group.get("clientRoles", {}).get("keep", []) + + # Fetch members for each group + members = self.keycloak_admin.get_group_members(group_id) + member_names = [member.get("email", "") for member in members] + member_count = len(members) + + result.append( + Group( + id=group_id, + name=group_name, + roles=roles, + memberCount=member_count, + members=member_names, + ) + ) + return result + except KeycloakGetError as e: + self.logger.error("Failed to fetch groups from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to fetch groups") + + def create_user_policy(self, perm, permission: ResourcePermission) -> None: + # we need the user id from email: + # TODO: this is not efficient, we should cache this + users = self.keycloak_admin.get_users({}) + user = next( + (user for user in users if user["email"] == perm.id), + None, + ) + if not user: + raise HTTPException(status_code=400, detail="User not found") + resp = self.keycloak_admin.connection.raw_post( + f"{self.admin_url}/authz/resource-server/policy/user", + data=json.dumps( + { + "name": f"Allow user {user.get('id')} to access resource type {permission.resource_type} with name {permission.resource_name}", + "description": json.dumps( + { + "user_id": user.get("id"), + "user_email": user.get("email"), + "resource_id": permission.resource_id, + } + ), + "logic": "POSITIVE", + "users": [user.get("id")], + } + ), + ) + try: + resp.raise_for_status() + # 409 is ok, it means the policy already exists + except Exception as e: + if resp.status_code != 409: + raise e + # just continue to next policy + else: + return None + policy_id = resp.json().get("id") + return policy_id + + def create_group_policy(self, perm, permission: ResourcePermission) 
-> None: + resp = self.keycloak_admin.connection.raw_post( + f"{self.admin_url}/authz/resource-server/policy/group", + data=json.dumps( + { + "name": f"Allow group {perm.id} to access resource type {permission.resource_type} with name {permission.resource_name}", + "description": json.dumps( + { + "group_id": perm.id, + "resource_id": permission.resource_id, + } + ), + "logic": "POSITIVE", + "groups": [{"id": perm.id, "extendChildren": False}], + "groupsClaim": "", + } + ), + ) + try: + resp.raise_for_status() + # 409 is ok, it means the policy already exists + except Exception as e: + if resp.status_code != 409: + raise e + # just continue to next policy + else: + return None + policy_id = resp.json().get("id") + return policy_id + + def create_permissions(self, permissions: list[ResourcePermission]) -> None: + # create or update + try: + existing_permissions = self.keycloak_admin.get_client_authz_permissions( + self.client_id, + ) + existing_permission_names_to_permissions = { + permission["name"]: permission for permission in existing_permissions + } + for permission in permissions: + # 1. first, create the resource if its not already created + resp = self.keycloak_admin.create_client_authz_resource( + self.client_id, + { + "name": permission.resource_id, + "displayName": permission.resource_name, + "type": permission.resource_type, + "scopes": [], + }, + skip_exists=True, + ) + # 2. create the policy if it doesn't exist: + policies = [] + for perm in permission.permissions: + try: + if perm.type == "user": + policy_id = self.create_user_policy(perm, permission) + if policy_id: + policies.append(policy_id) + else: + self.logger.info("Policy already exists in Keycloak") + else: + policy_id = self.create_group_policy(perm, permission) + if policy_id: + policies.append(policy_id) + else: + self.logger.info("Policy already exists in Keycloak") + + except KeycloakPostError as e: + if "already exists" in str(e): + self.logger.info("Policy already exists in Keycloak") + # its ok! + pass + else: + self.logger.error( + "Failed to create policy in Keycloak: %s", str(e) + ) + raise HTTPException( + status_code=500, detail="Failed to create policy" + ) + except Exception as e: + self.logger.error( + "Failed to create policy in Keycloak: %s", str(e) + ) + raise HTTPException( + status_code=500, detail="Failed to create policy" + ) + + # 3. 
Finally, create the resource + # 3.0 try to get the resource based permission + permission_name = f"Permission on resource type {permission.resource_type} with name {permission.resource_name}" + if existing_permission_names_to_permissions.get(permission_name): + # update the permission + existing_permissions = existing_permission_names_to_permissions[ + permission_name + ] + existing_permission_id = existing_permissions["id"] + # if no new policies, continue + if not policies: + existing_permissions["policies"] = [] + else: + # add the new policies + associated_policies = self.keycloak_admin.get_client_authz_permission_associated_policies( + self.client_id, existing_permission_id + ) + existing_permissions["policies"] = [ + policy["id"] for policy in associated_policies + ] + existing_permissions["policies"].extend(policies) + # update the policy to include the new policy + resp = self.keycloak_admin.connection.raw_put( + f"{self.admin_url}/authz/resource-server/permission/resource/{existing_permission_id}", + data=json.dumps(existing_permissions), + ) + resp.raise_for_status() + else: + # 3.2 else, create it + self.keycloak_admin.create_client_authz_resource_based_permission( + self.client_id, + { + "type": "resource", + "name": f"Permission on resource type {permission.resource_type} with name {permission.resource_name}", + "scopes": [], + "policies": policies, + "resources": [ + permission.resource_id, + ], + "decisionStrategy": "Affirmative".upper(), + }, + ) + except KeycloakPostError as e: + if "already exists" in str(e): + self.logger.info("Permission already exists in Keycloak") + raise HTTPException(status_code=409, detail="Permission already exists") + else: + self.logger.error( + "Failed to create permissions in Keycloak: %s", str(e) + ) + raise HTTPException( + status_code=500, detail="Failed to create permissions" + ) + except Exception as e: + self.logger.error("Failed to create permissions in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create permissions") + + def get_permissions(self) -> list[ResourcePermission]: + try: + resources = self.keycloak_admin.get_client_authz_resources(self.client_id) + resources_to_policies = {} + permissions = self.keycloak_admin.get_client_authz_permissions( + self.client_id + ) + for permission in permissions: + # if its a scope permission, skip it + if permission["type"] == "scope": + continue + permission_id = permission["id"] + associated_policies = ( + self.keycloak_admin.get_client_authz_permission_associated_policies( + self.client_id, permission_id + ) + ) + for policy in associated_policies: + try: + details = json.loads(policy["description"]) + # with Keep convention, the description should be a json + except json.JSONDecodeError: + self.logger.warning( + "Failed to parse policy description: %s", + policy["description"], + ) + continue + resource_id = details["resource_id"] + if resource_id not in resources_to_policies: + resources_to_policies[resource_id] = [] + if policy.get("type") == "user": + resources_to_policies[resource_id].append( + {"id": details.get("user_email"), "type": "user"} + ) + else: + resources_to_policies[resource_id].append( + {"id": details["group_id"], "type": "group"} + ) + permissions_dto = [] + for resource in resources: + resource_id = resource["name"] + resource_name = resource["displayName"] + resource_type = resource["type"] + permissions_dto.append( + ResourcePermission( + resource_id=resource_id, + resource_name=resource_name, + resource_type=resource_type, + 
permissions=[ + PermissionEntity( + id=policy["id"], + type=policy["type"], + ) + for policy in resources_to_policies.get(resource_id, []) + ], + ) + ) + return permissions_dto + except KeycloakGetError as e: + self.logger.error("Failed to fetch permissions from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to fetch permissions") + + # TODO: this should use UMA and not evaluation since evaluation needs admin access + def get_user_permission_on_resource_type( + self, resource_type: str, authenticated_entity: AuthenticatedEntity + ) -> list[str]: + """ + Get permissions for a specific user on a specific resource type. + + Args: + resource_type (str): The type of resource for which to retrieve permissions. + authenticated_entity (AuthenticatedEntity): The authenticated entity for which to retrieve permissions. + + Returns: + list: The names of the resources of this type the user is permitted to access. + """ + # there are two ways to do this: + # 1. admin api + # 2. token endpoint directly + # we will use the admin api and put (2) on TODO + # https://keycloak.discourse.group/t/keyycloak-authz-policy-evaluation-using-rest-api/798/2 + # https://keycloak.discourse.group/t/how-can-i-evaluate-user-permission-over-rest-api/10619 + + # also, we should see how it scales with many resources + try: + user_id = self.keycloak_admin.get_user_id(authenticated_entity.email) + resp = self.keycloak_admin.connection.raw_post( + f"{self.admin_url}/authz/resource-server/policy/evaluate", + data=json.dumps( + { + "userId": user_id, + "resources": [ + { + "type": resource_type, + } + ], + "context": {"attributes": {}}, + "clientId": self.client_id, + } + ), + ) + results = resp.json() + results = results.get("results", []) + allowed_resources_ids = [ + result["resource"]["name"] + for result in results + if result["status"] == "PERMIT" + ] + return allowed_resources_ids + except Exception as e: + self.logger.error( + "Failed to fetch user permissions from Keycloak: %s", str(e) + ) + raise HTTPException( + status_code=500, detail="Failed to fetch user permissions" + ) + + def get_policies(self) -> list[dict]: + try: + policies = self.keycloak_admin.connection.raw_get( + f"{self.admin_url}/authz/resource-server/policy" + ).json() + return policies + except KeycloakGetError as e: + self.logger.error("Failed to fetch policies from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to fetch policies") + + def get_roles(self) -> list[Role]: + """ + Get roles in the identity manager for authorization purposes. + + This method is used to retrieve the roles that have been defined + in the identity manager. It returns a list of role objects, each + containing the resource, scope, and user or group information. + + # TODO: Still to review if this is the correct way to fetch roles + """ + try: + roles = self.keycloak_admin.get_client_roles( + self.client_id, brief_representation=False + ) + # filter out the uma role + roles = [role for role in roles if role["name"] != "uma_protection"] + roles_dto = { + role.get("id"): Role( + id=role.get("id"), + name=role["name"], + description=role["description"], + scopes=set([]), # will populate this later + predefined=( + True + if role.get("attributes", {}).get("predefined", ["false"])[0] + == "true" + else False + ), + ) + for role in roles + } + # now for each role we need to get the scopes + policies = self.keycloak_admin.get_client_authz_policies(self.client_id) + roles_related_policies = [ + policy + for policy in policies + if policy.get("config", {}).get("roles", []) + ] + for policy in roles_related_policies: + role_id = json.loads(policy["config"]["roles"])[0].get("id") + policy_id = policy["id"] + # get dependent permissions + dependentPolicies = self.keycloak_admin.connection.raw_get( + f"{self.admin_url}/authz/resource-server/policy/{policy_id}/dependentPolicies", + ).json() + dependentPoliciesId = dependentPolicies[0].get("id") + scopes = self.keycloak_admin.connection.raw_get( + f"{self.admin_url}/authz/resource-server/policy/{dependentPoliciesId}/scopes", + ).json() + scope_names = [scope["name"] for scope in scopes] + # happens only when a role deletion previously failed for some reason + if role_id not in roles_dto: + self.logger.warning("Role not found for policy, skipping") + continue + roles_dto[role_id].scopes.update(scope_names) + return list(roles_dto.values()) + except KeycloakGetError as e: + self.logger.error("Failed to fetch roles from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to fetch roles") + + def get_role_by_role_name(self, role_name: str) -> Role: + roles = self.get_roles() + role = next((role for role in roles if role.name == role_name), None) + if not role: + self.logger.error("Role not found") + raise HTTPException(status_code=404, detail="Role not found") + return role + + def delete_role(self, role_id: str) -> None: + try: + # delete the role + resp = self.keycloak_admin.connection.raw_delete( + f"{self.admin_url_without_client}/roles-by-id/{role_id}", + ) + resp.raise_for_status() + # delete the policy + policies = self.get_policies() + policy_id = None + for policy in policies: + roles = json.loads(policy.get("config", {}).get("roles", "{}")) + if roles and roles[0].get("id") == role_id: + policy_id = policy.get("id") + break + + if not policy_id: + self.logger.warning("Policy not found for role deletion, skipping") + else: + self.logger.info("Deleting policy id") + self.keycloak_admin.delete_client_authz_policy( + self.client_id, policy_id + ) + self.logger.info("Policy id deleted") + # permissions get deleted implicitly when we delete the policy + except KeycloakDeleteError as e: + self.logger.error("Failed to delete role from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to delete role") + + def create_group( + self, group_name: str, members: list[str], roles: list[str] + ) -> None: + try: + # create it + group_id = self.keycloak_admin.create_group( + { + "name": group_name, + } + ) + # add members + for member in members: + user_id = self.get_user_id_by_email(member) + self.keycloak_admin.group_user_add(user_id=user_id, group_id=group_id) + # assign roles + for role in roles: + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role) +
self.keycloak_admin.assign_group_client_roles( + client_id=self.client_id, + group_id=group_id, + roles=[{"id": role_id, "name": role}], + ) + except KeycloakPostError as e: + if "already exists" in str(e): + self.logger.info("Group already exists in Keycloak") + pass + else: + self.logger.error("Failed to create group in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to create group") + + def update_group( + self, group_name: str, members: list[str], roles: list[str] + ) -> None: + try: + # get the group id + groups = self.keycloak_admin.get_groups(query={"search": group_name}) + if not groups: + self.logger.error("Group not found") + raise HTTPException(status_code=404, detail="Group not found") + group_id = groups[0]["id"] + # check which members need to be added and which to be removed + existing_members = self.keycloak_admin.get_group_members(group_id) + existing_members = [member.get("email") for member in existing_members] + members_to_add = [ + member for member in members if member not in existing_members + ] + members_to_remove = [ + member for member in existing_members if member not in members + ] + # remove members + for member in members_to_remove: + user_id = self.get_user_id_by_email(member) + self.keycloak_admin.group_user_remove( + user_id=user_id, group_id=group_id + ) + + # add members + for member in members_to_add: + user_id = self.get_user_id_by_email(member) + self.keycloak_admin.group_user_add(user_id=user_id, group_id=group_id) + + # check which roles need to be added and which to be removed + existing_roles = self.keycloak_admin.get_group_client_roles( + client_id=self.client_id, group_id=group_id + ) + existing_roles = [role["name"] for role in existing_roles] + roles_to_add = [role for role in roles if role not in existing_roles] + roles_to_remove = [role for role in existing_roles if role not in roles] + # remove roles + for role in roles_to_remove: + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role) + self.keycloak_admin.connection.raw_delete( + f"{self.admin_url_without_client}/groups/{group_id}/role-mappings/clients/{self.client_id}", + payload={ + "client": self.client_id, + "group": group_id, + "roles": [{"id": role_id, "name": role}], + }, + ) + # assign roles + for role in roles_to_add: + role_id = self.keycloak_admin.get_client_role_id(self.client_id, role) + self.keycloak_admin.assign_group_client_roles( + client_id=self.client_id, + group_id=group_id, + roles=[{"id": role_id, "name": role}], + ) + except KeycloakPostError as e: + self.logger.error("Failed to update group in Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to update group") + + def delete_group(self, group_name: str) -> None: + try: + groups = self.keycloak_admin.get_groups(query={"search": group_name}) + if not groups: + self.logger.error("Group not found") + raise HTTPException(status_code=404, detail="Group not found") + group_id = groups[0]["id"] + self.keycloak_admin.delete_group(group_id) + except KeycloakDeleteError as e: + self.logger.error("Failed to delete group from Keycloak: %s", str(e)) + raise HTTPException(status_code=500, detail="Failed to delete group") diff --git a/examples/alerts/db_disk_space.yml b/examples/alerts/db_disk_space.yml deleted file mode 100644 index 73e235aa00..0000000000 --- a/examples/alerts/db_disk_space.yml +++ /dev/null @@ -1,45 +0,0 @@ -# Database disk space is low (<10%) -alert: - id: db-disk-space - description: Check that the DB has enough disk space - owners: -
github-shahargl - - slack-talboren - services: - - db - - api - trigger: - # Run every hour or if the service-is-failing alert is triggered - interval: 1h - event: - - id: service-is-failing - type: alert - steps: - - name: db-no-space - provider: - type: mock - config: "{{ providers.db-server-mock }}" - command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space - command_output: 16% # Mock - condition: - - id: threshold - value: 90% # Trigger if more than 90% full - on_failure: # What to do if the SSH connection failed? - - name: ssh-connection-failed - retry: 5 # Retry 5 times - alert: true # Finally, alert - actions: - - name: trigger-slack - host: slack - channel: db-is-down - template: slack-error-template - context: - - name: db-name - value: "{{ hosts.db-server.name }}" - - name: disk-space - value: "{{ steps.db-no-space.command.output }}" - -provider: -- id: db-server-mock - description: mocks a db server - authentication: diff --git a/examples/alerts/monitor_migration_version.yml b/examples/alerts/monitor_migration_version.yml deleted file mode 100644 index 6d0d9a9902..0000000000 --- a/examples/alerts/monitor_migration_version.yml +++ /dev/null @@ -1,39 +0,0 @@ -# Alert if the schemas versions of my customers are not the same -# (each schema version stored in ALEMBIC_VERSION table in the customer db) -alert: - id: db-migration - description: Monitor that all DB's has the same db version - trigger: - # Any push to production should trigger this allert - event: - - id: push-to-production - from: github - # This repos trigger db migrations so that's when we need to check the alert - repos: - - palad-in/api - - palad-in/identity - steps: - - name: get-all-customers - from: snowflake-prod - # Get all customers - query: "select customer_id from customers" - # If the query fails, retry 5 times and finally if it fails 5 times, trigger the db-failure alert - on_error: - retry: 5 - action: &db-failure - # For each customer, check the schema version - - name: check-alembic-version - foreach: { { steps.get-all-customers.results } } - from: snowflake-prod - query: "select alembic_version from {{ steps.get-all-customers.results[i].customer_id }}" - condition: - - type: eval - value: "diff( {{steps.this.results[].alembic_version}} )" # Trigger if not all schema versions are the same - actions: - - name: trigger-slack - host: slack - channel: schemas-are-different - template: slack-message-template - context: - - name: query-results - value: "{{ steps.check-alembic-version.output }}" diff --git a/examples/alerts/purchase_fails.yml b/examples/alerts/purchase_fails.yml deleted file mode 100644 index 19ac33fb93..0000000000 --- a/examples/alerts/purchase_fails.yml +++ /dev/null @@ -1,27 +0,0 @@ -# User failed to buy (got 5XX on payment) and -# never tried to buy again (no following payments) - -alert: - id: purchase-failed - description: User failed to pay and left without buying - trigger: - interval: 5m - steps: - - name: user-added-item - description: Get all users that failed to pay - from: elastic-prod - query: 'url: "/cart/pay" AND method: "POST" and status!: 200' # ELK language - duration: 5m - - name: user-did-not-buy - description: The user did not try again to buy - from: elastic-prod - query: 'url: "/cart/pay" AND method: "POST"' - after: "{{ steps.user-added-item.time_invoked }} + 15m" # No payment after 15 minutes - join: "{{ steps.user-added-item.user_id }}" # Join on the user id - - actions: - - name: trigger-pager-duty - host: pagerduty - context: - - name: 
number-of-users-failed-to-pay-and-then-left - value: { { len(steps.user-did-not-buy.results) } } diff --git a/examples/alerts/service_failed.yml b/examples/alerts/service_failed.yml deleted file mode 100644 index d2e1c85886..0000000000 --- a/examples/alerts/service_failed.yml +++ /dev/null @@ -1,39 +0,0 @@ -alert: - id: service-failed - description: Service failing with 5XX errors - trigger: - interval: 5m # Run every 5 minutes - duration: 5m # Look for the last 5 minutes (can be overrided in jobs) - ends: 01/02/2023 # The alert will be disposed after 1.2.2023 - dont_alert_if: - - name: fatigue - condition: len({{alerts.service-failed.prev.no_action}} ) > 3) - steps: - - name: service-failing-with-5xx - from: datadog - query: "status>500" # Datadog query language - duration: 60s # In the last 60 seconds - condition: - - type: threshold - value: 5 # Number of 5XX - dont_alert_if: # Don't alert if the db is down - - name: database-is-down - from: snowflake-prod - query: select * from API # Heartbeat - timeout: 5s # If the database is not answering after 5 seconds - actions: - - name: trigger-slack - host: slack - channel: service-is-down - template: hello {service-name} - context: - - name: service-name - value: { { steps.service-failing-with-5xx.service_name } } - - name: database-latency - value: - { - { steps.service-failing-with-5xx.database-is-down.dispatch_time }, - } - - name: number-of-5xx - value: - { { len(steps.service-failing-with-5xx.database-is-down.results) } } diff --git a/examples/providers/datadog.yml b/examples/providers/datadog.yml deleted file mode 100644 index b9f13e4b43..0000000000 --- a/examples/providers/datadog.yml +++ /dev/null @@ -1,7 +0,0 @@ -provider: - id: datadog - description: Datadog Production - provider_type: datadog - authentication: - api_key: 1234-abcd-efgh-5678-1111 # {{DATADOG_API_KEY}} to read from secret store - account: us5.datadoghq.com diff --git a/examples/providers/elastic.yml b/examples/providers/elastic.yml deleted file mode 100644 index 69c7a36b54..0000000000 --- a/examples/providers/elastic.yml +++ /dev/null @@ -1,7 +0,0 @@ -provider: - id: elastic - description: Elastic Production - provider_type: elasticsearch - authentication: - api_key: 1234-abcd-efgh-5678-1111 # {{ELASTIC_API_KEY}} to read from secret store - account: m12345.elastic.co diff --git a/examples/providers/github.yml b/examples/providers/github.yml deleted file mode 100644 index 7ebb1e853f..0000000000 --- a/examples/providers/github.yml +++ /dev/null @@ -1,6 +0,0 @@ -provider: - id: github-prod - description: GitHub Production - provider_type: github - authentication: - pat: abcdegh123456 # {{GITHUB_PAT}} to read from secret store diff --git a/examples/providers/prod-db.yml b/examples/providers/prod-db.yml deleted file mode 100644 index 9d85c98521..0000000000 --- a/examples/providers/prod-db.yml +++ /dev/null @@ -1,8 +0,0 @@ -provider: - id: db-server - description: Database Production Server - provider_type: ssh - authentication: - user: monitoring - ssh_key: ssh-rsa AASajsdnlajs # {{DB_SSH_KEY}} to read from secret store - hostname: db.production.com diff --git a/examples/providers/slack.yml b/examples/providers/slack.yml deleted file mode 100644 index b298c4debd..0000000000 --- a/examples/providers/slack.yml +++ /dev/null @@ -1,6 +0,0 @@ -provider: - id: slack - description: Paladin's slack - provider_type: slack - authentication: - webhook-url: https://yourorg.slack.com/webhooks-whatever1234 diff --git a/examples/providers/snowflake.yml b/examples/providers/snowflake.yml 
deleted file mode 100644 index aa89727fa9..0000000000 --- a/examples/providers/snowflake.yml +++ /dev/null @@ -1,8 +0,0 @@ -provider: - id: snowflake-prod - description: Snowflake Production - provider_type: snowflake - authentication: - user: snowflake-prod-user - password: 1234 # {{SNOWFLAKE_PROD_PASSWORD}} to read from secret store - account: gcp1234.snowflake.com diff --git a/examples/templates/slack.yaml b/examples/templates/slack.yaml deleted file mode 100644 index bba6d64da1..0000000000 --- a/examples/templates/slack.yaml +++ /dev/null @@ -1 +0,0 @@ -# SOME SLACK TEMPLATE diff --git a/examples/workflows/aks_basic.yml b/examples/workflows/aks_basic.yml new file mode 100644 index 0000000000..52aa985b05 --- /dev/null +++ b/examples/workflows/aks_basic.yml @@ -0,0 +1,20 @@ +workflow: + id: aks-example + description: aks-example + triggers: + - type: manual + steps: + # get all pods + - name: get-pods + provider: + type: aks + config: "{{ providers.aks }}" + with: + command_type: get_pods + actions: + - name: echo-pod-status + foreach: "{{ steps.get-pods.results }}" + provider: + type: console + with: + message: "Pod name: {{ foreach.value.metadata.name }} || Namespace: {{ foreach.value.metadata.namespace }} || Status: {{ foreach.value.status.phase }}" diff --git a/examples/workflows/autosupress.yml b/examples/workflows/autosupress.yml new file mode 100644 index 0000000000..3e6e85cf47 --- /dev/null +++ b/examples/workflows/autosupress.yml @@ -0,0 +1,14 @@ +workflow: + id: autosupress + strategy: parallel + description: demonstrates how to automatically suppress alerts + triggers: + - type: alert + actions: + - name: dismiss-alert + provider: + type: mock + with: + enrich_alert: + - key: dismissed + value: "true" diff --git a/examples/workflows/bash_example.yml b/examples/workflows/bash_example.yml new file mode 100644 index 0000000000..4b5518ef93 --- /dev/null +++ b/examples/workflows/bash_example.yml @@ -0,0 +1,29 @@ +workflow: + id: Resend-Python-service + description: Python Resend Mail + triggers: + - type: manual + owners: [] + services: [] + steps: + - name: run-script + provider: + config: '{{ providers.default-bash }}' + type: bash + with: + command: python3 test.py + timeout: 5 + actions: + - condition: + - assert: '{{ steps.run-script.results.return_code }} == 0' + name: assert-condition + type: assert + name: trigger-resend + provider: + type: resend + config: "{{ providers.resend-test }}" + with: + _from: "onboarding@resend.dev" + to: "youremail.dev@gmail.com" + subject: "Python test is up!" + html:
"<div>Python test is up!</div>"
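A note on a pattern the workflow examples above and below rely on: the `enrich_alert` block (see autosupress.yml above and the Jira/ServiceNow workflows further down) writes each key/value pair back onto the triggering alert, where `value` can be a literal or a path into the step/action results. A minimal sketch of that pattern, using the mock provider; the `handled` and `handled_note` fields are hypothetical, for illustration only:

workflow:
  id: enrich-sketch
  description: illustrative sketch of the enrich_alert pattern
  triggers:
    - type: alert
  actions:
    - name: mark-handled
      provider:
        type: mock
      with:
        enrich_alert:
          - key: handled # hypothetical field
            value: "true"
          - key: handled_note # hypothetical field
            value: "auto-handled by workflow"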
diff --git a/examples/workflows/bigquery.yml b/examples/workflows/bigquery.yml new file mode 100644 index 0000000000..10ea244c83 --- /dev/null +++ b/examples/workflows/bigquery.yml @@ -0,0 +1,74 @@ +alert: + id: bq-sql-query + description: Monitor that time difference is no more than 1 hour + steps: + - name: get-max-datetime + provider: + type: bigquery + config: "{{ providers.bigquery-prod }}" + with: + # Get max(datetime) from the random table + query: "SELECT MAX(created_date) as date FROM `bigquery-public-data.austin_311.311_service_requests` LIMIT 1" + - name: runbook-step1-bigquery-sql + provider: + type: bigquery + config: "{{ providers.bigquery-prod }}" + with: + # Get max(datetime) from the random table + query: "SELECT * FROM `bigquery-public-data.austin_bikeshare.bikeshare_stations` LIMIT 10" + actions: + - name: opsgenie-alert + condition: + - name: threshold-condition + type: threshold + # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours + # utcnow() returns the local machine datetime in UTC + # to_utc() converts a datetime to UTC + value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.get-max-datetime.results[0][date] }}")) + compare_to: 1 # hours + compare_type: gt # greater than + # Give it an alias so we can use it in the slack action + alias: A + provider: + type: opsgenie + config: " {{ providers.opsgenie-prod }} " + with: + message: "DB datetime value ({{ actions.opsgenie-alert.conditions.threshold-condition.0.compare_value }}) is greater than 1! ๐Ÿšจ" + - name: trigger-slack + if: "{{ A }}" + provider: + type: slack + config: " {{ providers.slack-prod }} " + with: + message: "DB datetime value ({{ actions.opsgenie-alert.conditions.threshold-condition.0.compare_value }}) is greater than 1! ๐Ÿšจ" + - name: trigger-slack-2 + if: "{{ A }}" + provider: + type: slack + config: " {{ providers.slack-prod }} " + with: + blocks: + - type: header + text: + type: plain_text + text: "Adding some context to the alert:" + emoji: true + - type: section + text: + type: mrkdwn + text: |- + {{#steps.runbook-step1-bigquery-sql.results}} + - Station id: {{station_id}} | Status: {{status}} + {{/steps.runbook-step1-bigquery-sql.results}} + + +providers: + bigquery-prod: + description: BigQuery Prod + authentication: + opsgenie-prod: + authentication: + api_key: "{{ env.OPSGENIE_API_KEY }}" + slack-prod: + authentication: + webhook_url: "{{ env.SLACKDEMO_WEBHOOK }}" diff --git a/examples/workflows/blogpost.yml b/examples/workflows/blogpost.yml new file mode 100644 index 0000000000..367981bab6 --- /dev/null +++ b/examples/workflows/blogpost.yml @@ -0,0 +1,52 @@ +workflow: + id: blogpost-workflow + description: Enrich the alerts and open ticket + triggers: + # filter on critical alerts + - type: alert + filters: + - key: severity + value: critical + steps: + # get the customer details + - name: get-more-details + provider: + type: mysql + config: " {{ providers.mysql-prod }} " + with: + query: "select * from blogpostdb.customer where customer_id = '{{ alert.customer_id }}'" + single_row: true + as_dict: true + enrich_alert: + - key: customer_name + value: results.name + - key: customer_email + value: results.email + - key: customer_tier + value: results.tier + actions: + # Create service now incident ticket + - name: create-service-now-ticket + # if the alert already assigned a ticket, skip it + if: "not '{{ alert.ticket_id }}'" + provider: + type: servicenow + config: " {{ providers.servicenow-prod }} " + with: + table_name: INCIDENT + payload: + 
short_description: "{{ alert.name }} - {{ alert.description }} [created by Keep][fingerprint: {{alert.fingerprint}}]" + description: "{{ alert.description }}" + enrich_alert: + - key: ticket_type + value: servicenow + - key: ticket_id + value: results.sys_id + - key: ticket_url + value: results.link + - key: ticket_status + value: results.stage + - key: table_name + value: "{{ alert.annotations.ticket_type }}" + - key: ticket_number + value: results.number diff --git a/examples/workflows/change.yml b/examples/workflows/change.yml new file mode 100644 index 0000000000..82f800504a --- /dev/null +++ b/examples/workflows/change.yml @@ -0,0 +1,13 @@ +workflow: + id: on-field-change + description: demonstrates how to trigger a workflow when a field changes + triggers: + - type: alert + only_on_change: + - status + actions: + - name: echo-test + provider: + type: console + with: + message: "Hello world" diff --git a/examples/workflows/console_example.yml b/examples/workflows/console_example.yml new file mode 100644 index 0000000000..6e800c07ef --- /dev/null +++ b/examples/workflows/console_example.yml @@ -0,0 +1,12 @@ +workflow: + id: console-example + description: console-example + triggers: + - type: manual + actions: + - name: echo + provider: + type: console + with: + logger: true + message: "Hey" diff --git a/examples/workflows/consts_and_vars.yml b/examples/workflows/consts_and_vars.yml new file mode 100644 index 0000000000..29501e3c24 --- /dev/null +++ b/examples/workflows/consts_and_vars.yml @@ -0,0 +1,147 @@ +workflow: + id: sa_pipeline_status_alert + description: gpu_test + triggers: + - type: alert + filters: + - key: source + value: "openobserve" + name: gpu_test + + # consts block for email_template and slack_message + consts: + email_template: | + Hi,
This {{ vars.alert_tier }} alert is triggered because the pipelines for {{ alert.host }} are down for more than keep.get_firing_time('{{ alert }}', 'minutes') minutes.
+ Please visit monitoring.keeohq.dev for more!
+ Regards,
+ KeepHQ dev Monitoring
+ + slack_message: | + {{ vars.alert_tier }} Alert: SA Pipelines are down + + Hi, + This {{ vars.alert_tier }} alert is triggered because the pipelines for {{ alert.host }} are down for more than keep.get_firing_time('{{ alert }}', 'minutes') minutes. + Please visit monitoring.keeohq.dev for more! + + actions: + # Sendgrid Tier 0 Alert + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 0 and keep.get_firing_time('{{ alert }}', 'minutes') < 10" + name: Sendgrid_Tier_0_alert + vars: + alert_tier: "Alert 0" + provider: + config: "{{ providers.Sendgrid }}" + type: sendgrid + with: + to: + - "shahar@keephq.dev" + subject: '"Tier 0 Alert: SA Pipelines are down"' + html: "{{ consts.email_template }}" + + # Sendgrid Tier 1 Alert + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 10 and keep.get_firing_time('{{ alert }}', 'minutes') < 15" + name: Sendgrid_Tier_1_alert + vars: + alert_tier: "Alert 1" + provider: + config: "{{ providers.Sendgrid }}" + type: sendgrid + with: + to: + - "shahar@keephq.dev" + subject: '"Tier 1 Alert: SA Pipelines are down"' + html: "{{ consts.email_template }}" + + # Sendgrid Tier 2 Alert + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 60 and keep.get_firing_time('{{ alert }}', 'minutes') < 70" + name: Sendgrid_Tier_2_alert + vars: + alert_tier: "Alert 2" + provider: + config: "{{ providers.Sendgrid }}" + type: sendgrid + with: + to: + - "shahar@keephq.dev" + subject: '"Tier 2 Alert: SA Pipelines are down"' + html: "{{ consts.email_template }}" + + # Sendgrid Tier 3 Alert + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 120 and keep.get_firing_time('{{ alert }}', 'minutes') < 130" + name: Sendgrid_Tier_3_alert + vars: + alert_tier: "Alert 3" + provider: + config: "{{ providers.Sendgrid }}" + type: sendgrid + with: + to: + - "shahar@keephq.dev" + subject: '"Tier 3 Alert: SA Pipelines are down"' + html: "{{ consts.email_template }}" + + # Sendgrid Tier 4 Alert + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 1440 and keep.get_firing_time('{{ alert }}', 'minutes') < 1450" + name: Sendgrid_Tier_4_alert + vars: + alert_tier: "Alert 4" + provider: + config: "{{ providers.Sendgrid }}" + type: sendgrid + with: + to: + - "shahar@keephq.dev" + subject: '"Tier 4 Alert: SA Pipelines are down"' + html: "{{ consts.email_template }}" + + # Slack Alerts + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 0 and keep.get_firing_time('{{ alert }}', 'minutes') < 10" + name: Slack_Tier_0_alert + vars: + alert_tier: "Alert 0" + provider: + config: "{{ providers.dev_slack }}" + type: slack + with: + message: "{{ consts.slack_message }}" + + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 10 and keep.get_firing_time('{{ alert }}', 'minutes') < 15" + name: Slack_Tier_1_alert + vars: + alert_tier: "Alert 1" + provider: + config: "{{ providers.dev_slack }}" + type: slack + with: + message: "{{ consts.slack_message }}" + + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 60 and keep.get_firing_time('{{ alert }}', 'minutes') < 70" + name: Slack_Tier_2_alert + vars: + alert_tier: "Alert 2" + provider: + config: "{{ providers.dev_slack }}" + type: slack + with: + message: "{{ consts.slack_message }}" + + - if: "keep.get_firing_time('{{ alert }}', 'minutes') >= 120 and keep.get_firing_time('{{ alert }}', 'minutes') < 130" + name: Slack_Tier_3_alert + vars: + alert_tier: "Alert 3" + provider: + config: "{{ providers.dev_slack }}" + type: slack + with: + message: "{{ consts.slack_message }}" + + - if: "keep.get_firing_time('{{ alert 
}}', 'minutes') >= 1440 and keep.get_firing_time('{{ alert }}', 'minutes') < 1450" + name: Slack_Tier_4_alert + vars: + alert_tier: "Alert 4" + provider: + config: "{{ providers.dev_slack }}" + type: slack + with: + message: "{{ consts.slack_message }}" diff --git a/examples/workflows/create_jira_ticket_upon_alerts.yml b/examples/workflows/create_jira_ticket_upon_alerts.yml new file mode 100644 index 0000000000..5a5f9d1a97 --- /dev/null +++ b/examples/workflows/create_jira_ticket_upon_alerts.yml @@ -0,0 +1,45 @@ +workflow: + id: sentry-alerts + description: handle alerts + triggers: + - type: alert + # we want to run this workflow only for Sentry alerts with high severity + filters: + - key: source + value: sentry + - key: severity + value: critical + - key: service + value: r"(payments|ftp)" + actions: + - name: send-slack-message-team-payments + # if the alert is on the payments service, slack the payments team + if: "'{{ alert.service }}' == 'payments'" + provider: + type: slack + config: " {{ providers.team-payments-slack }} " + with: + message: | + "A new alert from Sentry: Alert: {{ alert.name }} - {{ alert.description }} + {{ alert}}" + - name: create-jira-ticket-oncall-board + if: "'{{ alert.service }}' == 'ftp' and not '{{ alert.ticket_id }}'" + provider: + type: jira + config: " {{ providers.jira }} " + with: + board_name: "Oncall Board" + issuetype: "Task" + summary: "{{ alert.name }} - {{ alert.description }} (created by Keep)" + description: | + "This ticket was created by Keep. + Please check the alert details below: + {code:json} {{ alert }} {code}" + # enrich the alerts + enrich_alert: + - key: ticket_type + value: jira + - key: ticket_id + value: results.issue.key + - key: ticket_url + value: results.ticket_url diff --git a/examples/workflows/create_service_now_ticket_upon_alerts.yml b/examples/workflows/create_service_now_ticket_upon_alerts.yml new file mode 100644 index 0000000000..c075ac2057 --- /dev/null +++ b/examples/workflows/create_service_now_ticket_upon_alerts.yml @@ -0,0 +1,33 @@ +workflow: + id: servicenow + description: create a ticket in servicenow when an alert is triggered + triggers: + - type: alert + # create ticket for grafana/prometheus alerts + filters: + - key: source + value: r"(grafana|prometheus)" + actions: + - name: create-service-now-ticket + # if the ticket id is not present in the alert, create a ticket + if: "not '{{ alert.ticket_id }}' and {{ alert.annotations.ticket_type }}" + provider: + type: servicenow + config: " {{ providers.servicenow }} " + with: + table_name: "{{ alert.annotations.ticket_type }}" + payload: + short_description: "{{ alert.name }} - {{ alert.description }} [created by Keep][fingerprint: {{alert.fingerprint}}]" + description: "{{ alert.description }}" + # enrich the alert with the ticket number and other details returned from servicenow + enrich_alert: + - key: ticket_type + value: servicenow + - key: ticket_id + value: results.sys_id + - key: ticket_url + value: results.link + - key: ticket_status + value: results.stage + - key: table_name + value: "{{ alert.annotations.ticket_type }}" diff --git a/examples/workflows/cron-digest-alerts.yml b/examples/workflows/cron-digest-alerts.yml new file mode 100644 index 0000000000..10baa9ecc7 --- /dev/null +++ b/examples/workflows/cron-digest-alerts.yml @@ -0,0 +1,27 @@ +workflow: + id: alerts-daily-digest + description: run alerts digest twice a day (on 11:00 and 14:00) + triggers: + - type: interval + cron: 0 11,14 * * * + steps: + # get the alerts from keep + - name: 
get-alerts + provider: + type: keep + with: + filters: + # filter out alerts that are closed + - key: status + value: open + timerange: + from: "{{ state.workflows.alerts-daily-digest.last_run_time }}" + to: now + actions: + - name: send-digest + foreach: "{{ steps.get-alerts.results }}" + provider: + type: slack + config: "{{ providers.slack }}" + with: + message: "Open alert: {{ foreach.value.name }}" diff --git a/examples/workflows/db_disk_space.yml b/examples/workflows/db_disk_space.yml new file mode 100644 index 0000000000..cde82c520e --- /dev/null +++ b/examples/workflows/db_disk_space.yml @@ -0,0 +1,131 @@ +# Database disk space is low (<10%) +alert: + id: db-disk-space + description: Check that the DB has enough disk space + owners: + - github-shahargl + - slack-talboren + services: + - db + - api + # Run every 60 seconds + #interval: 60 + steps: + - name: db-no-space + provider: + type: mock + config: "{{ providers.db-server-mock }}" + with: + command: df -h | grep /dev/disk3s1s1 | awk '{ print $5}' # Check the disk space + command_output: 91% # Mock + actions: + - name: trigger-slack + condition: + - name: threshold-condition + type: threshold + value: "{{ steps.db-no-space.results }}" + compare_to: 90% # Trigger if more than 90% full + provider: + type: slack + config: " {{ providers.slack-demo }} " + with: + # Message is always mandatory + message: > + The disk space of {{ providers.db-server-mock.description }} is about to finish + Disk space left: {{ steps.db-no-space.results }} + blocks: + - type: header + text: + type: plain_text + text: 'Alert! :alarm_clock:' + emoji: true + - type: section + text: + type: mrkdwn + text: |- + Hello, SRE and Assistant to the Regional Manager Dwight! *Michael Scott* wants to know what's going on with the servers in the paper warehouse, there is a critical issue on-going and paper *must be delivered on time*. + *This is the alert context:* + - type: divider + - type: section + text: + type: mrkdwn + text: |- + Server *{{ providers.db-server-mock.description }}* + :floppy_disk: disk space is at {{ steps.db-no-space.results }} capacity + Seems like it prevents further inserts in to the database with some weird exception: 'This is a prank by Jim Halpert' + This means that paper production is currently on hold, Dunder Mifflin Paper Company *may lose revenue due to that*. + accessory: + type: image + image_url: https://media.licdn.com/dms/image/C4E03AQGtRDDj3GI4Ig/profile-displayphoto-shrink_800_800/0/1550248958619?e=2147483647&v=beta&t=-AYVwN44CsHUdIcd-7iOHQVVjfhEC0DZydhlmvNvTKo + alt_text: jim does dwight + - type: divider + - type: input + element: + type: multi_users_select + placeholder: + type: plain_text + text: Select users + emoji: true + action_id: multi_users_select-action + label: + type: plain_text + text: Select the people for the mission + emoji: true + - type: divider + - type: section + text: + type: plain_text + text: 'Some context that can help you:' + emoji: true + - type: context + elements: + - type: plain_text + text: 'DB System Info: Some important context fetched from the DB' + emoji: true + - type: context + elements: + - type: image + image_url: https://pbs.twimg.com/profile_images/625633822235693056/lNGUneLX_400x400.jpg + alt_text: cute cat + - type: mrkdwn + text: "*Cat* is currently on site, ready to follow your instructions." 
+            - type: divider
+            - dispatch_action: true
+              type: input
+              element:
+                type: plain_text_input
+                action_id: plain_text_input-action
+              label:
+                type: plain_text
+                text: Please Acknowledge
+                emoji: true
+            - type: actions
+              elements:
+                - type: button
+                  style: primary
+                  text:
+                    type: plain_text
+                    text: ":dog: Datadog"
+                    emoji: true
+                  value: click_me_123
+                - type: button
+                  style: danger
+                  text:
+                    type: plain_text
+                    text: ":sos: Database"
+                    emoji: true
+                  value: click_me_123
+                  url: https://google.com
+                - type: button
+                  text:
+                    type: plain_text
+                    text: ":book: Playbook"
+                    emoji: true
+                  value: click_me_123
+                  url: https://google.com
+
+
+providers:
+  db-server-mock:
+    description: Paper DB Server
+    authentication:
diff --git a/examples/workflows/dd.yml b/examples/workflows/dd.yml
new file mode 100644
index 0000000000..e6839c138e
--- /dev/null
+++ b/examples/workflows/dd.yml
@@ -0,0 +1,48 @@
+alert:
+  id: db-disk-space
+  triggers:
+    - type: manual
+  steps:
+    - name: check-error-rate
+      provider:
+        type: datadog
+        config: "{{ providers.datadog }}"
+        with:
+          query: "service:keep-github-app"
+          timeframe: "3d"
+          query_type: "logs"
+  actions:
+    - name: trigger-slack
+      condition:
+        - name: threshold-condition
+          type: threshold
+          value: "keep.len({{ steps.check-error-rate.results.logs }})"
+          compare_to: 0
+          operator: ">"
+      provider:
+        type: slack
+        config: " {{ providers.slack-demo }} "
+        with:
+          channel: db-is-down
+          # Message is always mandatory
+          message: >
+            The db is down. Please investigate.
+          blocks:
+            - type: section
+              text:
+                type: plain_text
+                text: |
+                  Query: {{ steps.check-error-rate.provider_parameters.query }}
+                  Timeframe: {{ steps.check-error-rate.provider_parameters.timeframe }}
+                  Number of logs: keep.len({{ steps.check-error-rate.results.logs }})
+                  From: {{ steps.check-error-rate.provider_parameters.from }}
+                  To: {{ steps.check-error-rate.provider_parameters.to }}
+
+providers:
+  db-server-mock:
+    description: Paper DB Server
+    authentication:
+  datadog:
+    authentication:
+      api_key: "{{ env.DATADOG_API_KEY }}"
+      app_key: "{{ env.DATADOG_APP_KEY }}"
diff --git a/examples/workflows/discord_basic.yml b/examples/workflows/discord_basic.yml
new file mode 100644
index 0000000000..60ffb2db3b
--- /dev/null
+++ b/examples/workflows/discord_basic.yml
@@ -0,0 +1,19 @@
+workflow:
+  id: discord-example
+  description: Discord example
+  triggers:
+    - type: manual
+  actions:
+    - name: discord
+      provider:
+        type: discord
+        config: "{{ providers.discordtest }}"
+        with:
+          content: Alerta!
+          components:
+            - type: 1 # Action row
+              components:
+                - type: 2 # Button
+                  style: 1 # Primary style
+                  label: "Click Me!"
+                  custom_id: "button_click"
diff --git a/examples/workflows/disk_grown_defects_rule.yml b/examples/workflows/disk_grown_defects_rule.yml
new file mode 100644
index 0000000000..e139f82d7a
--- /dev/null
+++ b/examples/workflows/disk_grown_defects_rule.yml
@@ -0,0 +1,39 @@
+
+# Alert description: this alert will trigger if the disk defect rate is over 50%, 40% or 30%.
+# Alert breakdown:
+# 1. Read the disk status from postgres (select * from disk)
+# 2. For each disk, check if the disk defect rate is over 50% (major), 40% (medium) or 30% (minor).
+# 3. If the disk defect rate is over the threshold, insert a new row into the alert table with the disk name and the defect rate.
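+# For illustration only, this is how the paired compare_to/level lists below are expected to resolve
+# (hypothetical sample values, not output from a real run):
+#   defect rate 55% -> crosses the 50 threshold -> alert_level 'major'
+#   defect rate 42% -> crosses the 40 threshold -> alert_level 'medium'
+#   defect rate 31% -> crosses the 30 threshold -> alert_level 'minor'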
+alert:
+  id: DiskGrownDefectsRule
+  steps:
+    - name: check-disk-defects
+      provider:
+        type: postgres
+        config: "{{ providers.postgres-server }}"
+        with:
+          query: "select * from disk"
+  actions:
+    - name: push-alert-to-postgres
+      foreach: "{{steps.check-disk-defects.results}}"
+      condition:
+        - name: threshold-condition
+          type: threshold
+          value: " {{ foreach.value[13] }} " # disk defect is the 13th column
+          compare_to: 50, 40, 30
+          level: major, medium, minor
+      provider:
+        type: postgres
+        config: "{{ providers.postgres-server }}"
+        with:
+          query: >-
+            INSERT INTO alert (alert_level, alert_message)
+            VALUES ('{{ foreach.level }}', 'Disk defects: {{ foreach.value[13] }} | Disk name: {{ foreach.value[1] }}')
+providers:
+  postgres-server:
+    description: The postgres server (sql)
+    authentication:
+      username: "{{ env.POSTGRES_USER }}"
+      password: "{{ env.POSTGRES_PASSWORD }}"
+      database: "{{ env.POSTGRES_DATABASE }}"
+      host: "{{ env.POSTGRES_HOST }}"
diff --git a/examples/workflows/elastic_enrich_example.yml b/examples/workflows/elastic_enrich_example.yml
new file mode 100644
index 0000000000..d4a711c68d
--- /dev/null
+++ b/examples/workflows/elastic_enrich_example.yml
@@ -0,0 +1,81 @@
+# if no acknowledgement has been received (updated in the ack index) within x time (taken from the config index), escalate to the next level of people
+workflow:
+  id: elastic-enrich
+  description: escalate-if-needed
+  triggers:
+    # run every minute
+    - type: interval
+      value: 1m
+  steps:
+    # first, query the ack index to check if there are any alerts that have not been acknowledged
+    - name: query-ack-index
+      provider:
+        type: elastic
+        config: " {{ providers.elastic }} "
+        with:
+          index: your_ack_index
+          query: |
+            {
+              "query": {
+                "bool": {
+                  "must": [
+                    {
+                      "match": {
+                        "acknowledged": false
+                      }
+                    }
+                  ]
+                }
+              }
+            }
+    - name: query-config-index
+      provider:
+        type: elastic
+        config: " {{ providers.elastic }} "
+        with:
+          index: your_config_index
+          query: |
+            {
+              "query": {
+                "bool": {
+                  "must": [
+                    {
+                      "match": {
+                        "config": true
+                      }
+                    }
+                  ]
+                }
+              }
+            }
+    - name: query-people-index
+      provider:
+        type: elastic
+        config: " {{ providers.elastic }} "
+        with:
+          index: your_people_index
+          query: |
+            {
+              "query": {
+                "bool": {
+                  "must": [
+                    {
+                      "match": {
+                        "people": true
+                      }
+                    }
+                  ]
+                }
+              }
+            }
+  # now, we have the results from the ack index, config index, and people index
+  actions:
+    - name: escalate-if-needed
+      # if there are any alerts that have not been acknowledged
+      if: "{{ query-ack-index.hits.total.value }} > 0"
+      provider:
+        type: slack # or email or whatever you want
+        config: " {{ providers.slack }} "
+        with:
+          message: |
+            "An unacknowledged alert has been found: {{ query-ack-index.hits.hits }} {{ query-config-index.hits.hits }} {{ query-people-index.hits.hits }}"
diff --git a/examples/workflows/failed-to-login-workflow.yml b/examples/workflows/failed-to-login-workflow.yml
new file mode 100644
index 0000000000..caa8181fb0
--- /dev/null
+++ b/examples/workflows/failed-to-login-workflow.yml
@@ -0,0 +1,34 @@
+workflow:
+  id: query-bigquery-when-alert-triggers-by-cloudwatch
+  description: Decide how to alert based on customer tier and enrich context
+  triggers:
+    - type: alert
+      filters:
+        - key: name
+          value: "User failed to login"
+  steps:
+    - name: get-customer-tier-by-id
+      provider:
+        type: bigquery
+        config: "{{ providers.bigquery-prod }}"
+        with:
+          query: "SELECT customer_name, tier FROM `bigquery-production.prod-db.customers` WHERE customer_id = {{ alert.customer_id }} LIMIT 1"
+  actions:
+    # for an enterprise customer, open an incident in opsgenie
+    - name: opsgenie-alert
+      condition:
+        - name: enterprise-tier
+          type: assert
+          assert: "{{ steps.get-customer-tier-by-id.result.tier }} == 'enterprise'"
+      provider:
+        type: opsgenie
+        config: " {{ providers.opsgenie-prod }} "
+        with:
+          message: "User of customer {{ steps.get-customer-tier-by-id.result.customer_name }} failed to login!"
+    # for every customer, send a slack message
+    - name: trigger-slack
+      provider:
+        type: slack
+        config: " {{ providers.slack-prod }} "
+        with:
+          message: "User of customer {{ steps.get-customer-tier-by-id.result.customer_name }} failed to login!"
diff --git a/examples/workflows/gke.yml b/examples/workflows/gke.yml
new file mode 100644
index 0000000000..458e27152e
--- /dev/null
+++ b/examples/workflows/gke.yml
@@ -0,0 +1,20 @@
+alert:
+  id: gke-example
+  description: gke-example
+  triggers:
+    - type: manual
+  steps:
+    # get all pods
+    - name: get-pods
+      provider:
+        type: gke
+        config: "{{ providers.GKE }}"
+        with:
+          command_type: get_pods
+  actions:
+    - name: echo-pod-status
+      foreach: "{{ steps.get-pods.results }}"
+      provider:
+        type: console
+        with:
+          message: "Pod name: {{ foreach.value.metadata.name }} || Namespace: {{ foreach.value.metadata.namespace }} || Status: {{ foreach.value.status.phase }}"
diff --git a/examples/workflows/http_example.yml b/examples/workflows/http_example.yml
new file mode 100644
index 0000000000..c4edf70ae3
--- /dev/null
+++ b/examples/workflows/http_example.yml
@@ -0,0 +1,37 @@
+workflow:
+  id: tiering-workflow
+  triggers:
+    # when an incident is created or updated with a new alert
+    - type: incident
+      on:
+        - create
+        - update
+  actions:
+    - name: send-slack-message-tier-0
+      # send tier0 if this is a new incident (no tier set) or if the incident is tier0 but the alert is alert2
+      if: "{{ !incident.current_tier || incident.current_tier == 0 && alert.name == 'alert2' }}"
+      provider:
+        type: slack
+        config: "{{ providers.slack }}"
+        with:
+          message: |
+            "Incident created: {{ incident.name }} - {{ incident.description }}
+            Tier: 0
+            Alert: {{ alert.name }} - {{ alert.description }}
+            Alert details: {{ alert }}"
+      # enrich the incident with the current tier
+      enrich_incident:
+        current_tier: 0
+    - name: send-slack-message-tier-1
+      if: "{{ incident.current_tier == 0 && alert.name == 'alert1' }}"
+      provider:
+        type: slack
+        config: "{{ providers.slack }}"
+        with:
+          message: |
+            "Incident updated: {{ incident.name }} - {{ incident.description }}
+            Tier: 1
+            Alert: {{ alert.name }} - {{ alert.description }}
+            Alert details: {{ alert }}"
+      enrich_incident:
+        current_tier: 1
diff --git a/examples/workflows/ilert-incident-upon-alert.yaml b/examples/workflows/ilert-incident-upon-alert.yaml
new file mode 100644
index 0000000000..09ba1dea0c
--- /dev/null
+++ b/examples/workflows/ilert-incident-upon-alert.yaml
@@ -0,0 +1,23 @@
+id: aad72d69-92b9-4e21-8f67-97d2a69bf8ac
+description: Create ILert incident upon Keep Alert
+triggers:
+- filters:
+  - key: source
+    value: keep
+  type: alert
+owners: []
+services: []
+steps: []
+actions:
+- name: ilert-action
+  provider:
+    config: '{{ providers.ilert-default }}'
+    type: ilert
+    with:
+      affectedServices:
+      - impact: OPERATIONAL
+        service:
+          id: 339743
+      message: A mock incident created with Keep!
+      status: INVESTIGATING
+      summary: Keep Incident {{ alert.name }}
diff --git a/examples/workflows/incident_example.yml b/examples/workflows/incident_example.yml
new file mode 100644
index 0000000000..46ce6266ff
--- /dev/null
+++ b/examples/workflows/incident_example.yml
@@ -0,0 +1,15 @@
+workflow:
+  id: incident-example
+  description: incident-example
+  triggers:
+    - type: incident
+      events:
+        - updated
+        - created
+
+  actions:
+    - name: just-echo
+      provider:
+        type: console
+        with:
+          message: "Hey there! I am an incident!"
diff --git a/examples/workflows/jira_on_prem.yml b/examples/workflows/jira_on_prem.yml
new file mode 100644
index 0000000000..fc79b15645
--- /dev/null
+++ b/examples/workflows/jira_on_prem.yml
@@ -0,0 +1,24 @@
+workflow:
+  id: jiraonprem-example
+  description: JIRA on-prem example
+  triggers:
+    - type: manual
+      name: test
+  owners: []
+  services: []
+  steps: []
+  actions:
+    - name: jiraonprem-action
+      provider:
+        config: '{{ providers.jira }}'
+        type: jiraonprem
+        with:
+          board_name: SA
+          custom_fields: ''
+          description: test
+          issue_type: Incident
+          labels:
+            - "SRE_Team"
+          priority: Low
+          project_key: SA
+          summary: test
diff --git a/examples/workflows/keep_semantic_alert_example_datadog.yml b/examples/workflows/keep_semantic_alert_example_datadog.yml
new file mode 100644
index 0000000000..325e7bf980
--- /dev/null
+++ b/examples/workflows/keep_semantic_alert_example_datadog.yml
@@ -0,0 +1,34 @@
+# AUTO GENERATED
+# Alert that was created with Keep semantic layer
+# Prompt: can you write an alert spec that triggers when a service has more than 0.01% error rate in datadog for more than an hour?
+alert:
+  id: service-error-rate
+  description: Check if the service has more than 0.01% error rate for more than an hour
+  owners:
+    - github-johndoe
+    - slack-janedoe
+  services:
+    - my-service
+  steps:
+    - name: check-error-rate
+      provider:
+        type: datadog
+        config: "{{ providers.datadog }}"
+        with:
+          query: "sum:my_service.errors{*}.as_count() / sum:my_service.requests{*}.as_count() * 100"
+          timeframe: "1h"
+  actions:
+    - name: notify-slack
+      condition:
+        - name: threshold-condition
+          type: threshold
+          value: "{{ steps.check-error-rate.results }}"
+          compare_to: 0.01
+          operator: ">"
+      provider:
+        type: slack
+        config: "{{ providers.slack-demo }}"
+        with:
+          channel: service-alerts
+          message: >
+            The my_service error rate is higher than 0.01% for more than an hour. Please investigate.
diff --git a/examples/workflows/new_auth0_users.yml b/examples/workflows/new_auth0_users.yml
new file mode 100644
index 0000000000..e9c0f0fe40
--- /dev/null
+++ b/examples/workflows/new_auth0_users.yml
@@ -0,0 +1,36 @@
+# Alert when there are new Auth0 users
+alert:
+  id: new-auth0-users
+  description: Get new users logged in to the platform
+  steps:
+    - name: get-auth0-users
+      provider:
+        type: auth0.logs
+        config: "{{ providers.auth0 }}"
+        with:
+          log_type: ss
+          previous_users: "{{ state.new-auth0-users.-1.alert_context.alert_steps_context.get-auth0-users.results.users }}" # state.alert-id.-1 for last run
+  actions:
+    - name: trigger-slack
+      condition:
+        - name: assert-condition
+          type: assert
+          assert: "{{ steps.get-auth0-users.results.new_users_count }} == 0" # if there are more than 0 new users, trigger the action
+      provider:
+        type: slack
+        config: " {{ providers.slack-demo }} "
+        with:
+          blocks:
+            - type: section
+              text:
+                type: plain_text
+                text: There are keep.len({{ steps.get-auth0-users.results.new_users }}) new users!
+                emoji: true
+            - type: section
+              text:
+                type: plain_text
+                text: |-
+                  {{#steps.get-auth0-users.results.new_users}}
+                  - {{user_name}}
+                  {{/steps.get-auth0-users.results.new_users}}
+                emoji: true
diff --git a/examples/workflows/new_github_stars.yml b/examples/workflows/new_github_stars.yml
new file mode 100644
index 0000000000..41f91e3d5b
--- /dev/null
+++ b/examples/workflows/new_github_stars.yml
@@ -0,0 +1,42 @@
+id: new-github-stars
+description: Notify Slack about new GitHub star for keephq/keep
+triggers:
+  - type: manual
+  - type: interval
+    value: 300
+steps:
+  - name: get-github-stars
+    provider:
+      config: "{{ providers.github }}"
+      type: github.stars
+      with:
+        previous_stars_count:
+          default: 0
+          key: "{{ last_workflow_results.get-github-stars.0.stars }}"
+        repository: keephq/keep
+actions:
+  - condition:
+      - assert: "{{ steps.get-github-stars.results.new_stargazers_count }} > 0"
+        name: assert-condition
+        type: assert
+    name: trigger-slack
+    provider:
+      config: "{{ providers.slack-demo }}"
+      type: slack
+      with:
+        blocks:
+          - text:
+              emoji: true
+              text: There are keep.len({{ steps.get-github-stars.results.new_stargazers }}) new stargazers for keephq/keep
+              type: plain_text
+            type: section
+          - text:
+              emoji: true
+              text: "{{#steps.get-github-stars.results.new_stargazers}}
+
+                - {{username}} at {{starred_at}}
+
+                {{/steps.get-github-stars.results.new_stargazers}}"
+              type: plain_text
+            type: section
+        channel: "C06N0KXXXX"
diff --git a/examples/workflows/ntfy_basic.yml b/examples/workflows/ntfy_basic.yml
new file mode 100644
index 0000000000..64b762771f
--- /dev/null
+++ b/examples/workflows/ntfy_basic.yml
@@ -0,0 +1,13 @@
+workflow:
+  id: ntfy-example
+  description: ntfy-example
+  triggers:
+    - type: manual
+  actions:
+    - name: ntfy
+      provider:
+        type: ntfy
+        config: "{{ providers.ntfy }}"
+        with:
+          message: "test-message"
+          topic: "test-topic"
diff --git a/examples/workflows/opsgenie_open_alerts.yml b/examples/workflows/opsgenie_open_alerts.yml
new file mode 100644
index 0000000000..a861981b9f
--- /dev/null
+++ b/examples/workflows/opsgenie_open_alerts.yml
@@ -0,0 +1,28 @@
+alert:
+  id: opsgenie-get-open-alerts
+  description: Get open alerts from Opsgenie
+  steps:
+    - name: get-open-alerts
+      provider:
+        type: opsgenie
+        config: "{{ providers.opsgenie }}"
+        with:
+          type: alerts
+          query: "status: open"
+  actions:
+    - name: slack
+      provider:
+        type: slack
+        config: " {{ providers.slack-demo }} "
+        with:
+          # Message is always mandatory
+          message: >
+            Opsgenie has {{ steps.get-open-alerts.results.number_of_alerts }} open alerts
+          blocks:
+            - type: section
+              text:
+                type: mrkdwn
+                text: |-
+                  {{#steps.get-open-alerts.results.alerts}}
+                  - Alert Id: {{id}} | Priority: {{priority}} | Created at: {{created_at}} | Message: {{message}}
+                  {{/steps.get-open-alerts.results.alerts}}
diff --git a/examples/workflows/planner_basic.yml b/examples/workflows/planner_basic.yml
new file mode 100644
index 0000000000..ab460af24e
--- /dev/null
+++ b/examples/workflows/planner_basic.yml
@@ -0,0 +1,18 @@
+workflow:
+  id: planner-demo
+  description: Create a task in planner.
+  triggers:
+    - type: interval
+      value: 15
+  actions:
+    - name: create-planner-task
+      provider:
+        type: planner
+        config: " {{ providers.planner }} "
+        with:
+          title: "Keep HQ Task1"
+          plan_id: "tAtCor_XPEmqTzVqTigCycgABz0K"
+      on-failure:
+        retry:
+          count: 2
+          interval: 2
\ No newline at end of file
diff --git a/examples/workflows/query_clickhouse.yml b/examples/workflows/query_clickhouse.yml
new file mode 100644
index 0000000000..50ed16054a
--- /dev/null
+++ b/examples/workflows/query_clickhouse.yml
@@ -0,0 +1,31 @@
+id: query-clickhouse
+description: Query Clickhouse and send an alert if there is an error
+triggers:
+  - type: manual
+
+steps:
+  - name: clickhouse-step
+    provider:
+      config: "{{ providers.clickhouse }}"
+      type: clickhouse
+      with:
+        query: SELECT * FROM logs_table ORDER BY timestamp DESC LIMIT 1;
+        single_row: "True"
+
+actions:
+  - name: ntfy-action
+    if: "'{{ steps.clickhouse-step.results.level }}' == 'ERROR'"
+    provider:
+      config: "{{ providers.ntfy }}"
+      type: ntfy
+      with:
+        message: "Error in clickhouse logs_table: {{ steps.clickhouse-step.results.level }}"
+        topic: clickhouse
+
+  - name: slack-action
+    if: "'{{ steps.clickhouse-step.results.level }}' == 'ERROR'"
+    provider:
+      config: "{{ providers.slack }}"
+      type: slack
+      with:
+        message: "Error in clickhouse logs_table: {{ steps.clickhouse-step.results.level }}"
diff --git a/examples/workflows/query_victoriametrics.yml b/examples/workflows/query_victoriametrics.yml
new file mode 100644
index 0000000000..bfa5383c41
--- /dev/null
+++ b/examples/workflows/query_victoriametrics.yml
@@ -0,0 +1,46 @@
+workflow:
+  id: query-victoriametrics
+  name: victoriametrics
+  description: victoriametrics
+  triggers:
+    - type: manual
+  steps:
+    - name: victoriametrics-step
+      provider:
+        config: "{{ providers.victoriametrics }}"
+        type: victoriametrics
+        with:
+          query: avg(rate(process_cpu_seconds_total))
+          queryType: query
+
+  actions:
+    - name: trigger-slack1
+      condition:
+        - name: threshold-condition
+          type: threshold
+          value: "{{ steps.victoriametrics-step.results.data.result.0.value.1 }}"
+          compare_to: 0.0050
+          alias: A
+          operator: ">"
+      provider:
+        type: slack
+        config: "{{ providers.slack }}"
+        with:
+          message: "Result: {{ steps.victoriametrics-step.results.data.result.0.value.1 }} is greater than 0.0050! 🚨"
+
+    - name: trigger-slack2
+      if: "{{ A }}"
+      provider:
+        type: slack
+        config: "{{ providers.slack }}"
+        with:
+          message: "Result: {{ steps.victoriametrics-step.results.data.result.0.value.1 }} is greater than 0.0050! 🚨"
+
+    - name: trigger-ntfy
+      if: "{{ A }}"
+      provider:
+        type: ntfy
+        config: "{{ providers.ntfy }}"
+        with:
+          message: "Result: {{ steps.victoriametrics-step.results.data.result.0.value.1 }} is greater than 0.0050! 🚨"
+          topic: ezhil
diff --git a/examples/workflows/raw_sql_query_datetime.yml b/examples/workflows/raw_sql_query_datetime.yml
new file mode 100644
index 0000000000..6f3819b10b
--- /dev/null
+++ b/examples/workflows/raw_sql_query_datetime.yml
@@ -0,0 +1,28 @@
+# Alert if a result queried from the DB is above a certain threshold.
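+# For illustration (hypothetical timestamps, based on the inline comments further down):
+# keep.datetime_compare("2024-01-01 12:30:00", "2024-01-01 10:00:00") would return 2.5 (hours),
+# so a MAX(datetime) older than one hour trips the threshold condition below (compare_to: 1, compare_type: gt).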
+alert:
+  id: raw-sql-query
+  description: Monitor that time difference is no more than 1 hour
+  steps:
+    - name: get-max-datetime
+      provider:
+        type: mysql
+        config: "{{ providers.mysql-prod }}"
+        with:
+          # Get max(datetime) from the random table
+          query: "SELECT MAX(datetime) FROM demo_table LIMIT 1"
+  actions:
+    - name: trigger-slack
+      condition:
+        - name: threshold-condition
+          type: threshold
+          # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours
+          # utcnow() returns the local machine datetime in UTC
+          # to_utc() converts a datetime to UTC
+          value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.this.results[0][0] }}"))
+          compare_to: 1 # hours
+          compare_type: gt # greater than
+      provider:
+        type: slack
+        config: " {{ providers.slack-demo }} "
+        with:
+          message: "DB datetime value ({{ actions.trigger-slack.conditions.threshold.0.compare_value }}) is greater than 1! 🚨"
diff --git a/examples/workflows/resolve_old_alerts.yml b/examples/workflows/resolve_old_alerts.yml
new file mode 100644
index 0000000000..f299cfa3ac
--- /dev/null
+++ b/examples/workflows/resolve_old_alerts.yml
@@ -0,0 +1,26 @@
+workflow:
+  id: resolve-old-alerts
+  description: resolve alerts that have been firing for more than an hour
+  triggers:
+    - type: manual
+    - type: interval
+      value: 60
+  steps:
+    # get the alerts from keep
+    - name: get-alerts
+      provider:
+        type: keep
+        with:
+          version: 2
+          filter: "status == 'firing'"
+  actions:
+    - name: resolve-alerts
+      foreach: " {{ steps.get-alerts.results }} "
+      if: "keep.to_timestamp('{{ foreach.value.lastReceived }}') < keep.utcnowtimestamp() - 3600"
+      provider:
+        type: mock
+        with:
+          enrich_alert:
+            - key: status
+              value: resolved
+              disposable: true
diff --git a/examples/workflows/sendgrid_basic.yml b/examples/workflows/sendgrid_basic.yml
new file mode 100644
index 0000000000..af9ee7ccc6
--- /dev/null
+++ b/examples/workflows/sendgrid_basic.yml
@@ -0,0 +1,16 @@
+workflow:
+  id: sendgrid-basic-demo
+  description: send an email
+  triggers:
+    - type: manual
+  actions:
+    - name: trigger-email
+      provider:
+        type: sendgrid
+        config: " {{ providers.Sendgrid }} "
+        with:
+          to:
+            - "youremail@gmail.com"
+            - "youranotheremail@gmail.com"
+          subject: "Hello from Keep!"
+          html: "Test with HTML"
diff --git a/examples/workflows/severity_changed.yml b/examples/workflows/severity_changed.yml
new file mode 100644
index 0000000000..9f9523c137
--- /dev/null
+++ b/examples/workflows/severity_changed.yml
@@ -0,0 +1,13 @@
+workflow:
+  id: on-severity-change
+  description: demonstrates how to trigger a workflow when severity changes, and show available options
+  triggers:
+    - type: alert
+      severity_changed: true
+  actions:
+    - name: echo-test
+      provider:
+        type: console
+        with:
+          # "The severity has changed from warning to info (it has decreased from last alert)"
+          message: "The severity has changed from {{ alert.previous_severity }} to {{ alert.severity }} (it has {{ alert.severity_change }} since last alert)"
diff --git a/examples/workflows/signl4-alerting-workflow.yaml b/examples/workflows/signl4-alerting-workflow.yaml
new file mode 100644
index 0000000000..7923b3129a
--- /dev/null
+++ b/examples/workflows/signl4-alerting-workflow.yaml
@@ -0,0 +1,18 @@
+id: signl4-alerting-workflow
+description: handle alerts
+triggers:
+- filters:
+  - key: source
+    value: r".*"
+  type: alert
+owners: []
+services: []
+steps: []
+actions:
+- name: signl4-action
+  provider:
+    config: '{{ providers.SIGNL4 Alerting }}'
+    type: signl4
+    with:
+      message: Test.
+      title: Keep Alert
diff --git a/examples/workflows/simple_http_request_ntfy.yml b/examples/workflows/simple_http_request_ntfy.yml
new file mode 100644
index 0000000000..ac19b99f7e
--- /dev/null
+++ b/examples/workflows/simple_http_request_ntfy.yml
@@ -0,0 +1,32 @@
+# Alert if a result queried from the DB is above a certain threshold.
+alert:
+  id: raw-sql-query
+  description: Monitor that time difference is no more than 1 hour
+  steps:
+    - name: get-max-datetime
+      provider:
+        type: mysql
+        config: "{{ providers.mysql-prod }}"
+        with:
+          # Get max(datetime) from the random table
+          query: "SELECT MAX(datetime) FROM demo_table LIMIT 1"
+  actions:
+    - name: trigger-ntfy
+      condition:
+        - name: threshold-condition
+          type: threshold
+          # datetime_compare(t1, t2) compares t1-t2 and returns the diff in hours
+          # utcnow() returns the local machine datetime in UTC
+          # to_utc() converts a datetime to UTC
+          value: keep.datetime_compare(keep.utcnow(), keep.to_utc("{{ steps.get-max-datetime.results[0][0] }}"))
+          compare_to: 1 # hours
+          compare_type: gt # greater than
+      provider:
+        type: http
+        with:
+          method: POST
+          body:
+            alert: "{{ alert }}"
+            fingerprint: "{{ alert.fingerprint }}"
+            some_customized_field: "{{ keep.strip(alert.some_attribute) }}"
+          url: "https://ntfy.sh/MoRen5UlPEQr8s4Y"
diff --git a/examples/workflows/slack_basic.yml b/examples/workflows/slack_basic.yml
new file mode 100644
index 0000000000..9c73f1da6b
--- /dev/null
+++ b/examples/workflows/slack_basic.yml
@@ -0,0 +1,16 @@
+workflow:
+  id: slack-basic-demo
+  description: Send a slack message when a cloudwatch alarm is triggered
+  triggers:
+    - type: alert
+      filters:
+        - key: source
+          value: cloudwatch
+    - type: manual
+  actions:
+    - name: trigger-slack
+      provider:
+        type: slack
+        config: " {{ providers.slack-prod }} "
+        with:
+          message: "Got alarm from AWS CloudWatch! {{ alert.name }}"
diff --git a/examples/workflows/slack_basic_interval.yml b/examples/workflows/slack_basic_interval.yml
new file mode 100644
index 0000000000..8cc9e0fbac
--- /dev/null
+++ b/examples/workflows/slack_basic_interval.yml
@@ -0,0 +1,13 @@
+workflow:
+  id: slack-basic-interval-demo
+  description: Send a slack message every interval
+  triggers:
+    - type: interval
+      value: 15
+  actions:
+    - name: trigger-slack
+      provider:
+        type: slack
+        config: " {{ providers.slack-demo }} "
+        with:
+          message: "Send a slack message every 15 seconds!"
diff --git a/examples/workflows/squadcast_example.yml b/examples/workflows/squadcast_example.yml
new file mode 100644
index 0000000000..5849c5e3e2
--- /dev/null
+++ b/examples/workflows/squadcast_example.yml
@@ -0,0 +1,15 @@
+workflow:
+  id: squadcast
+  description: squadcast
+  triggers:
+    - type: alert
+  actions:
+    - name: create-incident
+      provider:
+        config: "{{ providers.squadcast }}"
+        type: squadcast
+        with:
+          additional_json: '{{ alert }}'
+          description: TEST
+          message: '{{ alert.name }}-test'
+          notify_type: incident
diff --git a/examples/workflows/telegram_basic.yml b/examples/workflows/telegram_basic.yml
new file mode 100644
index 0000000000..2bdc45f63e
--- /dev/null
+++ b/examples/workflows/telegram_basic.yml
@@ -0,0 +1,13 @@
+workflow:
+  id: telegram-example
+  description: telegram-example
+  triggers:
+    - type: manual
+  actions:
+    - name: telegram
+      provider:
+        type: telegram
+        config: "{{ providers.telegram }}"
+        with:
+          message: "test"
+          chat_id: " {{ os.environ['TELEGRAM_CHAT_ID'] }}"
diff --git a/examples/workflows/trello_new_card_alert.yml b/examples/workflows/trello_new_card_alert.yml
new file mode 100644
index 0000000000..72f3265b09
--- /dev/null
+++ b/examples/workflows/trello_new_card_alert.yml
@@ -0,0 +1,38 @@
+# A new trello card was created
+alert:
+  id: notify-new-trello-card
+  description: Notify my slack when new trello card is created
+  steps:
+    - name: trello-cards
+      provider:
+        type: trello
+        config: "{{ providers.trello-provider }}"
+        with:
+          project-name: demo-project
+          board_id: hIjQQX9S
+          filter: "createCard"
+      condition:
+        - name: assert-condition
+          type: assert
+          assert: "{{ state.notify-new-trello-card.-1.alert_context.alert_steps_context.trello-cards.results.number_of_cards }} >= {{steps.trello-cards.results.number_of_cards }}" # if there are new cards since the last run, trigger the action
+  actions:
+    - name: trigger-slack
+      provider:
+        type: slack
+        config: " {{ providers.slack-demo }} "
+        with:
+          channel: some-channel-that-youll-decide-later
+          # Message is always mandatory
+          message: >
+            A new card was created
+
+providers:
+  trello-provider:
+    description: Trello Production
+    authentication:
+      api_key: "{{ env.TRELLO_API_KEY }}"
+      api_token: "{{ env.TRELLO_API_TOKEN }}"
+  slack-demo:
+    description: Slack Demo
+    authentication:
+      webhook_url: "{{ env.SLACK_WEBHOOK_URL }}"
diff --git a/examples/workflows/update_jira_ticket.yml b/examples/workflows/update_jira_ticket.yml
new file mode 100644
index 0000000000..b445144aa6
--- /dev/null
+++ b/examples/workflows/update_jira_ticket.yml
@@ -0,0 +1,15 @@
+workflow:
+  id: update-jira-ticket
+  triggers:
+    - type: manual
+  actions:
+    - name: jira-action
+      provider:
+        config: '{{ providers.Jira }}'
+        type: jira
+        with:
+          board_name: ''
+          description: Update description of an issue
+          issue_id: 10023
+          project_key: ''
+          summary: Update summary of an issue
diff --git a/examples/workflows/update_service_now_tickets_status.yml b/examples/workflows/update_service_now_tickets_status.yml
new file mode 100644
index 0000000000..f4732cb3eb
--- /dev/null
+++ b/examples/workflows/update_service_now_tickets_status.yml
@@ -0,0 +1,29 @@
+workflow:
+  id: servicenow
+  description: update the ticket status every minute
+  triggers:
+    - type: manual
+  steps:
+    # get the alerts from keep
+    - name: get-alerts
+      provider:
+        type: keep
+        # get all the alerts with sys_id (means that a ticket exists for them)
+        with:
+          filters:
+            - key: ticket_type
+              value: servicenow
+  actions:
+    # update the tickets
+    - name: update-ticket
+      foreach: " {{ steps.get-alerts.results }} "
+      provider:
+        type: servicenow
+        config: " {{ providers.servicenow }} "
+        with:
+          ticket_id: "{{ foreach.value.alert_enrichment.enrichments.ticket_id }}"
+          table_name: "{{ foreach.value.alert_enrichment.enrichments.table_name }}"
+          fingerprint: "{{ foreach.value.alert_fingerprint }}"
+          enrich_alert:
+            - key: ticket_status
+              value: results.state
diff --git a/examples/workflows/workflow_only_first_time_example.yml b/examples/workflows/workflow_only_first_time_example.yml
new file mode 100644
index 0000000000..53e02efb80
--- /dev/null
+++ b/examples/workflows/workflow_only_first_time_example.yml
@@ -0,0 +1,18 @@
+workflow:
+  id: alert-first-time
+  description: send slack message only the first time an alert fires
+  triggers:
+    - type: alert
+      filters:
+        - key: name
+          value: "server-is-down"
+  actions:
+    - name: send-slack-message
+      if: "keep.is_first_time('{{ alert.fingerprint }}', '24h')"
+      provider:
+        type: slack
+        config: "{{ providers.slack }}"
+        with:
+          message: |
+            "Tier 1 Alert: {{ alert.name }} - {{ alert.description }}
+            Alert details: {{ alert }}"
diff --git a/examples/workflows/workflow_start_example.yml b/examples/workflows/workflow_start_example.yml
new file mode 100644
index 0000000000..9ccdfadaff
--- /dev/null
+++ b/examples/workflows/workflow_start_example.yml
@@ -0,0 +1,27 @@
+workflow:
+  id: alert-time-check
+  description: Handle alerts based on startedAt timestamp
+  triggers:
+    - type: alert
+      filters:
+        - key: name
+          value: "server-is-down"
+  actions:
+    - name: send-slack-message-tier-1
+      if: "keep.get_firing_time('{{ alert }}', 'minutes') > 15 and keep.get_firing_time('{{ alert }}', 'minutes') < 30"
+      provider:
+        type: slack
+        config: "{{ providers.slack }}"
+        with:
+          message: |
+            "Tier 1 Alert: {{ alert.name }} - {{ alert.description }}
+            Alert details: {{ alert }}"
+    - name: send-slack-message-tier-2
+      if: "keep.get_firing_time('{{ alert }}', 'minutes') > 30"
+      provider:
+        type: slack
+        config: "{{ providers.slack }}"
+        with:
+          message: |
+            "Tier 2 Alert: {{ alert.name }} - {{ alert.description }}
+            Alert details: {{ alert }}"
diff --git a/keep-ui/.env.local.example b/keep-ui/.env.local.example
new file mode 100644
index 0000000000..a9f72e2489
--- /dev/null
+++ b/keep-ui/.env.local.example
@@ -0,0 +1,17 @@
+NEXTAUTH_URL=http://localhost:3000
+
+# Required:
+# NEXTAUTH_SECRET= # Linux: `openssl rand -hex 32` or go to https://generate-secret.now.sh/32
+
+# API
+API_URL=http://localhost:8080
+# Auth
+AUTH0_CLIENT_ID=
+AUTH0_CLIENT_SECRET=
+AUTH0_ISSUER=
+# Pusher
+PUSHER_HOST=localhost
+PUSHER_PORT=6001
+PUSHER_APP_KEY=keepappkey
+# Logging
+LOG_FORMAT=dev_terminal
\ No newline at end of file
diff --git a/keep-ui/.eslintignore b/keep-ui/.eslintignore
new file mode 100644
index 0000000000..3c3629e647
--- /dev/null
+++ b/keep-ui/.eslintignore
@@ -0,0 +1 @@
+node_modules
diff --git a/keep-ui/.eslintrc.json b/keep-ui/.eslintrc.json
new file mode 100644
index 0000000000..4d765f2817
--- /dev/null
+++ b/keep-ui/.eslintrc.json
@@ -0,0 +1,3 @@
+{
+  "extends": ["next/core-web-vitals", "prettier"]
+}
diff --git a/keep-ui/.gitignore b/keep-ui/.gitignore
new file mode 100644
index 0000000000..42daa1e2a6
--- /dev/null
+++ b/keep-ui/.gitignore
@@ -0,0 +1,43 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+
+# Runtime data
+pids
+*.pid
+*.seed
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage
(http://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (http://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules +jspm_packages + +# Optional npm cache directory +.npm + +# Optional REPL history +.node_repl_history +.next + +.env.local + +app/topology/mock-topology-data.tsx +.vercel diff --git a/keep-ui/.prettierrc b/keep-ui/.prettierrc new file mode 100644 index 0000000000..f0eb61e0f7 --- /dev/null +++ b/keep-ui/.prettierrc @@ -0,0 +1,6 @@ +{ + "trailingComma": "es5", + "tabWidth": 2, + "semi": true, + "singleQuote": false +} diff --git a/keep-ui/README.md b/keep-ui/README.md new file mode 100644 index 0000000000..74fb49900b --- /dev/null +++ b/keep-ui/README.md @@ -0,0 +1,33 @@ +# Keep UI + +## Background + +Keep UI is a user interface platform designed to manage and configure providers for an application. It allows users to connect and disconnect various providers, such as Grafana and Datadog, and configure their authentication settings. The platform provides a user-friendly interface to facilitate the management of provider connections. + +## How to Start + +To start using Keep UI, follow the steps below: + +1. Clone the repository from GitHub. +2. Install the necessary dependencies by running `npm install` or `yarn install`. +3. Configure the environment variables required for the application. Refer to the documentation for the specific environment variables needed. +4. Start the development server using `npm run dev` or `yarn dev`. +5. Access the Keep UI application in your browser at `http://localhost:3000` (or the specified port). + +## How to Contribute + +Contributions to Keep UI are welcome and encouraged. To contribute to the project, please follow these guidelines: + +1. Fork the repository on GitHub. +2. Create a new branch for your feature or bug fix. +3. Make your changes in the branch, ensuring to adhere to the coding style and guidelines. +4. Write unit tests for new features or modifications, if applicable. +5. Commit your changes and push them to your forked repository. +6. Submit a pull request to the main repository, describing the changes and providing any additional relevant information. +7. Participate in the code review process and address any feedback or comments received. +8. Once approved, your changes will be merged into the main codebase. + +Please ensure that your contributions align with the project's coding standards, documentation guidelines, and overall goals. For major changes or new features, it is advisable to discuss them with the project maintainers or open an issue to gather feedback and ensure they align with the project roadmap. + +## License +Keep UI is released under the MIT License. 
diff --git a/keep-ui/app/ai/ai.tsx b/keep-ui/app/ai/ai.tsx new file mode 100644 index 0000000000..69f331629a --- /dev/null +++ b/keep-ui/app/ai/ai.tsx @@ -0,0 +1,216 @@ +"use client"; +import { Card, List, ListItem, Title, Subtitle } from "@tremor/react"; +import { useAIStats, usePollAILogs } from "utils/hooks/useAI"; +import { useSession } from "next-auth/react"; +import { getApiURL } from "utils/apiUrl"; +import { toast } from "react-toastify"; +import { useEffect, useState, useRef, FormEvent } from "react"; +import { AILogs } from "./model"; + +export default function Ai() { + const { data: aistats, isLoading } = useAIStats(); + const { data: session } = useSession(); + const [text, setText] = useState(""); + const [basicAlgorithmLog, setBasicAlgorithmLog] = useState(""); + const [newText, setNewText] = useState("Mine incidents"); + const [animate, setAnimate] = useState(false); + const onlyOnce = useRef(false); + + const mutateAILogs = (logs: AILogs) => { + setBasicAlgorithmLog(logs.log); + }; + usePollAILogs(mutateAILogs); + + useEffect(() => { + let index = 0; + + const interval = setInterval(() => { + setText(newText.slice(0, index + 1)); + index++; + + if (index === newText.length) { + clearInterval(interval); + } + }, 100); + + return () => { + clearInterval(interval); + }; + }, [newText]); + + const mineIncidents = async (e: FormEvent) => { + e.preventDefault(); + setAnimate(true); + setNewText("Mining ๐Ÿš€๐Ÿš€๐Ÿš€ ..."); + const apiUrl = getApiURL(); + const response = await fetch(`${apiUrl}/incidents/mine`, { + method: "POST", + headers: { + Authorization: `Bearer ${session?.accessToken}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({}), + }); + if (!response.ok) { + toast.error( + "Failed to mine incidents, please contact us if this issue persists." + ); + } + + setAnimate(false); + setNewText("Mine incidents"); + }; + + return ( +
+
+
+ AI Correlation + + Correlating alerts to incidents based on past alerts, incidents, and + other data. +
+
+ +
+ {aistats?.is_mining_enabled == false && ( +
+
๐Ÿ‘‹ You are almost there!
+ AI Correlation is coming soon. Make sure you have enough data + collected to prepare. +
+ + + + Connect an incident source to dump incidents, or create 10 + incidents manually + + + {aistats?.incidents_count && + aistats?.incidents_count >= 10 ? ( +
โœ…
+ ) : ( +
โณ
+ )} +
+
+ + Collect 100 alerts + + {aistats?.alerts_count && aistats?.alerts_count >= 100 ? ( +
โœ…
+ ) : ( +
โณ
+ )} +
+
+ + Collect alerts for more than 3 days + + {aistats?.first_alert_datetime && + new Date(aistats.first_alert_datetime) < + new Date(Date.now() - 3 * 24 * 60 * 60 * 1000) ? ( +
โœ…
+ ) : ( +
โณ
+ )} +
+
+
+
+
+ )} + {aistats?.is_mining_enabled && ( +
+
+ +

+ {aistats?.algorithm_verbose_name} +

+

+ Basic algorithm combining algorithmic methods to correlate + alerts to incidents and Large Language Models to provide + an incident summary.

+ +
+ Log: + {!basicAlgorithmLog &&

No recent logs found.

} + {basicAlgorithmLog} +
+ + +
+ +

+ Summarization v0.1 +

+

+ Using LLMs to provide a human-readable incident summary. +

+
+
+
+ )} +
+
+
+ ); +} diff --git a/keep-ui/app/ai/model.ts b/keep-ui/app/ai/model.ts new file mode 100644 index 0000000000..3d78cbb9fd --- /dev/null +++ b/keep-ui/app/ai/model.ts @@ -0,0 +1,11 @@ +export interface AIStats { + alerts_count: number; + incidents_count: number; + first_alert_datetime?: Date; + is_mining_enabled: boolean; + algorithm_verbose_name: string +} + +export interface AILogs { + log: string; +} \ No newline at end of file diff --git a/keep-ui/app/ai/page.tsx b/keep-ui/app/ai/page.tsx new file mode 100644 index 0000000000..d2eb3d5331 --- /dev/null +++ b/keep-ui/app/ai/page.tsx @@ -0,0 +1,11 @@ +import AI from "./ai"; + +export default function Page() { + return ; +} + +export const metadata = { + title: "Keep - AI Correlation", + description: + "Correlate Alerts and Incidents with AI to identify patterns and trends.", +}; diff --git a/keep-ui/app/alerts/ColumnSelection.tsx b/keep-ui/app/alerts/ColumnSelection.tsx new file mode 100644 index 0000000000..772e6ad046 --- /dev/null +++ b/keep-ui/app/alerts/ColumnSelection.tsx @@ -0,0 +1,135 @@ +import { FormEvent, Fragment, useRef } from "react"; +import { Table } from "@tanstack/table-core"; +import { Button } from "@tremor/react"; +import { useLocalStorage } from "utils/hooks/useLocalStorage"; +import { VisibilityState, ColumnOrderState } from "@tanstack/react-table"; +import { FloatingArrow, arrow, offset, useFloating } from "@floating-ui/react"; +import { Popover } from "@headlessui/react"; +import { FiSettings } from "react-icons/fi"; +import { DEFAULT_COLS, DEFAULT_COLS_VISIBILITY } from "./alert-table-utils"; +import { AlertDto } from "./models"; + +interface AlertColumnsSelectProps { + table: Table; + presetName: string; +} + +export default function ColumnSelection({ + table, + presetName, +}: AlertColumnsSelectProps) { + const arrowRef = useRef(null); + const { refs, floatingStyles, context } = useFloating({ + strategy: "fixed", + placement: "bottom-end", + middleware: [ + offset({ mainAxis: 10 }), + arrow({ + element: arrowRef, + }), + ], + }); + const tableColumns = table.getAllColumns(); + + const [, setColumnVisibility] = useLocalStorage( + `column-visibility-${presetName}`, + DEFAULT_COLS_VISIBILITY + ); + + const [columnOrder, setColumnOrder] = useLocalStorage( + `column-order-${presetName}`, + DEFAULT_COLS + ); + + const columnsOptions = tableColumns + .filter((col) => col.getIsPinned() === false) + .map((col) => col.id); + + const selectedColumns = tableColumns + .filter((col) => col.getIsVisible() && col.getIsPinned() === false) + .map((col) => col.id); + + const onMultiSelectChange = ( + event: FormEvent, + closePopover: VoidFunction + ) => { + event.preventDefault(); + + const formData = new FormData(event.currentTarget); + const valueKeys = Object.keys(Object.fromEntries(formData.entries())); + + const newColumnVisibility = columnsOptions.reduce( + (acc, key) => { + if (valueKeys.includes(key)) { + return { ...acc, [key]: true }; + } + + return { ...acc, [key]: false }; + }, + {} + ); + + const originalColsOrder = columnOrder.filter((columnId) => + valueKeys.includes(columnId) + ); + const newlyAddedCols = valueKeys.filter( + (columnId) => !columnOrder.includes(columnId) + ); + + const newColumnOrder = [...originalColsOrder, ...newlyAddedCols]; + + setColumnVisibility(newColumnVisibility); + setColumnOrder(newColumnOrder); + closePopover(); + }; + + + return ( + + {({ close }) => ( + <> + + + onMultiSelectChange(e, close)} + > + + Set table fields +
    + {columnsOptions.map((column) => ( +
  • + +
  • + ))} +
+ +
+ + )} +
+ ); +} diff --git a/keep-ui/app/alerts/ThemeSelection.tsx b/keep-ui/app/alerts/ThemeSelection.tsx new file mode 100644 index 0000000000..f91c2c7a08 --- /dev/null +++ b/keep-ui/app/alerts/ThemeSelection.tsx @@ -0,0 +1,123 @@ +import React, { useState, Fragment, useRef, FormEvent } from 'react'; +import { Popover } from '@headlessui/react'; +import { Button, Tab, TabGroup, TabList, TabPanels, TabPanel } from "@tremor/react"; +import { IoColorPaletteOutline } from 'react-icons/io5'; +import { FloatingArrow, arrow, offset, useFloating } from "@floating-ui/react"; + +const predefinedThemes = { + Transparent: { + critical: 'bg-white', + high: 'bg-white', + warning: 'bg-white', + low: 'bg-white', + info: 'bg-white' + }, + Keep: { + critical: 'bg-orange-400', // Highest opacity for critical + high: 'bg-orange-300', + warning: 'bg-orange-200', + low: 'bg-orange-100', + info: 'bg-orange-50' // Lowest opacity for info + }, + Basic: { + critical: 'bg-red-200', + high: 'bg-orange-200', + warning: 'bg-yellow-200', + low: 'bg-green-200', + info: 'bg-blue-200' + } +}; + +const themeKeyMapping = { + 0: 'Transparent', + 1: 'Keep', + 2: 'Basic' +}; +type ThemeName = keyof typeof predefinedThemes; + +export const ThemeSelection = ({ onThemeChange }: { onThemeChange: (theme: any) => void }) => { + const arrowRef = useRef(null); + const [selectedTab, setSelectedTab] = useState('Transparent'); + + const { refs, floatingStyles, context } = useFloating({ + strategy: "fixed", + placement: "bottom-end", + middleware: [offset({ mainAxis: 10 }), arrow({ element: arrowRef })], + }); + + const handleThemeChange = (event: any) => { + const themeIndex = event as 0 | 1 | 2; + handleApplyTheme(themeIndex as 0 | 1 | 2); + }; + + + + + const handleApplyTheme = (themeKey: keyof typeof themeKeyMapping) => { + const themeName = themeKeyMapping[themeKey]; + setSelectedTab(themeName as ThemeName); +}; + + + + const onApplyTheme = (close: () => void) => { + // themeName is now assured to be a key of predefinedThemes + const themeName: ThemeName = selectedTab; + const newTheme = predefinedThemes[themeName]; // This should now be error-free + onThemeChange(newTheme); + setSelectedTab('Transparent'); // Assuming 'Transparent' is a valid key + close(); // Close the popover + }; + + return ( + + {({ close }) => ( + <> + + + + + Set theme colors + + + Transparent + Keep + Basic + + + {Object.keys(predefinedThemes).map(themeName => ( + + {Object.entries(predefinedThemes[themeName as keyof typeof predefinedThemes]).map(([severity, color]) => ( +
+ {severity.charAt(0).toUpperCase() + severity.slice(1).toLowerCase()} +
+
+ ))} +
+ ))} +
+
+ +
+ + )} +
+ ); +}; diff --git a/keep-ui/app/alerts/TitleAndFilters.tsx b/keep-ui/app/alerts/TitleAndFilters.tsx new file mode 100644 index 0000000000..b6cc1a3018 --- /dev/null +++ b/keep-ui/app/alerts/TitleAndFilters.tsx @@ -0,0 +1,60 @@ +import { Table } from "@tanstack/react-table"; +import { DateRangePicker, DateRangePickerValue, Title } from "@tremor/react"; +import { AlertDto } from "./models"; +import ColumnSelection from "./ColumnSelection"; +import { ThemeSelection } from "./ThemeSelection"; + +type Theme = { + [key: string]: string; +}; + +type TableHeaderProps = { + presetName: string; + alerts: AlertDto[]; + table: Table; + onThemeChange: (newTheme: Theme) => void; +}; + +export const TitleAndFilters = ({ + presetName, + alerts, + table, + onThemeChange, +}: TableHeaderProps) => { + const onDateRangePickerChange = ({ + from: start, + to: end, + }: DateRangePickerValue) => { + table.setColumnFilters((existingFilters) => { + // remove any existing "lastReceived" filters + const filteredArrayFromLastReceived = existingFilters.filter( + ({ id }) => id !== "lastReceived" + ); + + return filteredArrayFromLastReceived.concat({ + id: "lastReceived", + value: { start, end }, + }); + }); + + table.resetPagination(); + }; + + return ( +
+
+ {presetName} +
+
+ +
+ + +
+
+
+  );
+};
diff --git a/keep-ui/app/alerts/ViewAlertModal.css b/keep-ui/app/alerts/ViewAlertModal.css
new file mode 100644
index 0000000000..5bf7577266
--- /dev/null
+++ b/keep-ui/app/alerts/ViewAlertModal.css
@@ -0,0 +1,14 @@
+.line-container {
+  position: relative;
+  display: block;
+}
+
+.un-enrich-icon {
+  position: absolute;
+  display: none;
+  left: 0;
+}
+
+.line-container:hover .un-enrich-icon {
+  display: block;
+}
diff --git a/keep-ui/app/alerts/ViewAlertModal.tsx b/keep-ui/app/alerts/ViewAlertModal.tsx
new file mode 100644
index 0000000000..9284d94749
--- /dev/null
+++ b/keep-ui/app/alerts/ViewAlertModal.tsx
@@ -0,0 +1,131 @@
+import { AlertDto } from "./models"; // Adjust the import path as needed
+import Modal from "@/components/ui/Modal"; // Ensure this path matches your project structure
+import { Button, Icon, Switch, Text } from "@tremor/react";
+import { toast } from "react-toastify";
+import { getApiURL } from "../../utils/apiUrl";
+import { useSession } from "next-auth/react";
+import { XMarkIcon } from "@heroicons/react/24/outline";
+import "./ViewAlertModal.css";
+import React, { useState } from "react";
+
+interface ViewAlertModalProps {
+  alert: AlertDto | null | undefined;
+  handleClose: () => void;
+  mutate: () => void;
+}
+
+const objectToJSONLine = (obj: any) => {
+  return JSON.stringify(obj, null, 2).slice(2, -2);
+};
+
+export const ViewAlertModal: React.FC<ViewAlertModalProps> = ({ alert, handleClose, mutate }) => {
+  const isOpen = !!alert;
+  const [showHighlightedOnly, setShowHighlightedOnly] = useState(false);
+  const { data: session } = useSession();
+
+  const unEnrichAlert = async (key: string) => {
+    if (confirm(`Are you sure you want to un-enrich ${key}?`)) {
+      try {
+        const requestData = {
+          enrichments: [key],
+          fingerprint: alert!.fingerprint,
+        };
+        const response = await fetch(`${getApiURL()}/alerts/unenrich`, {
+          method: "POST",
+          headers: {
+            "Content-Type": "application/json",
+            Authorization: `Bearer ${session?.accessToken}`,
+          },
+          body: JSON.stringify(requestData),
+        });
+
+        if (response.ok) {
+          toast.success(`${key} un-enriched successfully!`);
+          await mutate();
+        } else {
+          // Handle error
+          toast.error(`Failed to un-enrich ${key}`);
+          await mutate();
+        }
+      } catch (error) {
+        // Handle unexpected error
+        toast.error("An unexpected error occurred");
+      }
+    }
+  };
+
+  const highlightKeys = (json: any, keys: string[]) => {
+    const lines = Object.keys(json).length;
+    const isLast = (index: number) => index == lines - 1;
+
+    return Object.keys(json).map((key: string, index: number) => {
+      if (keys.includes(key)) {
+        return

unEnrichAlert(key)}> + + + + {objectToJSONLine({[key]: json[key]})}{isLast(index) ? null : ","} +

+ } else { + if (!showHighlightedOnly || keys.length == 0) { + return

{objectToJSONLine({[key]: json[key]})}{isLast(index) ? null : ","}

+ } + } + }) + } + + const handleCopy = async () => { + if (alert) { + try { + await navigator.clipboard.writeText(JSON.stringify(alert, null, 2)); + toast.success("Alert copied to clipboard!"); + } catch (err) { + toast.error("Failed to copy alert."); + } + } + }; + + return ( + +
+

Alert Details

+
{/* Adjust gap as needed */} +
+
+ setShowHighlightedOnly(!showHighlightedOnly)} + /> + +
+ + + +
+
+ {alert && ( +
+          

{

+ {highlightKeys(alert, alert.enriched_fields)} +

}

+
+ )} +
+ ); +}; diff --git a/keep-ui/app/alerts/[id]/page.tsx b/keep-ui/app/alerts/[id]/page.tsx new file mode 100644 index 0000000000..89a0076595 --- /dev/null +++ b/keep-ui/app/alerts/[id]/page.tsx @@ -0,0 +1,15 @@ +import AlertsPage from "../alerts.client"; + +type PageProps = { + params: { id: string }; + searchParams: { [key: string]: string | string[] | undefined }; +}; + +export default function Page({ params }: PageProps) { + return ; +} + +export const metadata = { + title: "Keep - Alerts", + description: "Single pane of glass for all your alerts.", +}; diff --git a/keep-ui/app/alerts/alert-actions.tsx b/keep-ui/app/alerts/alert-actions.tsx new file mode 100644 index 0000000000..87a7be34c0 --- /dev/null +++ b/keep-ui/app/alerts/alert-actions.tsx @@ -0,0 +1,139 @@ +import { useState } from "react"; +import { Button } from "@tremor/react"; +import { getSession } from "next-auth/react"; +import { getApiURL } from "utils/apiUrl"; +import { AlertDto } from "./models"; +import { PlusIcon } from "@radix-ui/react-icons"; +import { toast } from "react-toastify"; +import { usePresets } from "utils/hooks/usePresets"; +import { useRouter } from "next/navigation"; +import { SilencedDoorbellNotification } from "@/components/icons"; +import AlertAssociateIncidentModal from "./alert-associate-incident-modal"; + +interface Props { + selectedRowIds: string[]; + alerts: AlertDto[]; + clearRowSelection: () => void; + setDismissModalAlert?: (alert: AlertDto[] | null) => void; + mutateAlerts?: () => void; +} + +export default function AlertActions({ + selectedRowIds, + alerts, + clearRowSelection, + setDismissModalAlert, + mutateAlerts +}: Props) { + const router = useRouter(); + const { useAllPresets } = usePresets(); + const { mutate: presetsMutator } = useAllPresets({ + revalidateOnFocus: false, + }); + const [isIncidentSelectorOpen, setIsIncidentSelectorOpen] = useState(false); + + const selectedAlerts = alerts.filter((_alert, index) => + selectedRowIds.includes(index.toString()) + ); + + async function addOrUpdatePreset() { + const newPresetName = prompt("Enter new preset name"); + if (newPresetName) { + const distinctAlertNames = Array.from( + new Set(selectedAlerts.map((alert) => alert.name)) + ); + const formattedCel = distinctAlertNames.reduce( + (accumulator, currentValue, currentIndex) => { + return ( + accumulator + + (currentIndex > 0 ? " || " : "") + + `name == "${currentValue}"` + ); + }, + "" + ); + const options = [{ value: formattedCel, label: "CEL" }]; + const session = await getSession(); + const apiUrl = getApiURL(); + const response = await fetch(`${apiUrl}/preset`, { + method: "POST", + headers: { + Authorization: `Bearer ${session?.accessToken}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({ name: newPresetName, options: options }), + }); + if (response.ok) { + toast(`Preset ${newPresetName} created!`, { + position: "top-left", + type: "success", + }); + presetsMutator(); + clearRowSelection(); + router.replace(`/alerts/${newPresetName}`); + } else { + toast(`Error creating preset ${newPresetName}`, { + position: "top-left", + type: "error", + }); + } + } + } + + const showIncidentSelector = () => { + setIsIncidentSelectorOpen(true); + } + const hideIncidentSelector = () => { + setIsIncidentSelectorOpen(false); + } + + const handleSuccessfulAlertsAssociation = () => { + hideIncidentSelector(); + clearRowSelection(); + if (mutateAlerts) { + mutateAlerts(); + } + } + + return ( +
+ + + + +
+ ); +} diff --git a/keep-ui/app/alerts/alert-assign-ticket-modal.tsx b/keep-ui/app/alerts/alert-assign-ticket-modal.tsx new file mode 100644 index 0000000000..4b6c99cfcd --- /dev/null +++ b/keep-ui/app/alerts/alert-assign-ticket-modal.tsx @@ -0,0 +1,273 @@ +import React from "react"; +import Select, { components } from "react-select"; +import { Button, TextInput, Text } from "@tremor/react"; +import { PlusIcon } from "@heroicons/react/20/solid"; +import { useForm, Controller, SubmitHandler } from "react-hook-form"; +import { Providers } from "./../providers/providers"; +import { useSession } from "next-auth/react"; +import { getApiURL } from "utils/apiUrl"; +import { AlertDto } from "./models"; +import Modal from "@/components/ui/Modal"; + +interface AlertAssignTicketModalProps { + handleClose: () => void; + ticketingProviders: Providers; + alert: AlertDto | null; +} + +interface OptionType { + value: string; + label: string; + id: string; + type: string; + icon?: string; + isAddProvider?: boolean; +} + +interface FormData { + provider: { + id: string; + value: string; + type: string; + }; + ticket_url: string; +} + +const AlertAssignTicketModal = ({ + handleClose, + ticketingProviders, + alert, +}: AlertAssignTicketModalProps) => { + const { + handleSubmit, + control, + formState: { errors }, + } = useForm(); + // get the token + const { data: session } = useSession(); + + // if this modal should not be open, do nothing + if (!alert) return null; + + const onSubmit: SubmitHandler = async (data) => { + try { + // build the formData + const requestData = { + enrichments: { + ticket_type: data.provider.type, + ticket_url: data.ticket_url, + ticket_provider_id: data.provider.value, + }, + fingerprint: alert.fingerprint, + }; + + const response = await fetch(`${getApiURL()}/alerts/enrich`, { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${session?.accessToken}`, + }, + body: JSON.stringify(requestData), + }); + + if (response.ok) { + // Handle success + console.log("Ticket assigned successfully"); + alert.ticket_url = data.ticket_url; + handleClose(); + } else { + // Handle error + console.error("Failed to assign ticket"); + } + } catch (error) { + // Handle unexpected error + console.error("An unexpected error occurred"); + } + }; + + const providerOptions: OptionType[] = ticketingProviders.map((provider) => ({ + id: provider.id, + value: provider.id, + label: provider.details.name || "", + type: provider.type, + })); + + const customOptions: OptionType[] = [ + ...providerOptions, + { + value: "add_provider", + label: "Add another ticketing provider", + icon: "plus", + isAddProvider: true, + id: "add_provider", + type: "", + }, + ]; + + const handleOnChange = (option: any) => { + if (option.value === "add_provider") { + window.open("/providers?labels=ticketing", "_blank"); + } + }; + + const Option = (props: any) => { + // Check if the option is 'add_provider' + const isAddProvider = props.data.isAddProvider; + + return ( + +
+ {isAddProvider ? ( + + ) : ( + props.data.type && ( + + ) + )} + + {props.data.label} + +
+
+ ); + }; + + const SingleValue = (props: any) => { + const { children, data } = props; + + return ( + +
+ {data.isAddProvider ? ( + + ) : ( + data.type && ( + + ) + )} + {children} +
+
+ ); + }; + + // if alert is not null, open the modal + const isOpen = alert !== null; + + return ( + +
+ {ticketingProviders.length > 0 ? ( +
+
+ + ( + incident.id === selectedIncident + )?.user_generated_name || + incidents.items.find( + (incident) => incident.id === selectedIncident + )?.ai_generated_name || + "", + } + : null + } + onChange={(selectedOption) => + setSelectedIncident(selectedOption?.value) + } + options={incidents.items?.map((incident) => ({ + value: incident.id, + label: + incident.user_generated_name || + incident.ai_generated_name || + "", + }))} + /> + +
+ + + +
+
+ ) : ( +
+
+ No Incidents Yet +
+ +
+ + + +
+
+ )} +
+
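As a side note, the incident dropdown above labels each option by falling back from the user-provided name to the AI-generated one. The intent, extracted into a small helper (a sketch; IncidentLike is inferred from the fields this file reads and is not part of the original code):

interface IncidentLike {
  id: string;
  user_generated_name?: string;
  ai_generated_name?: string;
}

// Same fallback chain as the value/options props above.
const toIncidentOption = (incident: IncidentLike) => ({
  value: incident.id,
  label: incident.user_generated_name || incident.ai_generated_name || "",
});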
+ ); +}; + +export default AlertAssociateIncidentModal; diff --git a/keep-ui/app/alerts/alert-change-status-modal.tsx b/keep-ui/app/alerts/alert-change-status-modal.tsx new file mode 100644 index 0000000000..6dcb5bcf6c --- /dev/null +++ b/keep-ui/app/alerts/alert-change-status-modal.tsx @@ -0,0 +1,171 @@ +import { Button, Title, Subtitle } from "@tremor/react"; +import Modal from "@/components/ui/Modal"; +import Select, { + CSSObjectWithLabel, + ControlProps, + OptionProps, + GroupBase, +} from "react-select"; +import { useState } from "react"; +import { AlertDto, Status } from "./models"; +import { getApiURL } from "utils/apiUrl"; +import { useSession } from "next-auth/react"; +import { toast } from "react-toastify"; +import { + CheckCircleIcon, + ExclamationCircleIcon, + PauseIcon, + XCircleIcon, + QuestionMarkCircleIcon, +} from "@heroicons/react/24/outline"; +import { usePresets } from "utils/hooks/usePresets"; +import { useAlerts } from "utils/hooks/useAlerts"; + +const statusIcons = { + [Status.Firing]: , + [Status.Resolved]: , + [Status.Acknowledged]: , + [Status.Suppressed]: , + [Status.Pending]: , +}; + +const customSelectStyles = { + control: ( + base: CSSObjectWithLabel, + state: ControlProps< + { value: Status; label: JSX.Element }, + false, + GroupBase<{ value: Status; label: JSX.Element }> + > + ) => ({ + ...base, + borderColor: state.isFocused ? "orange" : base.borderColor, + boxShadow: state.isFocused ? "0 0 0 1px orange" : base.boxShadow, + "&:hover": { + borderColor: "orange", + }, + }), + option: ( + base: CSSObjectWithLabel, + { + isFocused, + }: OptionProps< + { value: Status; label: JSX.Element }, + false, + GroupBase<{ value: Status; label: JSX.Element }> + > + ) => ({ + ...base, + backgroundColor: isFocused ? "rgba(255,165,0,0.1)" : base.backgroundColor, + "&:hover": { + backgroundColor: "rgba(255,165,0,0.2)", + }, + }), +}; + +interface Props { + alert: AlertDto | null | undefined; + handleClose: () => void; + presetName: string; +} + +export default function AlertChangeStatusModal({ + alert, + handleClose, + presetName, +}: Props) { + const { data: session } = useSession(); + const [selectedStatus, setSelectedStatus] = useState(null); + const { useAllPresets } = usePresets(); + const { mutate: presetsMutator } = useAllPresets(); + const { useAllAlerts } = useAlerts(); + const { mutate: alertsMutator } = useAllAlerts(presetName, { + revalidateOnMount: false, + }); + + if (!alert) return null; + + const statusOptions = Object.values(Status) + .filter((status) => status !== alert.status) // Exclude current status + .map((status) => ({ + value: status, + label: ( +
+ {statusIcons[status]} + {status.charAt(0).toUpperCase() + status.slice(1)} +
+ ), + })); + + const clearAndClose = () => { + setSelectedStatus(null); + handleClose(); + }; + + const handleChangeStatus = async () => { + if (!selectedStatus) { + toast.error("Please select a new status."); + return; + } + + try { + const response = await fetch(`${getApiURL()}/alerts/enrich?dispose_on_new_alert=true`, { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${session?.accessToken}`, + }, + body: JSON.stringify({ + enrichments: { + status: selectedStatus, + ...(selectedStatus !== Status.Suppressed && { + dismissed: false, + dismissUntil: "", + }), + }, + fingerprint: alert.fingerprint, + }), + }); + + if (response.ok) { + toast.success("Alert status changed successfully!"); + clearAndClose(); + await alertsMutator(); + await presetsMutator(); + } else { + toast.error("Failed to change alert status."); + } + } catch (error) { + toast.error("An error occurred while changing alert status."); + } + }; + + return ( + + Change Alert Status + + Change status from {alert.status} to: +
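For reference, handleChangeStatus above sends a body like the following to ${getApiURL()}/alerts/enrich?dispose_on_new_alert=true. This is a sketch with example values; per the conditional spread in the code, dismissed and dismissUntil are only included when the new status is not Suppressed:

// Illustrative payload mirroring handleChangeStatus above.
const exampleStatusEnrichment = {
  enrichments: {
    status: "resolved", // selectedStatus (example value)
    dismissed: false, // reset whenever the new status is not "suppressed"
    dismissUntil: "",
  },
  fingerprint: "example-alert-fingerprint", // alert.fingerprint
};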
+ + validateAndSetParams(param.name, value, param.mandatory) + } + > + {param.expected_values!.map((value) => { + return ( + + {value} + + ); + })} + + )} + {param.type.toLowerCase() === "str" && ( + + validateAndSetParams(param.name, value, param.mandatory) + } + /> + )} + {param.type.toLowerCase() === "datetime" && ( + { + if (value) { + validateAndSetParams( + param.name, + value.toISOString(), + param.mandatory + ); + } + }} + /> + )} +
+ ); + }; + + const invokeMethod = async ( + provider: Provider, + method: ProviderMethod, + userParams: { [key: string]: string } + ) => { + const session = await getSession(); + const apiUrl = getApiURL(); + + try { + const response = await fetch( + `${apiUrl}/providers/${provider.id}/invoke/${method.func_name}`, + { + method: "POST", + headers: { + Authorization: `Bearer ${session!.accessToken}`, + "Content-Type": "application/json", + }, + body: JSON.stringify(userParams), + } + ); + const responseObject = await response.json(); + if (response.ok) { + if (method.type === "action") mutate(); + toast.success(`Successfully called "${method.name}"`, { + position: toast.POSITION.TOP_LEFT, + }); + if (method.type === "view") { + setMethodResult(responseObject); + setIsLoading(false); + } + } else { + toast.error( + `Failed to invoke "${method.name}" on ${ + provider.details.name ?? provider.id + } due to ${responseObject.detail}`, + { position: toast.POSITION.TOP_LEFT } + ); + } + } catch (e: any) { + toast.error( + `Failed to invoke "${method.name}" on ${ + provider.details.name ?? provider.id + } due to ${e.message}`, + { position: toast.POSITION.TOP_LEFT } + ); + handleClose(); + } finally { + if (method.type === "action") { + handleClose(); + } + } + }; + + const isInvokeEnabled = () => { + return method.func_params + ?.filter((fp) => fp.mandatory) + .every((fp) => + Object.keys({ + ...inputParameters, + }).includes(fp.name) + ); + }; + + return ( + + {isLoading ? ( + + ) : methodResult ? ( + + ) : ( +
+ {method.func_params?.map((param) => { + return getInputs(param); + })} + +
+ )} +
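To make the flow concrete, here is a minimal sketch of the call invokeMethod above issues. The endpoint shape and headers come from this file; the provider id, method name, and parameter are hypothetical:

// Sketch: "datadog-prod" and "mute_monitor" are invented identifiers.
// getApiURL comes from utils/apiUrl, as imported in the surrounding files.
async function invokeProviderMethodExample(accessToken: string) {
  const response = await fetch(
    `${getApiURL()}/providers/datadog-prod/invoke/mute_monitor`,
    {
      method: "POST",
      headers: {
        Authorization: `Bearer ${accessToken}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify({ monitor_id: "12345" }), // userParams (example)
    }
  );
  return response.json();
}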
+ ); +} diff --git a/keep-ui/app/alerts/alert-method-results-table.tsx b/keep-ui/app/alerts/alert-method-results-table.tsx new file mode 100644 index 0000000000..7b3d85d7a2 --- /dev/null +++ b/keep-ui/app/alerts/alert-method-results-table.tsx @@ -0,0 +1,46 @@ +import { + Table, + TableHead, + TableRow, + TableHeaderCell, + TableBody, + TableCell, +} from "@tremor/react"; + +export default function AlertMethodResultsTable({ + results, +}: { + results: string[] | object[]; +}) { + const resultsAreObject = results.length > 0 && typeof results[0] === "object"; + return ( + + + + {!resultsAreObject ? ( + Results + ) : ( + Object.keys(results[0]).map((key, index) => { + return {key}; + }) + )} + + + + {results.map((result, index) => { + return !resultsAreObject ? ( + + {result as string} + + ) : ( + + {Object.values(result).map((value, index) => { + return {value}; + })} + + ); + })} + +
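A hypothetical usage of this table: with an object[] input the headers are derived from Object.keys of the first row, while a string[] input renders a single "Results" column.

// Hypothetical data; the rendered headers would be "monitor" and "status".
const exampleResults = [
  { monitor: "cpu-high", status: "muted" },
  { monitor: "disk-full", status: "active" },
];
// Rendered as: <AlertMethodResultsTable results={exampleResults} />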
+ ); +} diff --git a/keep-ui/app/alerts/alert-name.tsx b/keep-ui/app/alerts/alert-name.tsx new file mode 100644 index 0000000000..049338de47 --- /dev/null +++ b/keep-ui/app/alerts/alert-name.tsx @@ -0,0 +1,160 @@ +import { + ArrowTopRightOnSquareIcon, + BookOpenIcon, + TicketIcon, + TrashIcon, + PencilSquareIcon, + Cog8ToothIcon, +} from "@heroicons/react/24/outline"; +import { Icon } from "@tremor/react"; +import { AlertDto, AlertToWorkflowExecution } from "./models"; +import { useWorkflowExecutions } from "utils/hooks/useWorkflowExecutions"; +import { useRouter } from "next/navigation"; + +interface Props { + alert: AlertDto; + setNoteModalAlert?: (alert: AlertDto) => void; + setTicketModalAlert?: (alert: AlertDto) => void; +} +export default function AlertName({ + alert, + setNoteModalAlert, + setTicketModalAlert, +}: Props) { + const router = useRouter(); + const { data: executions } = useWorkflowExecutions(); + + const handleNoteClick = () => { + if (setNoteModalAlert) { + setNoteModalAlert(alert); + } + }; + + const handleTicketClick = () => { + if (!ticketUrl && setTicketModalAlert) { + setTicketModalAlert(alert); + } else { + window.open(ticketUrl, "_blank"); // Open the ticket URL in a new tab + } + }; + + const relevantWorkflowExecution = + executions?.find((wf) => wf.alert_fingerprint === alert.fingerprint) ?? + null; + + const { + name, + url, + generatorURL, + deleted, + note, + ticket_url: ticketUrl, + ticket_status: ticketStatus, + playbook_url, + } = alert; + + function handleWorkflowClick( + relevantWorkflowExecution: AlertToWorkflowExecution + ): void { + router.push( + `/workflows/${relevantWorkflowExecution.workflow_id}/runs/${relevantWorkflowExecution.workflow_execution_id}` + ); + } + + return ( +
+
+ {name} +
+
+ {(url ?? generatorURL) && ( + + + + )} + {setTicketModalAlert && ( + + )} + + {playbook_url && ( + + + + )} + {setNoteModalAlert && ( + + )} + + {deleted && ( + + )} + {relevantWorkflowExecution && ( + handleWorkflowClick(relevantWorkflowExecution)} + className="ml-1 cursor-pointer" + variant="solid" + /> + )} +
+
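Worth noting: the workflow icon rendered above appears only when an execution is correlated to the alert, and that correlation is purely by fingerprint. The lookup from the top of this component, shown in isolation as a sketch:

// Same logic as the relevantWorkflowExecution lookup above.
const findRelevantExecution = (
  executions: AlertToWorkflowExecution[] | undefined,
  fingerprint: string
) => executions?.find((wf) => wf.alert_fingerprint === fingerprint) ?? null;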
+ ); +} diff --git a/keep-ui/app/alerts/alert-note-modal.tsx b/keep-ui/app/alerts/alert-note-modal.tsx new file mode 100644 index 0000000000..bdd3b9c333 --- /dev/null +++ b/keep-ui/app/alerts/alert-note-modal.tsx @@ -0,0 +1,130 @@ +"use client"; + +import React, { useEffect, useState } from "react"; +// https://github.com/zenoamaro/react-quill/issues/292 +const ReactQuill = + typeof window === "object" ? require("react-quill") : () => false; +import "react-quill/dist/quill.snow.css"; +import { Button } from "@tremor/react"; +import { getApiURL } from "../../utils/apiUrl"; +import { useSession } from "next-auth/react"; +import { AlertDto } from "./models"; +import Modal from "@/components/ui/Modal"; + +interface AlertNoteModalProps { + handleClose: () => void; + alert: AlertDto | null; +} + +const AlertNoteModal = ({ handleClose, alert }: AlertNoteModalProps) => { + const [noteContent, setNoteContent] = useState(""); + + useEffect(() => { + if (alert) { + setNoteContent(alert.note || ""); + } + }, [alert]); + // get the session + const { data: session } = useSession(); + + // if this modal should not be open, do nothing + if (!alert) return null; + + const formats = [ + "header", + "bold", + "italic", + "underline", + "list", + "bullet", + "link", + "align", + "blockquote", + "code-block", + "color", + ]; + + const modules = { + toolbar: [ + [{ header: "1" }, { header: "2" }], + [{ list: "ordered" }, { list: "bullet" }], + ["bold", "italic", "underline"], + ["link"], + [{ align: [] }], + ["blockquote", "code-block"], // Add quote and code block options to the toolbar + [{ color: [] }], // Add color option to the toolbar + ], + }; + + const saveNote = async () => { + try { + // build the formData + const requestData = { + enrichments: { + note: noteContent, + }, + fingerprint: alert.fingerprint, + }; + const response = await fetch(`${getApiURL()}/alerts/enrich`, { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${session?.accessToken}`, + }, + body: JSON.stringify(requestData), + }); + + if (response.ok) { + // Handle success + console.log("Note saved successfully"); + handleNoteClose(); + } else { + // Handle error + console.error("Failed to save note"); + } + } catch (error) { + // Handle unexpected error + console.error("An unexpected error occurred"); + } + }; + + const isOpen = alert !== null; + + const handleNoteClose = () => { + alert.note = noteContent; + setNoteContent(""); + handleClose(); + }; + + return ( + + {/* WYSIWYG editor */} + setNoteContent(value)} + theme="snow" // Use the Snow theme + placeholder="Add your note here..." + modules={modules} + formats={formats} // Add formats + /> +
+ + +
+
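The conditional require at the top of this file works around react-quill breaking under server-side rendering (see the linked issue). A common alternative in Next.js apps, shown here only as a sketch and not what this file does, is a dynamic import with SSR disabled:

import dynamic from "next/dynamic";

// Loads react-quill on the client only, skipping it during SSR.
const ReactQuill = dynamic(() => import("react-quill"), { ssr: false });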
+ ); +}; + +export default AlertNoteModal; diff --git a/keep-ui/app/alerts/alert-pagination.tsx b/keep-ui/app/alerts/alert-pagination.tsx new file mode 100644 index 0000000000..fe45aec93e --- /dev/null +++ b/keep-ui/app/alerts/alert-pagination.tsx @@ -0,0 +1,133 @@ +import { + ArrowPathIcon, + ChevronDoubleLeftIcon, + ChevronDoubleRightIcon, + ChevronLeftIcon, + ChevronRightIcon, + TableCellsIcon, +} from "@heroicons/react/24/outline"; +import { Button, Text } from "@tremor/react"; +import { StylesConfig, SingleValueProps, components, GroupBase } from 'react-select'; +import Select from 'react-select'; +import { AlertDto } from "./models"; +import { Table } from "@tanstack/react-table"; +import { useAlerts } from "utils/hooks/useAlerts"; + +interface Props { + presetName: string; + table: Table; + isRefreshAllowed: boolean; +} + +interface OptionType { + value: string; + label: string; +} + + const customStyles: StylesConfig> = { + control: (provided, state) => ({ + ...provided, + borderColor: state.isFocused ? 'orange' : provided.borderColor, + '&:hover': { borderColor: 'orange' }, + boxShadow: state.isFocused ? '0 0 0 1px orange' : provided.boxShadow, + }), + singleValue: (provided) => ({ + ...provided, + display: 'flex', + alignItems: 'center', + }), + menu: (provided) => ({ + ...provided, + color: 'orange', + }), + option: (provided, state) => ({ + ...provided, + backgroundColor: state.isSelected ? 'orange' : provided.backgroundColor, + '&:hover': { backgroundColor: state.isSelected ? 'orange' : '#f5f5f5' }, + color: state.isSelected ? 'white' : provided.color, + }), + }; + + const SingleValue = ({ children, ...props }: SingleValueProps>) => ( + + {children} + + + ); + + +export default function AlertPagination({ presetName, table, isRefreshAllowed }: Props) { + const { usePresetAlerts } = useAlerts(); + const { mutate, isLoading: isValidating } = usePresetAlerts(presetName); + + const pageIndex = table.getState().pagination.pageIndex; + const pageCount = table.getPageCount(); + + return ( +
+ + Showing {pageCount === 0 ? 0 : pageIndex + 1} of {pageCount} + +
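For the record, the full react-select generic signatures behind the styles object above would presumably read as follows, based on react-select's typings and the OptionType interface defined in this file:

// Presumed full signatures for the StylesConfig/SingleValueProps generics.
const typedStyles: StylesConfig<OptionType, false, GroupBase<OptionType>> = {
  control: (provided, state) => ({
    ...provided,
    borderColor: state.isFocused ? "orange" : provided.borderColor,
  }),
};
// Likewise: SingleValueProps<OptionType, false, GroupBase<OptionType>>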
+ + {selectedSource && ( + + )} + {alertSources.map((source) => ( + +
+ + {source.name.toLowerCase()} +
+
+ ))} + + + Feel free to edit the payload as you want. However, some of the + providers expect specific fields, so be careful. + +
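As a purely illustrative example of such a payload (invented values; the exact fields each provider expects differ, which is what the warning above is about):

// Hypothetical simulated-alert payload, not tied to any specific provider.
const examplePayload = {
  name: "Test alert from Keep",
  severity: "critical",
  description: "Simulated alert used to exercise the pipeline",
};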
+ {selectedSource && ( + <> +
+ +